From ca16f1e8a703491bcaac0d13379d2556e8ca837d Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Fri, 26 May 2023 23:29:05 +0300 Subject: std.Target adjustments * move `ptrBitWidth` from Arch to Target since it needs to know about the abi * double isn't always 8 bytes * AVR uses 1-byte alignment for everything in GCC --- src/arch/riscv64/CodeGen.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index d4c7eb0c70..5fb07c5fdc 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -826,7 +826,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { if (self.register_manager.tryAllocReg(inst, gp)) |reg| { -- cgit v1.2.3 From 9aec2758cc29d27c31dcb0b4bb040484a885ef23 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 2 May 2023 15:01:45 -0700 Subject: stage2: start the InternPool transition Instead of doing everything at once, which is a hopelessly large task, this introduces a piecemeal transition that can be done in small increments. This is a minimal changeset that keeps the compiler compiling. It only uses the InternPool for a small set of types. Behavior tests are not passing. Air.Inst.Ref and Zir.Inst.Ref are separated into different enums but compile-time verified to have the same fields in the same order. The large set of changes is mainly to deal with the fact that most Type and Value methods now require a Module to be passed in, so that the InternPool object can be accessed.
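The compile-time verification mentioned above lands in the Module.zig hunk later in this patch; as a minimal standalone sketch of the same technique, with hypothetical enums RefA and RefB standing in for Zir.Inst.Ref, Air.Inst.Ref, and InternPool.Index:

const std = @import("std");

// Stand-ins for the Ref enums, which this commit keeps field-for-field
// identical with InternPool.Index.
const RefA = enum(u32) { u8_type, bool_type, none = std.math.maxInt(u32), _ };
const RefB = enum(u32) { u8_type, bool_type, none = std.math.maxInt(u32), _ };

comptime {
    // Checked at compile time: same field names in the same order, so the
    // numeric values of shared constants agree across both enums. A
    // mismatch fails the build, which is what guards the correspondence.
    for (@typeInfo(RefA).Enum.fields, @typeInfo(RefB).Enum.fields) |a, b| {
        std.debug.assert(std.mem.eql(u8, a.name, b.name));
    }
}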
--- src/Air.zig | 125 ++- src/AstGen.zig | 6 +- src/Autodoc.zig | 2 - src/Compilation.zig | 3 +- src/InternPool.zig | 518 ++++++++- src/Liveness.zig | 8 +- src/Module.zig | 335 ++++-- src/RangeSet.zig | 13 +- src/Sema.zig | 2383 +++++++++++++++++++++++------------------- src/TypedValue.zig | 42 +- src/Zir.zig | 518 ++------- src/arch/aarch64/CodeGen.zig | 351 ++++--- src/arch/aarch64/abi.zig | 36 +- src/arch/arm/CodeGen.zig | 342 +++--- src/arch/arm/abi.zig | 41 +- src/arch/riscv64/CodeGen.zig | 72 +- src/arch/riscv64/abi.zig | 18 +- src/arch/sparc64/CodeGen.zig | 239 +++-- src/arch/wasm/CodeGen.zig | 966 +++++++++-------- src/arch/wasm/abi.zig | 41 +- src/arch/x86_64/CodeGen.zig | 774 +++++++------- src/arch/x86_64/abi.zig | 82 +- src/codegen.zig | 204 ++-- src/codegen/c.zig | 727 ++++++------- src/codegen/c/type.zig | 150 ++- src/codegen/llvm.zig | 1557 ++++++++++++++------------- src/codegen/spirv.zig | 208 ++-- src/link/Coff.zig | 7 +- src/link/Dwarf.zig | 103 +- src/link/Elf.zig | 9 +- src/link/MachO.zig | 11 +- src/link/Plan9.zig | 9 +- src/link/Wasm.zig | 16 +- src/print_air.zig | 8 +- src/print_zir.zig | 12 +- src/target.zig | 128 --- src/type.zig | 1313 ++++++++++++----------- src/value.zig | 880 ++++++++-------- 38 files changed, 6473 insertions(+), 5784 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index 7ee36206f1..b60e8eda9d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -5,10 +5,12 @@ const std = @import("std"); const builtin = @import("builtin"); -const Value = @import("value.zig").Value; -const Type = @import("type.zig").Type; const assert = std.debug.assert; + const Air = @This(); +const Value = @import("value.zig").Value; +const Type = @import("type.zig").Type; +const InternPool = @import("InternPool.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. @@ -837,7 +839,88 @@ pub const Inst = struct { /// The position of an AIR instruction within the `Air` instructions array. 
pub const Index = u32; - pub const Ref = @import("Zir.zig").Inst.Ref; + pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + single_const_pointer_to_comptime_int_type = 
@enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + + /// This Ref does not correspond to any AIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = std.math.maxInt(u32), + _, + }; /// All instructions have an 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as @@ -1066,10 +1149,13 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].ty; + if (ref_int < InternPool.static_keys.len) { + return .{ + .ip_index = InternPool.static_keys[ref_int].typeOf(), + .legacy = undefined, + }; } - return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); + return air.typeOfIndex(ref_int - ref_start_index); } pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { @@ -1286,11 +1372,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand); - switch (callee_ty.zigTypeTag()) { - .Fn => return callee_ty.fnReturnType(), - .Pointer => return callee_ty.childType().fnReturnType(), - else => unreachable, - } + return callee_ty.fnReturnType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { @@ -1328,11 +1410,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @enumToInt(ref); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - var buffer: Value.ToTypeBuffer = undefined; - return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer); + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toType(); } - const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); assert(air_tags[inst_index] == .const_ty); @@ -1367,7 +1449,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.* = undefined; } -const ref_start_index: u32 = 
Air.Inst.Ref.typed_value_map.len; +pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); @@ -1383,17 +1465,18 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { +pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].val; + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(), + else => return air.typeOfIndex(inst_index).onePossibleValue(mod), } } diff --git a/src/AstGen.zig b/src/AstGen.zig index b38067fd03..6461b11d80 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -8530,7 +8530,7 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, .call => { - const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]); + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); const args = try expr(gz, scope, .{ .rl = .none }, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ @@ -10298,10 +10298,6 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 879f0a6b15..c20c5771dd 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -95,8 +95,6 @@ pub fn generateZirData(self: *Autodoc) !void { } } - log.debug("Ref map size: {}", .{Ref.typed_value_map.len}); - const root_src_dir = self.comp_module.main_pkg.root_src_directory; const root_src_path = self.comp_module.main_pkg.root_src_path; const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path}); diff --git a/src/Compilation.zig b/src/Compilation.zig index cbdc789d40..15e393c35c 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1317,7 +1317,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .emit_h = emit_h, .error_name_list = .{}, }; - try module.error_name_list.append(gpa, "(no error)"); + try module.init(); break :blk module; } else blk: { @@ -2064,6 +2064,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.emit_docs) |doc_location| { if 
(comp.bin_file.options.module) |module| { + if (true) @panic("TODO: get autodoc working again in this branch"); var autodoc = Autodoc.init(module, doc_location); defer autodoc.deinit(); try autodoc.generateZirData(); diff --git a/src/InternPool.zig b/src/InternPool.zig index 74155ca657..b835315e5a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,11 +1,16 @@ +//! All interned objects have both a value and a type. + map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, -const InternPool = @This(); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; + +const InternPool = @This(); +const DeclIndex = enum(u32) { _ }; const KeyAdapter = struct { intern_pool: *const InternPool, @@ -17,24 +22,21 @@ const KeyAdapter = struct { pub fn hash(ctx: @This(), a: Key) u32 { _ = ctx; - return a.hash(); + return a.hash32(); } }; pub const Key = union(enum) { - int_type: struct { - signedness: std.builtin.Signedness, - bits: u16, - }, + int_type: IntType, ptr_type: struct { elem_type: Index, - sentinel: Index, - alignment: u16, + sentinel: Index = .none, + alignment: u16 = 0, size: std.builtin.Type.Pointer.Size, - is_const: bool, - is_volatile: bool, - is_allowzero: bool, - address_space: std.builtin.AddressSpace, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + address_space: std.builtin.AddressSpace = .generic, }, array_type: struct { len: u64, @@ -52,20 +54,52 @@ pub const Key = union(enum) { error_set_type: Index, payload_type: Index, }, - simple: Simple, + simple_type: SimpleType, + simple_value: SimpleValue, + extern_func: struct { + ty: Index, + /// The Decl that corresponds to the function itself. + owner_decl: DeclIndex, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. 
+ lib_name: u32, + }, + int: struct { + ty: Index, + big_int: BigIntConst, + }, + enum_tag: struct { + ty: Index, + tag: BigIntConst, + }, + struct_type: struct { + fields_len: u32, + // TODO move Module.Struct data to here + }, + + pub const IntType = std.builtin.Type.Int; - pub fn hash(key: Key) u32 { + pub fn hash32(key: Key) u32 { + return @truncate(u32, key.hash64()); + } + + pub fn hash64(key: Key) u64 { var hasher = std.hash.Wyhash.init(0); + key.hashWithHasher(&hasher); + return hasher.final(); + } + + pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { switch (key) { .int_type => |int_type| { - std.hash.autoHash(&hasher, int_type); + std.hash.autoHash(hasher, int_type); }, .array_type => |array_type| { - std.hash.autoHash(&hasher, array_type); + std.hash.autoHash(hasher, array_type); }, else => @panic("TODO"), } - return @truncate(u32, hasher.final()); } pub fn eql(a: Key, b: Key) bool { @@ -85,6 +119,34 @@ pub const Key = union(enum) { else => @panic("TODO"), } } + + pub fn typeOf(key: Key) Index { + switch (key) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .optional_type, + .error_union_type, + .simple_type, + .struct_type, + => return .type_type, + + .int => |x| return x.ty, + .extern_func => |x| return x.ty, + .enum_tag => |x| return x.ty, + + .simple_value => |s| switch (s) { + .undefined => return .undefined_type, + .void => return .void_type, + .null => return .null_type, + .false, .true => return .bool_type, + .empty_struct => return .empty_struct_type, + .@"unreachable" => return .noreturn_type, + .generic_poison => unreachable, + }, + } + } }; pub const Item = struct { @@ -98,11 +160,330 @@ pub const Item = struct { /// Two values which have the same type can be equality compared simply /// by checking if their indexes are equal, provided they are both in /// the same `InternPool`. +/// When adding a tag to this enum, consider adding a corresponding entry to +/// `primitives` in AstGen.zig. 
pub const Index = enum(u32) { + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u80_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_char_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + call_modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + single_const_pointer_to_comptime_int_type, + const_slice_u8_type, + anyerror_void_error_union_type, + generic_poison_type, + var_args_param_type, + empty_struct_type, + + /// `undefined` (untyped) + undef, + /// `0` (comptime_int) + zero, + /// `0` (usize) + zero_usize, + /// `1` (comptime_int) + one, + /// `1` (usize) + one_usize, + /// `std.builtin.CallingConvention.C` + calling_convention_c, + /// `std.builtin.CallingConvention.Inline` + calling_convention_inline, + /// `{}` + void_value, + /// `unreachable` (noreturn type) + unreachable_value, + /// `null` (untyped) + null_value, + /// `true` + bool_true, + /// `false` + bool_false, + /// `.{}` (untyped) + empty_struct, + /// Used for generic parameters where the type and value + /// is not known until generic function instantiation. + generic_poison, + none = std.math.maxInt(u32), + _, + + pub fn toType(i: Index) @import("type.zig").Type { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } + + pub fn toValue(i: Index) @import("value.zig").Value { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } +}; + +pub const static_keys = [_]Key{ + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 1, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 29, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 80, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 128, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 128, + } }, + + .{ .simple_type = .usize }, + .{ .simple_type = .isize }, + .{ .simple_type = .c_char }, + .{ .simple_type = .c_short }, + .{ .simple_type = .c_ushort }, + .{ .simple_type = .c_int }, + .{ .simple_type = .c_uint }, + .{ .simple_type = .c_long }, + .{ .simple_type = .c_ulong }, + .{ .simple_type = .c_longlong }, + .{ .simple_type = .c_ulonglong }, + .{ .simple_type = .c_longdouble }, + .{ .simple_type = .f16 }, + .{ .simple_type = .f32 }, + .{ .simple_type = .f64 }, + .{ .simple_type = .f80 }, + .{ .simple_type = .f128 }, + .{ .simple_type = 
.anyopaque }, + .{ .simple_type = .bool }, + .{ .simple_type = .void }, + .{ .simple_type = .type }, + .{ .simple_type = .anyerror }, + .{ .simple_type = .comptime_int }, + .{ .simple_type = .comptime_float }, + .{ .simple_type = .noreturn }, + .{ .simple_type = .@"anyframe" }, + .{ .simple_type = .null }, + .{ .simple_type = .undefined }, + .{ .simple_type = .enum_literal }, + .{ .simple_type = .atomic_order }, + .{ .simple_type = .atomic_rmw_op }, + .{ .simple_type = .calling_convention }, + .{ .simple_type = .address_space }, + .{ .simple_type = .float_mode }, + .{ .simple_type = .reduce_op }, + .{ .simple_type = .call_modifier }, + .{ .simple_type = .prefetch_options }, + .{ .simple_type = .export_options }, + .{ .simple_type = .extern_options }, + .{ .simple_type = .type_info }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Many, + } }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Many, + .is_const = true, + } }, + + .{ .ptr_type = .{ + .elem_type = .comptime_int_type, + .size = .One, + .is_const = true, + } }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Slice, + .is_const = true, + } }, + + .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = .void_type, + } }, + + // generic_poison_type + .{ .simple_type = .generic_poison }, + + // var_args_param_type + .{ .simple_type = .var_args_param }, + + // empty_struct_type + .{ .struct_type = .{ + .fields_len = 0, + } }, + + .{ .simple_value = .undefined }, + + .{ .int = .{ + .ty = .comptime_int_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .comptime_int_type, + .big_int = .{ + .limbs = &.{1}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .big_int = .{ + .limbs = &.{1}, + .positive = true, + }, + } }, + + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .tag = .{ + .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)}, + .positive = true, + }, + } }, + + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .tag = .{ + .limbs = &.{@enumToInt(std.builtin.CallingConvention.Inline)}, + .positive = true, + }, + } }, + + .{ .simple_value = .void }, + .{ .simple_value = .@"unreachable" }, + .{ .simple_value = .null }, + .{ .simple_value = .true }, + .{ .simple_value = .false }, + .{ .simple_value = .empty_struct }, + .{ .simple_value = .generic_poison }, }; +/// How many items in the InternPool are statically known. +pub const static_len: u32 = static_keys.len; + pub const Tag = enum(u8) { /// An integer type. /// data is number of bits @@ -113,9 +494,12 @@ pub const Tag = enum(u8) { /// An array type. /// data is payload to Array. type_array, - /// A type or value that can be represented with only an enum tag. - /// data is Simple enum value - simple, + /// A type that can be represented with only an enum tag. + /// data is SimpleType enum value. + simple_type, + /// A value that can be represented with only an enum tag. + /// data is SimpleValue enum value. + simple_value, /// An unsigned integer value that can be represented by u32. /// data is integer value int_u32, @@ -137,9 +521,20 @@ pub const Tag = enum(u8) { /// A float value that can be represented by f128. /// data is payload index to Float128. float_f128, + /// An extern function. + extern_func, + /// A regular function. 
+ func, + /// Represents the data that an enum declaration provides, when the fields + /// are auto-numbered, and there are no declarations. + /// data is payload index to `EnumSimple`. + enum_simple, }; -pub const Simple = enum(u32) { +/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to +/// implement logic that only wants to deal with types because the logic can +/// ignore all simple values. Note that technically, types are values. +pub const SimpleType = enum(u32) { f16, f32, f64, @@ -147,6 +542,7 @@ pub const Simple = enum(u32) { f128, usize, isize, + c_char, c_short, c_ushort, c_int, @@ -165,14 +561,36 @@ pub const Simple = enum(u32) { comptime_float, noreturn, @"anyframe", - null_type, - undefined_type, - enum_literal_type, + null, undefined, - void_value, + enum_literal, + + atomic_order, + atomic_rmw_op, + calling_convention, + address_space, + float_mode, + reduce_op, + call_modifier, + prefetch_options, + export_options, + extern_options, + type_info, + + generic_poison, + var_args_param, +}; + +pub const SimpleValue = enum(u32) { + undefined, + void, null, - bool_true, - bool_false, + empty_struct, + true, + false, + @"unreachable", + + generic_poison, }; pub const Array = struct { @@ -180,10 +598,44 @@ pub const Array = struct { child: Index, }; +/// Trailing: +/// 0. field name: null-terminated string index for each fields_len; declaration order +pub const EnumSimple = struct { + /// The Decl that corresponds to the enum itself. + owner_decl: DeclIndex, + /// An integer type which is used for the numerical value of the enum. This + /// is inferred by Zig to be the smallest power of two unsigned int that + /// fits the number of fields. It is stored here to avoid unnecessary + /// calculations and possibly allocation failure when querying the tag type + /// of enums. + int_tag_ty: Index, + fields_len: u32, +}; + +pub fn init(ip: *InternPool, gpa: Allocator) !void { + assert(ip.items.len == 0); + + // So that we can use `catch unreachable` below. + try ip.items.ensureUnusedCapacity(gpa, static_keys.len); + try ip.map.ensureUnusedCapacity(gpa, static_keys.len); + try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); + + // This inserts all the statically-known values into the intern pool in the + // order expected. + for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; + + // Sanity check. 
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + assert(ip.items.len == static_keys.len); +} + pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); + ip.* = undefined; } pub fn indexToKey(ip: InternPool, index: Index) Key { @@ -210,7 +662,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .sentinel = .none, } }; }, - .simple => .{ .simple = @intToEnum(Simple, data) }, + .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, + .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, else => @panic("TODO"), }; @@ -224,12 +677,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } switch (key) { .int_type => |int_type| { - const tag: Tag = switch (int_type.signedness) { + const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; try ip.items.append(gpa, .{ - .tag = tag, + .tag = t, .data = int_type.bits, }); }, @@ -249,6 +702,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } +pub fn tag(ip: InternPool, index: Index) Tag { + const tags = ip.items.items(.tag); + return tags[@enumToInt(index)]; +} + fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try ip.extra.ensureUnusedCapacity(gpa, fields.len); diff --git a/src/Liveness.zig b/src/Liveness.zig index 59135ef5c8..45d0705008 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -5,15 +5,17 @@ //! Some instructions are special, such as: //! * Conditional Branches //! * Switch Branches -const Liveness = @This(); const std = @import("std"); -const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const Air = @import("Air.zig"); const Log2Int = std.math.Log2Int; +const Liveness = @This(); +const trace = @import("tracy.zig").trace; +const Air = @import("Air.zig"); +const InternPool = @import("InternPool.zig"); + pub const Verify = @import("Liveness/Verify.zig"); /// This array is split into sets of 4 bits per AIR instruction. diff --git a/src/Module.zig b/src/Module.zig index a8f2281c4f..5756955d3c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -32,6 +32,19 @@ const build_options = @import("build_options"); const Liveness = @import("Liveness.zig"); const isUpDir = @import("introspect.zig").isUpDir; const clang = @import("clang.zig"); +const InternPool = @import("InternPool.zig"); + +comptime { + @setEvalBranchQuota(4000); + for ( + @typeInfo(Zir.Inst.Ref).Enum.fields, + @typeInfo(Air.Inst.Ref).Enum.fields, + @typeInfo(InternPool.Index).Enum.fields, + ) |zir_field, air_field, ip_field| { + assert(mem.eql(u8, zir_field.name, ip_field.name)); + assert(mem.eql(u8, air_field.name, ip_field.name)); + } +} /// General-purpose allocator. Used for both temporary and long-term storage. gpa: Allocator, @@ -83,6 +96,9 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, string_literal_bytes: ArrayListUnmanaged(u8) = .{}, +/// Stores all Type and Value objects; periodically garbage collected. +intern_pool: InternPool = .{}, + /// The set of all the generic function instantiations. 
This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -807,9 +823,9 @@ pub const Decl = struct { return (try decl.typedValue()).val; } - pub fn isFunction(decl: Decl) !bool { + pub fn isFunction(decl: Decl, mod: *const Module) !bool { const tv = try decl.typedValue(); - return tv.ty.zigTypeTag() == .Fn; + return tv.ty.zigTypeTag(mod) == .Fn; } /// If the Decl has a value and it is a struct, return it, @@ -921,14 +937,14 @@ pub const Decl = struct { }; } - pub fn getAlignment(decl: Decl, target: Target) u32 { + pub fn getAlignment(decl: Decl, mod: *const Module) u32 { assert(decl.has_tv); if (decl.@"align" != 0) { // Explicit alignment. return decl.@"align"; } else { // Natural alignment. - return decl.ty.abiAlignment(target); + return decl.ty.abiAlignment(mod); } } }; @@ -1030,7 +1046,7 @@ pub const Struct = struct { /// Returns the field alignment. If the struct is packed, returns 0. pub fn alignment( field: Field, - target: Target, + mod: *const Module, layout: std.builtin.Type.ContainerLayout, ) u32 { if (field.abi_align != 0) { @@ -1038,24 +1054,26 @@ pub const Struct = struct { return field.abi_align; } + const target = mod.getTarget(); + switch (layout) { .Packed => return 0, .Auto => { if (target.ofmt == .c) { - return alignmentExtern(field, target); + return alignmentExtern(field, mod); } else { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } }, - .Extern => return alignmentExtern(field, target), + .Extern => return alignmentExtern(field, mod), } } - pub fn alignmentExtern(field: Field, target: Target) u32 { + pub fn alignmentExtern(field: Field, mod: *const Module) u32 { // This logic is duplicated in Type.abiAlignmentAdvanced. - const ty_abi_align = field.ty.abiAlignment(target); + const ty_abi_align = field.ty.abiAlignment(mod); - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { // The C ABI requires 128 bit integer fields of structs // to be 16-bytes aligned. 
return @max(ty_abi_align, 16); @@ -1132,7 +1150,7 @@ pub const Struct = struct { }; } - pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 { + pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 { assert(s.layout == .Packed); assert(s.haveLayout()); var bit_sum: u64 = 0; @@ -1140,12 +1158,13 @@ pub const Struct = struct { if (i == index) { return @intCast(u16, bit_sum); } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } unreachable; // index out of bounds } pub const RuntimeFieldIterator = struct { + module: *const Module, struct_obj: *const Struct, index: u32 = 0, @@ -1155,6 +1174,7 @@ pub const Struct = struct { }; pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex { + const mod = it.module; while (true) { var i = it.index; it.index += 1; @@ -1167,15 +1187,18 @@ pub const Struct = struct { } const field = it.struct_obj.fields.values()[i]; - if (!field.is_comptime and field.ty.hasRuntimeBits()) { + if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) { return FieldAndIndex{ .index = i, .field = field }; } } } }; - pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator { - return .{ .struct_obj = s }; + pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator { + return .{ + .struct_obj = s, + .module = module, + }; } }; @@ -1323,9 +1346,9 @@ pub const Union = struct { /// Returns the field alignment, assuming the union is not packed. /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. - pub fn normalAlignment(field: Field, target: Target) u32 { + pub fn normalAlignment(field: Field, mod: *const Module) u32 { if (field.abi_align == 0) { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } else { return field.abi_align; } @@ -1383,22 +1406,22 @@ pub const Union = struct { }; } - pub fn hasAllZeroBitFieldTypes(u: Union) bool { + pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool { assert(u.haveFieldTypes()); for (u.fields.values()) |field| { - if (field.ty.hasRuntimeBits()) return false; + if (field.ty.hasRuntimeBits(mod)) return false; } return true; } - pub fn mostAlignedField(u: Union, target: Target) u32 { + pub fn mostAlignedField(u: Union, mod: *const Module) u32 { assert(u.haveFieldTypes()); var most_alignment: u32 = 0; var most_index: usize = undefined; for (u.fields.values(), 0..) |field, i| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); if (field_align > most_alignment) { most_alignment = field_align; most_index = i; @@ -1408,20 +1431,20 @@ pub const Union = struct { } /// Returns 0 if the union is represented with 0 bits at runtime. 
- pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 { + pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 { var max_align: u32 = 0; - if (have_tag) max_align = u.tag_ty.abiAlignment(target); + if (have_tag) max_align = u.tag_ty.abiAlignment(mod); for (u.fields.values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); max_align = @max(max_align, field_align); } return max_align; } - pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 { - return u.getLayout(target, have_tag).abi_size; + pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 { + return u.getLayout(mod, have_tag).abi_size; } pub const Layout = struct { @@ -1451,7 +1474,7 @@ pub const Union = struct { }; } - pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout { + pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout { assert(u.haveLayout()); var most_aligned_field: u32 = undefined; var most_aligned_field_size: u64 = undefined; @@ -1460,16 +1483,16 @@ pub const Union = struct { var payload_align: u32 = 0; const fields = u.fields.values(); for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = a: { if (field.abi_align == 0) { - break :a field.ty.abiAlignment(target); + break :a field.ty.abiAlignment(mod); } else { break :a field.abi_align; } }; - const field_size = field.ty.abiSize(target); + const field_size = field.ty.abiSize(mod); if (field_size > payload_size) { payload_size = field_size; biggest_field = @intCast(u32, i); @@ -1481,7 +1504,7 @@ pub const Union = struct { } } payload_align = @max(payload_align, 1); - if (!have_tag or !u.tag_ty.hasRuntimeBits()) { + if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), .abi_align = payload_align, @@ -1497,8 +1520,8 @@ pub const Union = struct { } // Put the tag before or after the payload depending on which one's // alignment is greater. - const tag_size = u.tag_ty.abiSize(target); - const tag_align = @max(1, u.tag_ty.abiAlignment(target)); + const tag_size = u.tag_ty.abiSize(mod); + const tag_align = @max(1, u.tag_ty.abiAlignment(mod)); var size: u64 = 0; var padding: u32 = undefined; if (tag_align >= payload_align) { @@ -2281,7 +2304,7 @@ pub const ErrorMsg = struct { ) !*ErrorMsg { const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); - err_msg.* = try init(gpa, src_loc, format, args); + err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); return err_msg; } @@ -3391,6 +3414,12 @@ pub const CompileError = error{ ComptimeBreak, }; +pub fn init(mod: *Module) !void { + const gpa = mod.gpa; + try mod.error_name_list.append(gpa, "(no error)"); + try mod.intern_pool.init(gpa); +} + pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -3518,6 +3547,8 @@ pub fn deinit(mod: *Module) void { mod.string_literal_table.deinit(gpa); mod.string_literal_bytes.deinit(gpa); + + mod.intern_pool.deinit(gpa); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { @@ -4277,7 +4308,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // Update all dependents which have at least this level of dependency. 
// If our type remained the same and we're a function, only update // decls which depend on our body; otherwise, update all dependents. - const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal; + const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal; for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| { if (@enumToInt(dep_type) < @enumToInt(update_level)) continue; @@ -4748,8 +4779,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - var buffer: Value.ToTypeBuffer = undefined; - const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator); + const ty = try decl_tv.val.toType().copy(decl_arena_allocator); if (ty.getNamespace() == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4775,7 +4805,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var type_changed = true; if (decl.has_tv) { - prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(); + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); if (decl.getFunction()) |prev_func| { prev_is_inline = prev_func.state == .inline_only; @@ -5510,7 +5540,7 @@ pub fn clearDecl( try mod.deleteDeclExports(decl_index); if (decl.has_tv) { - if (decl.ty.isFnOrHasRuntimeBits()) { + if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } if (decl.getInnerNamespace()) |namespace| { @@ -5699,7 +5729,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (arg_tv.val.tag() != .generic_poison) arg_tv.val - else if (arg_tv.ty.onePossibleValue()) |opv| + else if (arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -5773,7 +5803,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // If we don't get an error return trace from a caller, create our own. if (func.calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and - !sema.fn_ret_ty.isError()) + !sema.fn_ret_ty.isError(mod)) { sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { // TODO make these unreachable instead of @panic @@ -5995,25 +6025,11 @@ pub fn initNewAnonDecl( // if the Decl is referenced by an instruction or another constant. Otherwise, // the Decl will be garbage collected by the `codegen_decl` task instead of sent // to the linker. - if (typed_value.ty.isFnOrHasRuntimeBits()) { + if (typed_value.ty.isFnOrHasRuntimeBits(mod)) { try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); } } -pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { - const int_payload = try arena.create(Type.Payload.Bits); - int_payload.* = .{ - .base = .{ - .tag = switch (signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - }, - }, - .data = bits, - }; - return Type.initPayload(&int_payload.base); -} - pub fn errNoteNonLazy( mod: *Module, src_loc: SrcLoc, @@ -6779,3 +6795,204 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool { .field_reordering => mod.comp.bin_file.options.use_llvm, }; } + +/// Shortcut for calling `intern_pool.get`. 
+pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index { + return mod.intern_pool.get(mod.gpa, key); +} + +pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { + const i = try intern(mod, .{ .int_type = .{ + .signedness = signedness, + .bits = bits, + } }); + return i.toType(); +} + +pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { + return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); +} + +/// Returns the smallest possible integer type containing both `min` and +/// `max`. Asserts that neither value is undef. +/// TODO: if #3806 is implemented, this becomes trivial +pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { + assert(!min.isUndef()); + assert(!max.isUndef()); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, mod).compare(.lte)); + } + + const sign = min.orderAgainstZero(mod) == .lt; + + const min_val_bits = intBitsForValue(mod, min, sign); + const max_val_bits = intBitsForValue(mod, max, sign); + + return mod.intType( + if (sign) .signed else .unsigned, + @max(min_val_bits, max_val_bits), + ); +} + +/// Given a value representing an integer, returns the number of bits necessary to represent +/// this value in an integer. If `sign` is true, returns the number of bits necessary in a +/// twos-complement integer; otherwise in an unsigned integer. +/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. +pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { + assert(!val.isUndef()); + switch (val.tag()) { + .int_big_positive => { + const limbs = val.castTag(.int_big_positive).?.data; + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; + return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + }, + .int_big_negative => { + const limbs = val.castTag(.int_big_negative).?.data; + // Zero is still a possibility, in which case unsigned is fine + for (limbs) |limb| { + if (limb != 0) break; + } else return 0; // val == 0 + assert(sign); + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; + return @intCast(u16, big.bitCountTwosComp()); + }, + .int_i64 => { + const x = val.castTag(.int_i64).?.data; + if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x)); + assert(sign); + return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; + }, + else => { + const x = val.toUnsignedInt(mod); + return Type.smallestUnsignedBits(x) + @boolToInt(sign); + }, + } +} + +pub const AtomicPtrAlignmentError = error{ + FloatTooBig, + IntTooBig, + BadType, +}; + +pub const AtomicPtrAlignmentDiagnostics = struct { + bits: u16 = undefined, + max_bits: u16 = undefined, +}; + +/// If ABI alignment of `ty` is OK for atomic operations, returns 0. +/// Otherwise returns the alignment required on a pointer for the target +/// to perform atomic operations. +// TODO this function does not take into account CPU features, which can affect +// this value. Audit this! 
+pub fn atomicPtrAlignment( + mod: *const Module, + ty: Type, + diags: *AtomicPtrAlignmentDiagnostics, +) AtomicPtrAlignmentError!u32 { + const target = mod.getTarget(); + const max_atomic_bits: u16 = switch (target.cpu.arch) { + .avr, + .msp430, + .spu_2, + => 16, + + .arc, + .arm, + .armeb, + .hexagon, + .m68k, + .le32, + .mips, + .mipsel, + .nvptx, + .powerpc, + .powerpcle, + .r600, + .riscv32, + .sparc, + .sparcel, + .tce, + .tcele, + .thumb, + .thumbeb, + .x86, + .xcore, + .amdil, + .hsail, + .spir, + .kalimba, + .lanai, + .shave, + .wasm32, + .renderscript32, + .csky, + .spirv32, + .dxil, + .loongarch32, + .xtensa, + => 32, + + .amdgcn, + .bpfel, + .bpfeb, + .le64, + .mips64, + .mips64el, + .nvptx64, + .powerpc64, + .powerpc64le, + .riscv64, + .sparc64, + .s390x, + .amdil64, + .hsail64, + .spir64, + .wasm64, + .renderscript64, + .ve, + .spirv64, + .loongarch64, + => 64, + + .aarch64, + .aarch64_be, + .aarch64_32, + => 128, + + .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, + }; + + const int_ty = switch (ty.zigTypeTag(mod)) { + .Int => ty, + .Enum => ty.intTagType(), + .Float => { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + return 0; + }, + .Bool => return 0, + else => { + if (ty.isPtrAtRuntime(mod)) return 0; + return error.BadType; + }, + }; + + const bit_count = int_ty.intInfo(mod).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + + return 0; +} diff --git a/src/RangeSet.zig b/src/RangeSet.zig index aa051ff424..2e28a562c6 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -60,13 +60,14 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { if (self.ranges.items.len == 0) return false; + const mod = self.module; std.mem.sort(Range, self.ranges.items, LessThanContext{ .ty = ty, - .module = self.module, + .module = mod, }, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, self.module) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module)) + if (!self.ranges.items[0].first.eql(first, ty, mod) or + !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod)) { return false; } @@ -76,18 +77,16 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); - const target = self.module.getTarget(); - // look for gaps for (self.ranges.items[1..], 0..) |cur, i| { // i starts counting from the second item. 
const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(prev.last.toBigInt(&space, target)); + try counter.copy(prev.last.toBigInt(&space, mod)); try counter.addScalar(&counter, 1); - const cur_start_int = cur.first.toBigInt(&space, target); + const cur_start_int = cur.first.toBigInt(&space, mod); if (!cur_start_int.eq(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index 9e21bfa83d..9b76fee68e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -114,6 +114,7 @@ const Package = @import("Package.zig"); const crash_report = @import("crash_report.zig"); const build_options = @import("build_options"); const Compilation = @import("Compilation.zig"); +const InternPool = @import("InternPool.zig"); pub const default_branch_quota = 1000; pub const default_reference_trace_len = 2; @@ -1614,6 +1615,7 @@ fn analyzeBodyInner( }, .@"try" => blk: { if (!block.is_comptime) break :blk try sema.zirTry(block, inst); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1621,18 +1623,18 @@ fn analyzeBodyInner( const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1654,11 +1656,11 @@ fn analyzeBodyInner( const err_union = try sema.analyzeLoad(block, src, operand, operand_src); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1721,17 +1723,12 @@ fn analyzeBodyInner( } pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { - var i: usize = @enumToInt(zir_ref); - + const i = @enumToInt(zir_ref); // First section of indexes correspond to a set 
number of constant values. - if (i < Zir.Inst.Ref.typed_value_map.len) { - // We intentionally map the same indexes to the same values between ZIR and AIR. - return zir_ref; - } - i -= Zir.Inst.Ref.typed_value_map.len; - - // Finally, the last section of indexes refers to the map of ZIR=>AIR. - const inst = sema.inst_map.get(@intCast(u32, i)).?; + // We intentionally map the same indexes to the same values between ZIR and AIR. + if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i); + // The last section of indexes refers to the map of ZIR => AIR. + const inst = sema.inst_map.get(i - InternPool.static_len).?; const ty = sema.typeOf(inst); if (ty.tag() == .generic_poison) return error.GenericPoison; return inst; @@ -1766,9 +1763,8 @@ pub fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - assert(zir_ref != .var_args_param); const air_inst = try sema.resolveInst(zir_ref); - assert(air_inst != .var_args_param); + assert(air_inst != .var_args_param_type); const ty = try sema.analyzeAsType(block, src, air_inst); if (ty.tag() == .generic_poison) return error.GenericPoison; return ty; @@ -1783,8 +1779,7 @@ fn analyzeAsType( const wanted_type = Type.initTag(.type); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = val.toType(&buffer); + const ty = val.toType(); return ty.copy(sema.arena); } @@ -1950,12 +1945,12 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( make_runtime: *bool, ) CompileError!?Value { // First section of indexes correspond to a set number of constant values. - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val; + const int = @enumToInt(inst); + if (int < InternPool.static_len) { + return @intToEnum(InternPool.Index, int).toValue(); } - i -= Air.Inst.Ref.typed_value_map.len; + const i = int - InternPool.static_len; const air_tags = sema.air_instructions.items(.tag); if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { if (air_tags[i] == .constant) { @@ -2010,13 +2005,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, opt } fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (ty.isSlice()) { - try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2().fmt(sema.mod)}); + try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); } break :msg msg; }; @@ -2042,7 +2038,8 @@ fn failWithErrorSetCodeMissing( } fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError { - if (int_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (int_ty.zigTypeTag(mod) == .Vector) { const msg = msg: { const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{ int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod), @@ -2084,12 +2081,13 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError } fn 
failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { + const mod = sema.mod; const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty; - if (inner_ty.zigTypeTag() == .Optional) opt: { + if (inner_ty.zigTypeTag(mod) == .Optional) opt: { var buf: Type.Payload.ElemType = undefined; const child_ty = inner_ty.optionalChild(&buf); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :opt; + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2097,9 +2095,9 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (inner_ty.zigTypeTag() == .ErrorUnion) err: { + } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { const child_ty = inner_ty.errorUnionPayload(); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :err; + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2111,14 +2109,14 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(ty: Type, field_name: []const u8) bool { - switch (ty.zigTypeTag()) { +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool { + switch (ty.zigTypeTag(mod)) { .Array => return mem.eql(u8, field_name, "len"), .Pointer => { const ptr_info = ty.ptrInfo().data; if (ptr_info.size == .Slice) { return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); - } else if (ptr_info.pointee_type.zigTypeTag() == .Array) { + } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { return mem.eql(u8, field_name, "len"); } else return false; }, @@ -2352,10 +2350,10 @@ fn analyzeAsInt( dest_ty: Type, reason: []const u8, ) !u64 { + const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - const target = sema.mod.getTarget(); - return (try val.getUnsignedIntAdvanced(target, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, sema)).?; } // Returns a compile error if the value has tag `variable`. 
@@ -2926,23 +2924,23 @@ fn zirEnumDecl(
         if (tag_type_ref != .none) {
             const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref);
-            if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) {
+            if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
                 return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
             }
             enum_obj.tag_ty = try ty.copy(decl_arena_allocator);
             enum_obj.tag_ty_inferred = false;
         } else if (fields_len == 0) {
-            enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0);
+            enum_obj.tag_ty = try mod.intType(.unsigned, 0);
             enum_obj.tag_ty_inferred = true;
         } else {
             const bits = std.math.log2_int_ceil(usize, fields_len);
-            enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits);
+            enum_obj.tag_ty = try mod.intType(.unsigned, bits);
             enum_obj.tag_ty_inferred = true;
         }
     }
 
-    if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag() != .ComptimeInt) {
-        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(sema.mod.getTarget())) {
+    if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag(mod) != .ComptimeInt) {
+        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(mod)) {
             return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
         }
     }
@@ -3319,7 +3317,8 @@ fn ensureResultUsed(
     ty: Type,
     src: LazySrcLoc,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Void, .NoReturn => return,
         .ErrorSet, .ErrorUnion => {
             const msg = msg: {
@@ -3347,11 +3346,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand = try sema.resolveInst(inst_data.operand);
     const src = inst_data.src();
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .ErrorSet, .ErrorUnion => {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "error is discarded", .{});
@@ -3369,16 +3369,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    const err_union_ty = if (operand_ty.zigTypeTag() == .Pointer)
+    const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
         operand_ty.childType()
     else
         operand_ty;
-    if (err_union_ty.zigTypeTag() != .ErrorUnion) return;
-    const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag();
+    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
+    const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(mod);
     if (payload_ty != .Void and payload_ty != .NoReturn) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "error union payload is ignored", .{});
@@ -3920,19 +3921,20 @@ fn zirArrayBasePtr(
     block: *Block,
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const start_ptr = try sema.resolveInst(inst_data.operand);
 
     var base_ptr = start_ptr;
-    while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) {
+    while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) {
         .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
         .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
         else => break,
     };
 
     const elem_ty = sema.typeOf(base_ptr).childType();
-    switch (elem_ty.zigTypeTag()) {
+    switch (elem_ty.zigTypeTag(mod)) {
         .Array, .Vector => return base_ptr,
         .Struct => if (elem_ty.isTuple()) {
             // TODO validate element count
@@ -3948,19 +3950,20 @@ fn zirFieldBasePtr(
     block: *Block,
     inst: Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const start_ptr = try sema.resolveInst(inst_data.operand);
 
     var base_ptr = start_ptr;
-    while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) {
+    while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) {
         .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
         .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
         else => break,
     };
 
     const elem_ty = sema.typeOf(base_ptr).childType();
-    switch (elem_ty.zigTypeTag()) {
+    switch (elem_ty.zigTypeTag(mod)) {
         .Struct, .Union => return base_ptr,
         else => {},
     }
@@ -3968,6 +3971,7 @@ fn zirFieldBasePtr(
 }
 
 fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
@@ -3991,7 +3995,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         const object_ty = sema.typeOf(object);
         // Each arg could be an indexable, or a range, in which case the length
         // is passed directly as an integer.
-        const is_int = switch (object_ty.zigTypeTag()) {
+        const is_int = switch (object_ty.zigTypeTag(mod)) {
            .Int, .ComptimeInt => true,
            else => false,
        };
@@ -4000,7 +4004,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
            .input_index = i,
        } };
        const arg_len_uncoerced = if (is_int) object else l: {
-            if (!object_ty.isIndexable()) {
+            if (!object_ty.isIndexable(mod)) {
                // Instead of using checkIndexable we customize this error.
                const msg = msg: {
                    const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)});
                    errdefer msg.destroy(sema.gpa);
                };
                return sema.failWithOwnedErrorMsg(msg);
            }
-            if (!object_ty.indexableHasLen()) continue;
+            if (!object_ty.indexableHasLen(mod)) continue;
            break :l try sema.fieldVal(block, arg_src, object, "len", arg_src);
        };
@@ -4061,7 +4065,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
        const object_ty = sema.typeOf(object);
        // Each arg could be an indexable, or a range, in which case the length
        // is passed directly as an integer.
-        switch (object_ty.zigTypeTag()) {
+        switch (object_ty.zigTypeTag(mod)) {
             .Int, .ComptimeInt => continue,
             else => {},
         }
@@ -4096,13 +4100,14 @@ fn validateArrayInitTy(
     block: *Block,
     inst: Zir.Inst.Index,
 ) CompileError!void {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node };
     const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data;
     const ty = try sema.resolveType(block, ty_src, extra.ty);
 
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Array => {
             const array_len = ty.arrayLen();
             if (extra.init_count != array_len) {
@@ -4141,11 +4146,12 @@ fn validateStructInitTy(
     block: *Block,
     inst: Zir.Inst.Index,
 ) CompileError!void {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ty = try sema.resolveType(block, src, inst_data.operand);
 
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct, .Union => return,
         else => {},
     }
@@ -4160,6 +4166,7 @@ fn zirValidateStructInit(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
     const init_src = validate_inst.src();
     const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -4168,7 +4175,7 @@ fn zirValidateStructInit(
     const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
     const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
     const agg_ty = sema.typeOf(object_ptr).childType();
-    switch (agg_ty.zigTypeTag()) {
+    switch (agg_ty.zigTypeTag(mod)) {
         .Struct => return sema.validateStructInit(
             block,
             agg_ty,
@@ -4589,6 +4596,7 @@ fn zirValidateArrayInit(
     block: *Block,
     inst: Zir.Inst.Index,
 ) CompileError!void {
+    const mod = sema.mod;
     const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
     const init_src = validate_inst.src();
     const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -4599,7 +4607,7 @@ fn zirValidateArrayInit(
     const array_ty = sema.typeOf(array_ptr).childType();
     const array_len = array_ty.arrayLen();
 
-    if (instrs.len != array_len) switch (array_ty.zigTypeTag()) {
+    if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
         .Struct => {
             var root_msg: ?*Module.ErrorMsg = null;
             errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
@@ -4667,7 +4675,7 @@ fn zirValidateArrayInit(
         // Determine whether the value stored to this pointer is comptime-known.
         if (array_ty.isTuple()) {
-            if (array_ty.structFieldValueComptime(i)) |opv| {
+            if (array_ty.structFieldValueComptime(mod, i)) |opv| {
                 element_vals[i] = opv;
                 continue;
             }
@@ -4770,12 +4778,13 @@ fn zirValidateArrayInit(
 }
 
 fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
 
-    if (operand_ty.zigTypeTag() != .Pointer) {
+    if (operand_ty.zigTypeTag(mod) != .Pointer) {
         return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)});
     } else switch (operand_ty.ptrSize()) {
         .One, .C => {},
@@ -4788,7 +4797,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
         return;
     }
 
-    const elem_ty = operand_ty.elemType2();
+    const elem_ty = operand_ty.elemType2(mod);
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         if (val.isUndef()) {
             return sema.fail(block, src, "cannot dereference undefined value", .{});
@@ -4818,7 +4827,8 @@ fn failWithBadMemberAccess(
     field_src: LazySrcLoc,
     field_name: []const u8,
 ) CompileError {
-    const kw_name = switch (agg_ty.zigTypeTag()) {
+    const mod = sema.mod;
+    const kw_name = switch (agg_ty.zigTypeTag(mod)) {
         .Union => "union",
         .Struct => "struct",
         .Opaque => "opaque",
@@ -4894,8 +4904,9 @@ fn failWithBadUnionFieldAccess(
 }
 
 fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
-    const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return;
-    const category = switch (decl_ty.zigTypeTag()) {
+    const mod = sema.mod;
+    const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return;
+    const category = switch (decl_ty.zigTypeTag(mod)) {
         .Union => "union",
         .Struct => "struct",
         .Enum => "enum",
@@ -4903,7 +4914,7 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo
         .ErrorSet => "error set",
         else => unreachable,
     };
-    try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
+    try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
 }
 
 fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -5028,6 +5039,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const zir_tags = sema.code.instructions.items(.tag);
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[inst].pl_node;
@@ -5046,9 +5058,9 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
     //   %b = store(%a, %c)
     // Where %c is an error union or error set. In such case we need to add
     // to the current function's inferred error set, if any.
-    if (is_ret and (sema.typeOf(operand).zigTypeTag() == .ErrorUnion or
-        sema.typeOf(operand).zigTypeTag() == .ErrorSet) and
-        sema.fn_ret_ty.zigTypeTag() == .ErrorUnion)
+    if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or
+        sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and
+        sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion)
     {
         try sema.addToInferredErrorSet(operand);
     }
@@ -6270,6 +6282,7 @@ fn zirCall(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
     const call_src = inst_data.src();
@@ -6342,7 +6355,7 @@ fn zirCall(
 
         sema.inst_map.putAssumeCapacity(inst, inst: {
             if (arg_index >= fn_params_len)
-                break :inst Air.Inst.Ref.var_args_param;
+                break :inst Air.Inst.Ref.var_args_param_type;
 
             if (func_ty_info.param_types[arg_index].tag() == .generic_poison)
                 break :inst Air.Inst.Ref.generic_poison_type;
@@ -6352,10 +6365,10 @@ fn zirCall(
 
         const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
         const resolved_ty = sema.typeOf(resolved);
-        if (resolved_ty.zigTypeTag() == .NoReturn) {
+        if (resolved_ty.zigTypeTag(mod) == .NoReturn) {
             return resolved;
         }
-        if (resolved_ty.isError()) {
+        if (resolved_ty.isError(mod)) {
             input_is_error = true;
         }
         resolved_args[arg_index] = resolved;
@@ -6380,7 +6393,7 @@ fn zirCall(
 
     // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
     // need to clean-up our own trace if we were passed to a non-error-handling expression.
-    if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) {
+    if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) {
         const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
         const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
         const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src);
@@ -6417,20 +6430,21 @@ fn checkCallArgumentCount(
     total_args: usize,
     member_fn: bool,
 ) !Type {
+    const mod = sema.mod;
     const func_ty = func_ty: {
-        switch (callee_ty.zigTypeTag()) {
+        switch (callee_ty.zigTypeTag(mod)) {
             .Fn => break :func_ty callee_ty,
             .Pointer => {
                 const ptr_info = callee_ty.ptrInfo().data;
-                if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
+                if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
                     break :func_ty ptr_info.pointee_type;
                 }
             },
             .Optional => {
                 var buf: Type.Payload.ElemType = undefined;
                 const opt_child = callee_ty.optionalChild(&buf);
-                if (opt_child.zigTypeTag() == .Fn or (opt_child.isSinglePointer() and
-                    opt_child.childType().zigTypeTag() == .Fn))
+                if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and
+                    opt_child.childType().zigTypeTag(mod) == .Fn))
                 {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{
@@ -6488,13 +6502,14 @@ fn callBuiltin(
     modifier: std.builtin.CallModifier,
     args: []const Air.Inst.Ref,
 ) !void {
+    const mod = sema.mod;
     const callee_ty = sema.typeOf(builtin_fn);
     const func_ty = func_ty: {
-        switch (callee_ty.zigTypeTag()) {
+        switch (callee_ty.zigTypeTag(mod)) {
             .Fn => break :func_ty callee_ty,
             .Pointer => {
                 const ptr_info = callee_ty.ptrInfo().data;
-                if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
+                if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
                     break :func_ty ptr_info.pointee_type;
                 }
             },
@@ -6715,7 +6730,7 @@ fn analyzeCall(
                 @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
             }),
             else => {
-                assert(callee_ty.isPtrAtRuntime());
+                assert(callee_ty.isPtrAtRuntime(mod));
                 return sema.fail(block, call_src, "{s} call of function pointer", .{
                     @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
                 });
@@ -6978,7 +6993,7 @@ fn analyzeCall(
             break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
         };
 
-        if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) {
+        if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) {
             try sema.emitDbgInline(
                 block,
                 module_fn,
@@ -7068,7 +7083,7 @@ fn analyzeCall(
         if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
         try sema.queueFullTypeResolution(func_ty_info.return_type);
-        if (sema.owner_func != null and func_ty_info.return_type.isError()) {
+        if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) {
            sema.owner_func.?.calls_or_awaits_errorable_fn = true;
        }
@@ -7301,8 +7316,9 @@ fn analyzeGenericCallArg(
     new_fn_info: Type.Payload.Function.Data,
     runtime_i: *u32,
 ) !void {
+    const mod = sema.mod;
     const is_runtime = comptime_arg.val.tag() == .generic_poison and
-        comptime_arg.ty.hasRuntimeBits() and
+        comptime_arg.ty.hasRuntimeBits(mod) and
         !(try sema.typeRequiresComptime(comptime_arg.ty));
     if (is_runtime) {
         const param_ty = new_fn_info.param_types[runtime_i.*];
@@ -7591,7 +7607,7 @@ fn instantiateGenericCall(
 
     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
-    if (sema.owner_func != null and new_fn_info.return_type.isError()) {
+    if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) {
         sema.owner_func.?.calls_or_awaits_errorable_fn = true;
     }
@@ -7872,8 +7888,9 @@ fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const int_type = sema.code.instructions.items(.data)[inst].int_type;
-    const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count);
+    const ty = try mod.intType(int_type.signedness, int_type.bit_count);
 
     return sema.addType(ty);
 }
@@ -7882,12 +7899,13 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
     const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
-    if (child_type.zigTypeTag() == .Opaque) {
+    if (child_type.zigTypeTag(mod) == .Opaque) {
         return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
-    } else if (child_type.zigTypeTag() == .Null) {
+    } else if (child_type.zigTypeTag(mod) == .Null) {
         return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
     }
     const opt_type = try Type.optional(sema.arena, child_type);
@@ -7896,14 +7914,15 @@
 }
 
 fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const bin = sema.code.instructions.items(.data)[inst].bin;
     const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs);
-    assert(indexable_ty.isIndexable()); // validated by a previous instruction
-    if (indexable_ty.zigTypeTag() == .Struct) {
+    assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
+    if (indexable_ty.zigTypeTag(mod) == .Struct) {
         const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs));
         return sema.addType(elem_type);
     } else {
-        const elem_type = indexable_ty.elemType2();
+        const elem_type = indexable_ty.elemType2(mod);
         return sema.addType(elem_type);
     }
 }
@@ -7960,9 +7979,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
 }
 
 fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
-    if (elem_type.zigTypeTag() == .Opaque) {
+    const mod = sema.mod;
+    if (elem_type.zigTypeTag(mod) == .Opaque) {
         return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)});
-    } else if (elem_type.zigTypeTag() == .NoReturn) {
+    } else if (elem_type.zigTypeTag(mod) == .NoReturn) {
         return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
     }
 }
@@ -7986,6 +8006,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -7993,7 +8014,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
     const payload = try sema.resolveType(block, rhs_src, extra.rhs);
 
-    if (error_set.zigTypeTag() != .ErrorSet) {
+    if (error_set.zigTypeTag(mod) != .ErrorSet) {
         return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
             error_set.fmt(sema.mod),
         });
@@ -8004,11 +8025,12 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
 }
 
 fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
-    if (payload_ty.zigTypeTag() == .Opaque) {
+    const mod = sema.mod;
+    if (payload_ty.zigTypeTag(mod) == .Opaque) {
         return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
             payload_ty.fmt(sema.mod),
         });
-    } else if (payload_ty.zigTypeTag() == .ErrorSet) {
+    } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) {
         return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
             payload_ty.fmt(sema.mod),
         });
@@ -8089,10 +8111,10 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
     const uncasted_operand = try sema.resolveInst(extra.operand);
     const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
 
     if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
-        const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(target));
+        const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod));
         if (int > sema.mod.global_error_set.count() or int == 0)
             return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
         const payload = try sema.arena.create(Value.Payload.Error);
@@ -8123,6 +8145,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@@ -8130,7 +8153,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
     const lhs = try sema.resolveInst(extra.lhs);
     const rhs = try sema.resolveInst(extra.rhs);
-    if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) {
+    if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) {
         const msg = msg: {
             const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{});
             errdefer msg.destroy(sema.gpa);
@@ -8141,9 +8164,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     }
     const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
     const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
-    if (lhs_ty.zigTypeTag() != .ErrorSet)
+    if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
         return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)});
-    if (rhs_ty.zigTypeTag() != .ErrorSet)
+    if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
         return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)});
 
     // Anything merged with anyerror is anyerror.
@@ -8184,6 +8207,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 }
 
 fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
@@ -8191,7 +8215,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
 
-    const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) {
+    const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
         .Enum => operand,
         .Union => blk: {
             const union_ty = try sema.resolveTypeFields(operand_ty);
@@ -8213,8 +8237,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     };
 
     const enum_tag_ty = sema.typeOf(enum_tag);
-    var int_tag_type_buffer: Type.Payload.Bits = undefined;
-    const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena);
+    const int_tag_ty = try enum_tag_ty.intTagType().copy(arena);
 
     if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
         return sema.addConstant(int_tag_ty, opv);
@@ -8231,6 +8254,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 }
 
 fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -8239,15 +8263,14 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
     const operand = try sema.resolveInst(extra.rhs);
 
-    if (dest_ty.zigTypeTag() != .Enum) {
+    if (dest_ty.zigTypeTag(mod) != .Enum) {
         return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(sema.mod)});
     }
     _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
 
     if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
         if (dest_ty.isNonexhaustiveEnum()) {
-            var buffer: Type.Payload.Bits = undefined;
-            const int_tag_ty = dest_ty.intTagType(&buffer);
+            const int_tag_ty = dest_ty.intTagType();
             if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
                 return sema.addConstant(dest_ty, int_val);
             }
@@ -8329,11 +8352,12 @@ fn analyzeOptionalPayloadPtr(
     safety_check: bool,
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const optional_ptr_ty = sema.typeOf(optional_ptr);
-    assert(optional_ptr_ty.zigTypeTag() == .Pointer);
+    assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);
 
     const opt_type = optional_ptr_ty.elemType();
-    if (opt_type.zigTypeTag() != .Optional) {
+    if (opt_type.zigTypeTag(mod) != .Optional) {
         return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)});
     }
@@ -8361,7 +8385,7 @@ fn analyzeOptionalPayloadPtr(
                 );
             }
             if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
-                if (val.isNull()) {
+                if (val.isNull(mod)) {
                     return sema.fail(block, src, "unable to unwrap null", .{});
                 }
                 // The same Value represents the pointer to the optional and the payload.
@@ -8397,11 +8421,12 @@ fn zirOptionalPayload(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    const result_ty = switch (operand_ty.zigTypeTag()) {
+    const result_ty = switch (operand_ty.zigTypeTag(mod)) {
         .Optional => try operand_ty.optionalChildAlloc(sema.arena),
         .Pointer => t: {
             if (operand_ty.ptrSize() != .C) {
@@ -8424,7 +8449,7 @@ fn zirOptionalPayload(
     };
 
     if (try sema.resolveDefinedValue(block, src, operand)) |val| {
-        if (val.isNull()) {
+        if (val.isNull(mod)) {
             return sema.fail(block, src, "unable to unwrap null", .{});
         }
         if (val.castTag(.opt_payload)) |payload| {
@@ -8450,12 +8475,13 @@ fn zirErrUnionPayload(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_src = src;
     const err_union_ty = sema.typeOf(operand);
-    if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
         return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
             err_union_ty.fmt(sema.mod),
         });
@@ -8468,7 +8494,7 @@ fn analyzeErrUnionPayload(
     block: *Block,
     src: LazySrcLoc,
     err_union_ty: Type,
-    operand: Zir.Inst.Ref,
+    operand: Air.Inst.Ref,
     operand_src: LazySrcLoc,
     safety_check: bool,
 ) CompileError!Air.Inst.Ref {
@@ -8517,10 +8543,11 @@ fn analyzeErrUnionPayloadPtr(
     safety_check: bool,
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
-    assert(operand_ty.zigTypeTag() == .Pointer);
+    assert(operand_ty.zigTypeTag(mod) == .Pointer);
 
-    if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) {
+    if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) {
         return sema.fail(block, src, "expected error union type, found '{}'", .{
             operand_ty.elemType().fmt(sema.mod),
         });
@@ -8594,8 +8621,9 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
 }
 
 fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
-    if (operand_ty.zigTypeTag() != .ErrorUnion) {
+    if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
         return sema.fail(block, src, "expected error union type, found '{}'", .{
             operand_ty.fmt(sema.mod),
         });
@@ -8617,13 +8645,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
-    assert(operand_ty.zigTypeTag() == .Pointer);
+    assert(operand_ty.zigTypeTag(mod) == .Pointer);
 
-    if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) {
+    if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) {
         return sema.fail(block, src, "expected error union type, found '{}'", .{
             operand_ty.elemType().fmt(sema.mod),
         });
@@ -8677,8 +8706,7 @@ fn zirFunc(
             extra_index += ret_ty_body.len;
 
             const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known");
-            var buffer: Value.ToTypeBuffer = undefined;
-            break :blk try ret_ty_val.toType(&buffer).copy(sema.arena);
+            break :blk try ret_ty_val.toType().copy(sema.arena);
         },
     };
@@ -8849,6 +8877,7 @@ fn funcCommon(
     noalias_bits: u32,
     is_noinline: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
     const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
     const func_src = LazySrcLoc.nodeOffset(src_node_offset);
@@ -8890,31 +8919,6 @@ fn funcCommon(
     const target = sema.mod.getTarget();
 
     const fn_ty: Type = fn_ty: {
-        // Hot path for some common function types.
-        // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated.
-        if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and
-            alignment.? == 0 and
-            address_space.? == target_util.defaultAddressSpace(target, .function) and
-            section == .default and
-            !is_noinline)
-        {
-            if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) {
-                break :fn_ty Type.initTag(.fn_noreturn_no_args);
-            }
-
-            if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) {
-                break :fn_ty Type.initTag(.fn_void_no_args);
-            }
-
-            if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) {
-                break :fn_ty Type.initTag(.fn_naked_noreturn_no_args);
-            }
-
-            if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) {
-                break :fn_ty Type.initTag(.fn_ccc_void_no_args);
-            }
-        }
-
         // In the case of generic calling convention, or generic alignment, we use
         // default values which are only meaningful for the generic function, *not*
         // the instantiation, which can depend on comptime parameters.
@@ -8985,8 +8989,8 @@ fn funcCommon(
             });
         };
 
-        if (!return_type.isValidReturnType()) {
-            const opaque_str = if (return_type.zigTypeTag() == .Opaque) "opaque " else "";
+        if (!return_type.isValidReturnType(mod)) {
+            const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
             const msg = msg: {
                 const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
                     opaque_str, return_type.fmt(sema.mod),
                 });
@@ -9201,22 +9205,23 @@ fn analyzeParameter(
     has_body: bool,
     is_noalias: bool,
 ) !void {
+    const mod = sema.mod;
     const requires_comptime = try sema.typeRequiresComptime(param.ty);
     comptime_params[i] = param.is_comptime or requires_comptime;
     const this_generic = param.ty.tag() == .generic_poison;
     is_generic.* = is_generic.* or this_generic;
-    const target = sema.mod.getTarget();
+    const target = mod.getTarget();
     if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
         return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
     }
     if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
         return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
     }
-    if (!param.ty.isValidParamType()) {
-        const opaque_str = if (param.ty.zigTypeTag() == .Opaque) "opaque " else "";
+    if (!param.ty.isValidParamType(mod)) {
+        const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
         const msg = msg: {
             const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
-                opaque_str, param.ty.fmt(sema.mod),
+                opaque_str, param.ty.fmt(mod),
             });
             errdefer msg.destroy(sema.gpa);
@@ -9228,11 +9233,11 @@ fn analyzeParameter(
     if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
         const msg = msg: {
             const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
-                param.ty.fmt(sema.mod), @tagName(cc),
+                param.ty.fmt(mod), @tagName(cc),
             });
             errdefer msg.destroy(sema.gpa);
 
-            const src_decl = sema.mod.declPtr(block.src_decl);
+            const src_decl = mod.declPtr(block.src_decl);
             try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty);
 
             try sema.addDeclaredHereNote(msg, param.ty);
@@ -9243,11 +9248,11 @@ fn analyzeParameter(
     if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) {
         const msg = msg: {
             const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
-                param.ty.fmt(sema.mod),
+                param.ty.fmt(mod),
             });
             errdefer msg.destroy(sema.gpa);
 
-            const src_decl = sema.mod.declPtr(block.src_decl);
+            const src_decl = mod.declPtr(block.src_decl);
             try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty);
 
             try sema.addDeclaredHereNote(msg, param.ty);
@@ -9256,7 +9261,7 @@ fn analyzeParameter(
         return sema.failWithOwnedErrorMsg(msg);
     }
     if (!sema.is_generic_instantiation and !this_generic and is_noalias and
-        !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional()))
+        !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod)))
     {
         return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
     }
@@ -9472,13 +9477,14 @@ fn analyzeAs(
     zir_operand: Zir.Inst.Ref,
     no_cast_to_comptime_int: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const operand = try sema.resolveInst(zir_operand);
-    if (zir_dest_type == .var_args_param) return operand;
+    if (zir_dest_type == .var_args_param_type) return operand;
     const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) {
         error.GenericPoison => return operand,
         else => |e| return e,
     };
-    if (dest_ty.zigTypeTag() == .NoReturn) {
+    if (dest_ty.zigTypeTag(mod) == .NoReturn) {
         return sema.fail(block, src, "cannot cast to noreturn", .{});
     }
     const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index|
@@ -9495,11 +9501,12 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ptr = try sema.resolveInst(inst_data.operand);
     const ptr_ty = sema.typeOf(ptr);
-    if (!ptr_ty.isPtrAtRuntime()) {
+    if (!ptr_ty.isPtrAtRuntime(mod)) {
         return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)});
     }
     if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| {
@@ -9586,25 +9593,25 @@ fn intCast(
     operand_src: LazySrcLoc,
     runtime_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
     const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
     const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
 
     if (try sema.isComptimeKnown(operand)) {
         return sema.coerce(block, dest_ty, operand, operand_src);
-    } else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) {
+    } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
         return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
     }
 
     try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
-    const is_vector = dest_ty.zigTypeTag() == .Vector;
+    const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
 
     if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
         // requirement: intCast(u0, input) iff input == 0
         if (runtime_safety and block.wantSafety()) {
             try sema.requireRuntimeBlock(block, src, operand_src);
-            const target = sema.mod.getTarget();
-            const wanted_info = dest_scalar_ty.intInfo(target);
+            const wanted_info = dest_scalar_ty.intInfo(mod);
             const wanted_bits = wanted_info.bits;
 
             if (wanted_bits == 0) {
@@ -9631,9 +9638,8 @@ fn intCast(
     try sema.requireRuntimeBlock(block, src, operand_src);
     if (runtime_safety and block.wantSafety()) {
-        const target = sema.mod.getTarget();
-        const actual_info = operand_scalar_ty.intInfo(target);
-        const wanted_info = dest_scalar_ty.intInfo(target);
+        const actual_info = operand_scalar_ty.intInfo(mod);
+        const wanted_info = dest_scalar_ty.intInfo(mod);
         const actual_bits = actual_info.bits;
         const wanted_bits = wanted_info.bits;
         const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed);
@@ -9642,7 +9648,7 @@ fn intCast(
         // range shrinkage
         // requirement: int value fits into target type
         if (wanted_value_bits < actual_value_bits) {
-            const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target);
+            const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod);
             const dest_max_val = if (is_vector)
                 try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar)
             else
@@ -9653,7 +9659,7 @@ fn intCast(
             if (actual_info.signedness == .signed) {
                 // Reinterpret the sign-bit as part of the value. This will make
                 // negative differences (`operand` > `dest_max`) appear too big.
-                const unsigned_operand_ty = try Type.Tag.int_unsigned.create(sema.arena, actual_bits);
+                const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits);
                 const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff);
 
                 // If the destination type is signed, then we need to double its
@@ -9727,6 +9733,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -9735,7 +9742,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    switch (dest_ty.zigTypeTag()) {
+    switch (dest_ty.zigTypeTag(mod)) {
         .AnyFrame,
         .ComptimeFloat,
         .ComptimeInt,
@@ -9757,7 +9764,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)});
                 errdefer msg.destroy(sema.gpa);
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
                     else => {},
                 }
@@ -9771,7 +9778,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)});
                 errdefer msg.destroy(sema.gpa);
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
                     .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
                     else => {},
@@ -9782,7 +9789,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             return sema.failWithOwnedErrorMsg(msg);
         },
         .Struct, .Union => if (dest_ty.containerLayout() == .Auto) {
-            const container = switch (dest_ty.zigTypeTag()) {
+            const container = switch (dest_ty.zigTypeTag(mod)) {
                 .Struct => "struct",
                 .Union => "union",
                 else => unreachable,
@@ -9799,7 +9806,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         .Vector,
         => {},
     }
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .AnyFrame,
         .ComptimeFloat,
         .ComptimeInt,
@@ -9821,7 +9828,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)});
                 errdefer msg.destroy(sema.gpa);
-                switch (dest_ty.zigTypeTag()) {
+                switch (dest_ty.zigTypeTag(mod)) {
                     .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
                     else => {},
                 }
@@ -9834,7 +9841,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             const msg = msg: {
                 const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)});
                 errdefer msg.destroy(sema.gpa);
-                switch (dest_ty.zigTypeTag()) {
+                switch (dest_ty.zigTypeTag(mod)) {
                     .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
                     .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
                     else => {},
@@ -9845,7 +9852,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             return sema.failWithOwnedErrorMsg(msg);
         },
         .Struct, .Union => if (operand_ty.containerLayout() == .Auto) {
-            const container = switch (operand_ty.zigTypeTag()) {
+            const container = switch (operand_ty.zigTypeTag(mod)) {
                 .Struct => "struct",
                 .Union => "union",
                 else => unreachable,
@@ -9869,6 +9876,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -9878,7 +9886,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const operand = try sema.resolveInst(extra.rhs);
 
     const target = sema.mod.getTarget();
-    const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) {
+    const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) {
         .ComptimeFloat => true,
         .Float => false,
         else => return sema.fail(
@@ -9890,7 +9898,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     };
 
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .ComptimeInt => {},
         else => return sema.fail(
             block,
@@ -9944,20 +9952,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const array_ptr = try sema.resolveInst(extra.lhs);
     const elem_index = try sema.resolveInst(extra.rhs);
     const indexable_ty = sema.typeOf(array_ptr);
-    if (indexable_ty.zigTypeTag() != .Pointer) {
+    if (indexable_ty.zigTypeTag(mod) != .Pointer) {
         const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
         const msg = msg: {
             const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{
                 indexable_ty.fmt(sema.mod),
             });
             errdefer msg.destroy(sema.gpa);
-            if (indexable_ty.zigTypeTag() == .Array) {
+            if (indexable_ty.zigTypeTag(mod) == .Array) {
                 try sema.errNote(block, src, msg, "consider using '&' here", .{});
             }
             break :msg msg;
@@ -10076,6 +10085,7 @@ fn zirSwitchCapture(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const zir_datas = sema.code.instructions.items(.data);
     const capture_info = zir_datas[inst].switch_capture;
     const switch_info = zir_datas[capture_info.switch_inst].pl_node;
@@ -10091,7 +10101,7 @@ fn zirSwitchCapture(
 
     if (block.inline_case_capture != .none) {
         const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable;
-        if (operand_ty.zigTypeTag() == .Union) {
+        if (operand_ty.zigTypeTag(mod) == .Union) {
            const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?);
             const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
             const field_ty = union_obj.fields.values()[field_index].ty;
@@ -10144,7 +10154,7 @@ fn zirSwitchCapture(
         return operand_ptr;
     }
 
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .ErrorSet => if (block.switch_else_err_ty) |some| {
             return sema.bitCast(block, some, operand, operand_src, null);
         } else {
@@ -10162,7 +10172,7 @@ fn zirSwitchCapture(
         switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item,
     };
 
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .Union => {
             const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
             const first_item = try sema.resolveInst(items[0]);
@@ -10269,6 +10279,7 @@ fn zirSwitchCapture(
 }
 
 fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[inst].un_tok;
     const src = inst_data.src();
@@ -10280,7 +10291,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
     const operand_ptr_ty = sema.typeOf(operand_ptr);
     const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty;
 
-    if (operand_ty.zigTypeTag() != .Union) {
+    if (operand_ty.zigTypeTag(mod) != .Union) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{
                 operand_ty.fmt(sema.mod),
@@ -10301,6 +10312,7 @@ fn zirSwitchCond(
     inst: Zir.Inst.Index,
     is_ref: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
@@ -10311,7 +10323,7 @@ fn zirSwitchCond(
         operand_ptr;
     const operand_ty = sema.typeOf(operand);
 
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .Type,
         .Void,
         .Bool,
@@ -10371,6 +10383,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
@@ -10415,7 +10428,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         const target_ty = sema.typeOf(raw_operand);
         break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty;
     };
-    const union_originally = maybe_union_ty.zigTypeTag() == .Union;
+    const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;
 
     // Duplicate checking variables later also used for `inline else`.
     var seen_enum_fields: []?Module.SwitchProngSrc = &.{};
@@ -10433,7 +10446,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     var empty_enum = false;
 
     const operand_ty = sema.typeOf(operand);
-    const err_set = operand_ty.zigTypeTag() == .ErrorSet;
+    const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet;
 
     var else_error_ty: ?Type = null;
@@ -10459,10 +10472,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         return sema.failWithOwnedErrorMsg(msg);
     }
 
-    const target = sema.mod.getTarget();
-
     // Validate for duplicate items, missing else prong, and invalid range.
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .Union => unreachable, // handled in zirSwitchCond
         .Enum => {
             seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount());
@@ -10774,12 +10785,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             }
 
             check_range: {
-                if (operand_ty.zigTypeTag() == .Int) {
+                if (operand_ty.zigTypeTag(mod) == .Int) {
                     var arena = std.heap.ArenaAllocator.init(gpa);
                     defer arena.deinit();
 
-                    const min_int = try operand_ty.minInt(arena.allocator(), target);
-                    const max_int = try operand_ty.maxInt(arena.allocator(), target);
+                    const min_int = try operand_ty.minInt(arena.allocator(), mod);
+                    const max_int = try operand_ty.maxInt(arena.allocator(), mod);
                     if (try range_set.spans(min_int, max_int, operand_ty)) {
                         if (special_prong == .@"else") {
                             return sema.fail(
@@ -11080,7 +11091,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) {
             return Air.Inst.Ref.unreachable_value;
         }
-        if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag() == .Enum and
+        if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
             (!operand_ty.isNonexhaustiveEnum() or union_originally))
         {
             try sema.zirDbgStmt(block, cond_dbg_node_index);
@@ -11135,7 +11146,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         const analyze_body = if (union_originally) blk: {
             const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
             const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-            break :blk field_ty.zigTypeTag() != .NoReturn;
+            break :blk field_ty.zigTypeTag(mod) != .NoReturn;
         } else true;
 
         if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
@@ -11242,7 +11253,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 const analyze_body = if (union_originally) blk: {
                     const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
                     const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                    break :blk field_ty.zigTypeTag() != .NoReturn;
+                    break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                 } else true;
 
                 if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
@@ -11286,7 +11297,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 const item = try sema.resolveInst(item_ref);
                 const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
                 const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                if (field_ty.zigTypeTag() != .NoReturn) break true;
+                if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
             } else false
         else
             true;
@@ -11409,7 +11420,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     var final_else_body: []const Air.Inst.Index = &.{};
     if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
         var emit_bb = false;
-        if (special.is_inline) switch (operand_ty.zigTypeTag()) {
+        if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
            .Enum => {
                if (operand_ty.isNonexhaustiveEnum() and !union_originally) {
                    return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
@@ -11429,7 +11440,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     const analyze_body = if (union_originally) blk: {
                         const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                        break :blk field_ty.zigTypeTag() != .NoReturn;
+                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;
 
                     if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -11551,7 +11562,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         case_block.inline_case_capture = .none;
 
         if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
-            operand_ty.zigTypeTag() == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
+            operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
         {
             try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
             const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -11563,7 +11574,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 if (seen_field != null) continue;
                 const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
                 const field_ty = union_obj.fields.values()[index].ty;
-                if (field_ty.zigTypeTag() != .NoReturn) break true;
+                if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
             } else false
         else
             true;
@@ -11629,9 +11640,9 @@ const RangeSetUnhandledIterator = struct {
     first: bool = true,
 
     fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
-        const target = sema.mod.getTarget();
-        const min = try ty.minInt(sema.arena, target);
-        const max = try ty.maxInt(sema.arena, target);
+        const mod = sema.mod;
+        const min = try ty.minInt(sema.arena, mod);
+        const max = try ty.maxInt(sema.arena, mod);
 
         return RangeSetUnhandledIterator{
             .sema = sema,
@@ -11931,18 +11942,19 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
 }
 
 fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
+    const mod = sema.mod;
     const index = Zir.refToIndex(cond) orelse return;
     if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;
 
     const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
     const err_operand = try sema.resolveInst(err_inst_data.operand);
     const operand_ty = sema.typeOf(err_operand);
-    if (operand_ty.zigTypeTag() == .ErrorSet) {
+    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
         return;
     }
     if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
-        if (!operand_ty.isError()) return;
+        if (!operand_ty.isError(mod)) return;
         if (val.getError() == null) return;
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
     }
@@ -11972,6 +11984,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
 }
 
 fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -11995,7 +12008,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
             break :hf field_index < ty.structFieldCount();
         }
-        break :hf switch (ty.zigTypeTag()) {
+        break :hf switch (ty.zigTypeTag(mod)) {
            .Struct => ty.structFields().contains(field_name),
ty.structFields().contains(field_name), .Union => ty.unionFields().contains(field_name), .Enum => ty.enumFields().contains(field_name), @@ -12126,6 +12139,7 @@ fn zirShl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); sema.src = src; @@ -12136,11 +12150,10 @@ fn zirShl( const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const target = sema.mod.getTarget(); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - const scalar_ty = lhs_ty.scalarType(); - const scalar_rhs_ty = rhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); + const scalar_rhs_ty = rhs_ty.scalarType(mod); // TODO coerce rhs if air_tag is not shl_sat const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty); @@ -12156,18 +12169,18 @@ fn zirShl( if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { return lhs; } - if (scalar_ty.zigTypeTag() != .ComptimeInt and air_tag != .shl_sat) { + if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { var bits_payload = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(target).bits, + .data = scalar_ty.intInfo(mod).bits, }; const bit_value = Value.initPayload(&bits_payload.base); - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.gte, bit_value, target)) { + if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, @@ -12175,26 +12188,26 @@ fn zirShl( }); } } - } else if (rhs_val.compareHetero(.gte, bit_value, target)) { + } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), scalar_ty.fmt(sema.mod), }); } } - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.lt, Value.zero, target)) { + if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, target)) { + } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12204,7 +12217,7 @@ fn zirShl( const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse { - if (scalar_ty.zigTypeTag() == .ComptimeInt) { + if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); } break :rs rhs_src; @@ -12213,7 +12226,7 @@ fn zirShl( const val = switch (air_tag) { .shl_exact => val: { const shifted = try 
lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod); - if (scalar_ty.zigTypeTag() == .ComptimeInt) { + if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { break :val shifted.wrapped_result; } if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) { @@ -12222,12 +12235,12 @@ fn zirShl( return sema.fail(block, src, "operation caused overflow", .{}); }, - .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt) + .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) else try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod), - .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt) + .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) else try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod), @@ -12241,11 +12254,11 @@ fn zirShl( const new_rhs = if (air_tag == .shl_sat) rhs: { // Limit the RHS type for saturating shl to be an integer as small as the LHS. if (rhs_is_comptime_int or - scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits) + scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits) { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, target), + try lhs_ty.maxInt(sema.arena, mod), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -12256,11 +12269,11 @@ fn zirShl( try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { - const bit_count = scalar_ty.intInfo(target).bits; + const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); - const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: { + const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ @@ -12290,7 +12303,7 @@ fn zirShl( } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); - const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector) + const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ @@ -12319,6 +12332,7 @@ fn zirShr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); sema.src = src; @@ -12330,8 +12344,7 @@ fn zirShr( const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - const target = sema.mod.getTarget(); - const scalar_ty = lhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs); @@ -12344,18 +12357,18 @@ fn zirShr( if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { return lhs; } - if (scalar_ty.zigTypeTag() != .ComptimeInt) { + if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { var bits_payload = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(target).bits, + .data = scalar_ty.intInfo(mod).bits, }; const bit_value = 
Value.initPayload(&bits_payload.base); - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.gte, bit_value, target)) { + if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, @@ -12363,26 +12376,26 @@ fn zirShr( }); } } - } else if (rhs_val.compareHetero(.gte, bit_value, target)) { + } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), scalar_ty.fmt(sema.mod), }); } } - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.lt, Value.zero, target)) { + if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, target)) { + } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12405,18 +12418,18 @@ fn zirShr( } } else rhs_src; - if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) { + if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); } try sema.requireRuntimeBlock(block, src, runtime_src); const result = try block.addBinOp(air_tag, lhs, rhs); if (block.wantSafety()) { - const bit_count = scalar_ty.intInfo(target).bits; + const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); - const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: { + const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ @@ -12436,7 +12449,7 @@ fn zirShr( if (air_tag == .shr_exact) { const back = try block.addBinOp(.shl, result, rhs); - const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: { + const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try block.addCmpVector(lhs, back, .eq); break :ok try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, @@ -12461,6 +12474,7 @@ fn zirBitwise( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -12475,8 +12489,8 @@ fn zirBitwise( const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); - const scalar_type = 
resolved_type.scalarType(); - const scalar_tag = scalar_type.zigTypeTag(); + const scalar_type = resolved_type.scalarType(mod); + const scalar_tag = scalar_type.zigTypeTag(mod); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -12484,7 +12498,7 @@ fn zirBitwise( const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { - return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); + return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) }); } const runtime_src = runtime: { @@ -12515,15 +12529,16 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_type = sema.typeOf(operand); - const scalar_type = operand_type.scalarType(); + const scalar_type = operand_type.scalarType(mod); - if (scalar_type.zigTypeTag() != .Int) { + if (scalar_type.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ operand_type.fmt(sema.mod), }); @@ -12532,7 +12547,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) { return sema.addConstUndef(operand_type); - } else if (operand_type.zigTypeTag() == .Vector) { + } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen()); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); @@ -12728,18 +12743,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); + const mod = sema.mod; const ptr_addrspace = p: { - if (lhs_ty.zigTypeTag() == .Pointer) break :p lhs_ty.ptrAddressSpace(); - if (rhs_ty.zigTypeTag() == .Pointer) break :p rhs_ty.ptrAddressSpace(); + if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(); + if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(); break :p null; }; - const runtime_src = if (switch (lhs_ty.zigTypeTag()) { + const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) { .Array, .Struct => try sema.resolveMaybeUndefVal(lhs), .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs), else => unreachable, }) |lhs_val| rs: { - if (switch (rhs_ty.zigTypeTag()) { + if (switch (rhs_ty.zigTypeTag(mod)) { .Array, .Struct => try sema.resolveMaybeUndefVal(rhs), .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs), else => unreachable, @@ -12841,8 +12857,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Array => return 
operand_ty.arrayInfo(), .Pointer => { const ptr_info = operand_ty.ptrInfo().data; @@ -12859,7 +12876,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins }; }, .One => { - if (ptr_info.pointee_type.zigTypeTag() == .Array) { + if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { return ptr_info.pointee_type.arrayInfo(); } }, @@ -12867,10 +12884,10 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins } }, .Struct => { - if (operand_ty.isTuple() and peer_ty.isIndexable()) { + if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) { assert(!peer_ty.isTuple()); return .{ - .elem_type = peer_ty.elemType2(), + .elem_type = peer_ty.elemType2(mod), .sentinel = null, .len = operand_ty.arrayLen(), }; @@ -12970,11 +12987,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } // Analyze the lhs first, to catch the case that someone tried to do exponentiation + const mod = sema.mod; const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => { try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{}); }, @@ -12994,7 +13012,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); - const ptr_addrspace = if (lhs_ty.zigTypeTag() == .Pointer) lhs_ty.ptrAddressSpace() else null; + const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { @@ -13082,6 +13100,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const lhs_src = src; @@ -13089,9 +13108,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const rhs = try sema.resolveInst(inst_data.operand); const rhs_ty = sema.typeOf(rhs); - const rhs_scalar_ty = rhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - if (rhs_scalar_ty.isUnsignedInt() or switch (rhs_scalar_ty.zigTypeTag()) { + if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => false, else => true, }) { @@ -13108,7 +13127,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); } - const lhs = if (rhs_ty.zigTypeTag() == .Vector) + const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) else try sema.resolveInst(.zero); @@ -13117,6 +13136,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const lhs_src = src; @@ -13124,14 +13144,14 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const rhs = try sema.resolveInst(inst_data.operand); const rhs_ty = sema.typeOf(rhs); - const rhs_scalar_ty = rhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - switch (rhs_scalar_ty.zigTypeTag()) { + switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => {}, else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}), } - const lhs = if (rhs_ty.zigTypeTag() == .Vector) + const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) else try sema.resolveInst(.zero); @@ -13161,6 +13181,7 @@ fn zirArithmetic( } fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13171,8 +13192,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13181,25 +13202,24 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); - if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or - (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat)) + if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or + (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat)) { // If it makes a difference whether we coerce to ints or floats before doing the division, error. // If lhs % rhs is 0, it doesn't matter. 
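Nearly every hunk in this file makes the same mechanical change: type queries such as zigTypeTag, scalarType, intInfo, minInt, and maxInt now take the Module (usually hoisted once per function as `const mod = sema.mod;`) where they previously took no argument or a precomputed `target`. A minimal sketch of why an interned type handle cannot answer such queries on its own; the names below are illustrative only, not the compiler's real API:

const std = @import("std");

const TypeTag = enum { Int, Vector };

// Hypothetical per-type data stored in the module's intern pool.
const TypeInfo = struct {
    tag: TypeTag,
    bits: u16 = 0,
};

const Module = struct {
    pool: []const TypeInfo,
};

const Type = struct {
    /// Index into the module's intern pool; meaningless on its own.
    index: u32,

    /// Analogue of `zigTypeTag(mod)`: the handle carries no data,
    /// so every query must go through the owning module.
    fn zigTypeTag(ty: Type, mod: *const Module) TypeTag {
        return mod.pool[ty.index].tag;
    }

    fn intBits(ty: Type, mod: *const Module) u16 {
        return mod.pool[ty.index].bits;
    }
};

pub fn main() void {
    const mod = Module{ .pool = &.{
        .{ .tag = .Int, .bits = 32 },
        .{ .tag = .Vector },
    } };
    const ty = Type{ .index = 0 };
    std.debug.print("tag={s} bits={d}\n", .{ @tagName(ty.zigTypeTag(&mod)), ty.intBits(&mod) });
}

Under this model the old `ty.zigTypeTag()` signature is simply impossible to keep, which is why the refactor threads `mod` through instead of a `target`.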
@@ -13268,7 +13288,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const runtime_src = rs: {
         if (maybe_lhs_val) |lhs_val| {
             if (lhs_val.isUndef()) {
-                if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                     if (maybe_rhs_val) |rhs_val| {
                         if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
                             return sema.addConstUndef(resolved_type);
@@ -13309,7 +13329,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     }

     const air_tag = if (is_int) blk: {
-        if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) {
+        if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
             return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
         }
         break :blk Air.Inst.Tag.div_trunc;
@@ -13321,6 +13341,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 }

 fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13331,8 +13352,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13341,19 +13362,18 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });

-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13437,7 +13457,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         const ok = if (!is_int) ok: {
             const floored = try block.addUnOp(.floor, result);

-            if (resolved_type.zigTypeTag() == .Vector) {
+            if (resolved_type.zigTypeTag(mod) == .Vector) {
                 const eql = try block.addCmpVector(result, floored, .eq);
                 break :ok try block.addInst(.{
                     .tag = switch (block.float_mode) {
@@ -13459,7 +13479,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         } else ok: {
             const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);

-            if (resolved_type.zigTypeTag() == .Vector) {
+            if (resolved_type.zigTypeTag(mod) == .Vector) {
                 const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
                 const zero = try sema.addConstant(resolved_type, zero_val);
                 const eql = try block.addCmpVector(remainder, zero, .eq);
@@ -13484,6 +13504,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13494,8 +13515,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13504,20 +13525,19 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });

-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13562,7 +13582,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     if (maybe_lhs_val) |lhs_val| {
         if (lhs_val.isUndef()) {
-            if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+            if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                 if (maybe_rhs_val) |rhs_val| {
                     if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
                         return sema.addConstUndef(resolved_type);
@@ -13600,6 +13620,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13610,8 +13631,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13620,20 +13641,19 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });

-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13677,7 +13697,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     if (maybe_lhs_val) |lhs_val| {
         if (lhs_val.isUndef()) {
-            if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+            if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                 if (maybe_rhs_val) |rhs_val| {
                     if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
                         return sema.addConstUndef(resolved_type);
@@ -13727,22 +13747,20 @@ fn addDivIntOverflowSafety(
     casted_rhs: Air.Inst.Ref,
     is_int: bool,
 ) CompileError!void {
+    const mod = sema.mod;
     if (!is_int) return;

     // If the LHS is unsigned, it cannot cause overflow.
-    if (!lhs_scalar_ty.isSignedInt()) return;
-
-    const mod = sema.mod;
-    const target = mod.getTarget();
+    if (!lhs_scalar_ty.isSignedInt(mod)) return;

     // If the LHS is widened to a larger integer type, no overflow is possible.
-    if (lhs_scalar_ty.intInfo(target).bits < resolved_type.intInfo(target).bits) {
+    if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
         return;
     }

-    const min_int = try resolved_type.minInt(sema.arena, target);
+    const min_int = try resolved_type.minInt(sema.arena, mod);
     const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1);
-    const neg_one = if (resolved_type.zigTypeTag() == .Vector)
+    const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector)
         try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
     else
         neg_one_scalar;
@@ -13759,7 +13777,7 @@ fn addDivIntOverflowSafety(
     }

     var ok: Air.Inst.Ref = .none;
-    if (resolved_type.zigTypeTag() == .Vector) {
+    if (resolved_type.zigTypeTag(mod) == .Vector) {
         if (maybe_lhs_val == null) {
             const min_int_ref = try sema.addConstant(resolved_type, min_int);
             ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
@@ -13815,7 +13833,8 @@ fn addDivByZeroSafety(
     // emitted above.
     if (maybe_rhs_val != null) return;

-    const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
+    const mod = sema.mod;
+    const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
         const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
         const zero = try sema.addConstant(resolved_type, zero_val);
         const ok = try block.addCmpVector(casted_rhs, zero, .neq);
@@ -13842,6 +13861,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
 }

 fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13852,8 +13872,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13862,20 +13882,19 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });

-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13904,7 +13923,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             } else Value.zero;
             return sema.addConstant(resolved_type, zero_val);
         }
-    } else if (lhs_scalar_ty.isSignedInt()) {
+    } else if (lhs_scalar_ty.isSignedInt(mod)) {
         return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
     }
     if (maybe_rhs_val) |rhs_val| {
@@ -13929,7 +13948,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             return sema.addConstant(resolved_type, rem_result);
         }
         break :rs lhs_src;
-    } else if (rhs_scalar_ty.isSignedInt()) {
+    } else if (rhs_scalar_ty.isSignedInt(mod)) {
         return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
     } else {
         break :rs rhs_src;
@@ -13978,7 +13997,8 @@ fn intRem(
     lhs: Value,
     rhs: Value,
 ) CompileError!Value {
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, ty.vectorLen());
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
@@ -13997,13 +14017,13 @@ fn intRemScalar(
     lhs: Value,
     rhs: Value,
 ) CompileError!Value {
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
@@ -14025,6 +14045,7 @@ fn intRemScalar(
 }

 fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -14035,8 +14056,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -14048,13 +14069,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -14127,6 +14147,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 }

 fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -14137,8 +14158,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -14150,13 +14171,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -14268,7 +14288,7 @@ fn zirOverflowArithmetic(
     const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
     const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);

-    if (dest_ty.scalarType().zigTypeTag() != .Int) {
+    if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
         return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
     }
@@ -14434,12 +14454,14 @@ fn zirOverflowArithmetic(
 }

 fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
-    if (ty.zigTypeTag() != .Vector) return val;
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) != .Vector) return val;
     return Value.Tag.repeated.create(sema.arena, val);
 }

 fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
-    const ov_ty = if (ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
+    const mod = sema.mod;
+    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;

     const types = try sema.arena.alloc(Type, 2);
     const values = try sema.arena.alloc(Value, 2);
@@ -14468,10 +14490,11 @@ fn analyzeArithmetic(
     rhs_src: LazySrcLoc,
     want_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

     if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
@@ -14491,18 +14514,17 @@ fn analyzeArithmetic(
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });

-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
     const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
@@ -14910,7 +14932,7 @@ fn analyzeArithmetic(
         } },
     });
     const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-    const any_ov_bit = if (resolved_type.zigTypeTag() == .Vector)
+    const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
         try block.addInst(.{
             .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
             .data = .{ .reduce = .{
@@ -14944,12 +14966,12 @@ fn analyzePtrArithmetic(
     // TODO if the operand is comptime-known to be negative, or is a negative int,
     // coerce to isize instead of usize.
     const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
     const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
     const ptr_ty = sema.typeOf(ptr);
     const ptr_info = ptr_ty.ptrInfo().data;
-    const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array)
+    const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array)
         ptr_info.pointee_type.childType()
     else
         ptr_info.pointee_type;
@@ -14963,9 +14985,9 @@ fn analyzePtrArithmetic(
         }
         // If the addend is not a comptime-known value we can still count on
         // it being a multiple of the type size.
-        const elem_size = elem_ty.abiSize(target);
+        const elem_size = elem_ty.abiSize(mod);
         const addend = if (opt_off_val) |off_val| a: {
-            const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target));
+            const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod));
             break :a elem_size * off_int;
         } else elem_size;
@@ -14991,10 +15013,10 @@ fn analyzePtrArithmetic(
         if (opt_off_val) |offset_val| {
             if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty);

-            const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target));
+            const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
             if (offset_int == 0) return ptr;
-            if (try ptr_val.getUnsignedIntAdvanced(target, sema)) |addr| {
-                const elem_size = elem_ty.abiSize(target);
+            if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
+                const elem_size = elem_ty.abiSize(mod);
                 const new_addr = switch (air_tag) {
                     .ptr_add => addr + elem_size * offset_int,
                     .ptr_sub => addr - elem_size * offset_int,
@@ -15116,6 +15138,7 @@ fn zirAsm(
     const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
     const inputs = try sema.arena.alloc(ConstraintName, inputs_len);

+    const mod = sema.mod;
     for (args, 0..) |*arg, arg_i| {
         const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
@@ -15123,7 +15146,7 @@ fn zirAsm(
         const uncasted_arg = try sema.resolveInst(input.data.operand);
         const uncasted_arg_ty = sema.typeOf(uncasted_arg);
-        switch (uncasted_arg_ty.zigTypeTag()) {
+        switch (uncasted_arg_ty.zigTypeTag(mod)) {
             .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src),
             .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src),
             else => {
@@ -15205,6 +15228,7 @@ fn zirCmpEq(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src: LazySrcLoc = inst_data.src();
@@ -15215,8 +15239,8 @@ fn zirCmpEq(
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_ty_tag = lhs_ty.zigTypeTag();
-    const rhs_ty_tag = rhs_ty.zigTypeTag();
+    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
+    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
     if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
         // null == null, null != null
         if (op == .eq) {
@@ -15295,6 +15319,7 @@ fn analyzeCmpUnionTag(
     tag_src: LazySrcLoc,
     op: std.math.CompareOperator,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const union_ty = try sema.resolveTypeFields(sema.typeOf(un));
     const union_tag_ty = union_ty.unionTagType() orelse {
         const msg = msg: {
@@ -15313,7 +15338,7 @@ fn analyzeCmpUnionTag(
     if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| {
         if (enum_val.isUndef()) return sema.addConstUndef(Type.bool);
         const field_ty = union_ty.unionFieldType(enum_val, sema.mod);
-        if (field_ty.zigTypeTag() == .NoReturn) {
+        if (field_ty.zigTypeTag(mod) == .NoReturn) {
             return Air.Inst.Ref.bool_false;
         }
     }
@@ -15352,32 +15377,33 @@ fn analyzeCmp(
     rhs_src: LazySrcLoc,
     is_equality_cmp: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    if (lhs_ty.zigTypeTag() != .Optional and rhs_ty.zigTypeTag() != .Optional) {
+    if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
         try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     }

-    if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
         return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) {
+    if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
         // This operation allows any combination of integer and float types, regardless of the
         // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
         // numeric types.
         return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorUnion and rhs_ty.zigTypeTag() == .ErrorSet) {
+    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
         const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
         return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorSet and rhs_ty.zigTypeTag() == .ErrorUnion) {
+    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
         const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
         return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
     }
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
-    if (!resolved_type.isSelfComparable(is_equality_cmp)) {
+    if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
         return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
             compareOperatorName(op), resolved_type.fmt(sema.mod),
         });
@@ -15408,6 +15434,7 @@ fn cmpSelf(
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const resolved_type = sema.typeOf(casted_lhs);
     const runtime_src: LazySrcLoc = src: {
         if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
@@ -15415,7 +15442,7 @@ fn cmpSelf(
             if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                 if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);

-                if (resolved_type.zigTypeTag() == .Vector) {
+                if (resolved_type.zigTypeTag(mod) == .Vector) {
                     const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
                     const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
                     return sema.addConstant(result_ty, cmp_val);
@@ -15427,7 +15454,7 @@ fn cmpSelf(
                     return Air.Inst.Ref.bool_false;
                 }
             } else {
-                if (resolved_type.zigTypeTag() == .Bool) {
+                if (resolved_type.zigTypeTag(mod) == .Bool) {
                     // We can lower bool eq/neq more efficiently.
                     return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
                 }
@@ -15436,7 +15463,7 @@ fn cmpSelf(
         } else {
             // For bools, we still check the other operand, because we can lower
             // bool eq/neq more efficiently.
-            if (resolved_type.zigTypeTag() == .Bool) {
+            if (resolved_type.zigTypeTag(mod) == .Bool) {
                 if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                     if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
                     return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
@@ -15446,7 +15473,7 @@ fn cmpSelf(
         }
     };
     try sema.requireRuntimeBlock(block, src, runtime_src);
-    if (resolved_type.zigTypeTag() == .Vector) {
+    if (resolved_type.zigTypeTag(mod) == .Vector) {
         return block.addCmpVector(casted_lhs, casted_rhs, op);
     }
     const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
@@ -15475,10 +15502,11 @@ fn runtimeBoolCmp(
 }

 fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Fn,
         .NoReturn,
         .Undefined,
@@ -15509,8 +15537,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .AnyFrame,
         => {},
     }
-    const target = sema.mod.getTarget();
-    const val = try ty.lazyAbiSize(target, sema.arena);
+    const val = try ty.lazyAbiSize(mod, sema.arena);
     if (val.tag() == .lazy_size) {
         try sema.queueFullTypeResolution(ty);
     }
@@ -15518,10 +15545,11 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 }

 fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .Fn,
         .NoReturn,
         .Undefined,
@@ -15552,8 +15580,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .AnyFrame,
         => {},
     }
-    const target = sema.mod.getTarget();
-    const bit_size = try operand_ty.bitSizeAdvanced(target, sema);
+    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
     return sema.addIntUnsigned(Type.comptime_int, bit_size);
 }
@@ -15765,13 +15792,13 @@ fn zirBuiltinSrc(
 }

 fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ty = try sema.resolveType(block, src, inst_data.operand);
     const type_info_ty = try sema.getBuiltinType("Type");
-    const target = sema.mod.getTarget();

-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Type => return sema.addConstant(
             type_info_ty,
             try Value.Tag.@"union".create(sema.arena, .{
@@ -15881,8 +15908,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
             try sema.ensureDeclAnalyzed(fn_info_decl_index);
             const fn_info_decl = sema.mod.declPtr(fn_info_decl_index);
-            var fn_ty_buffer: Value.ToTypeBuffer = undefined;
-            const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer);
+            const fn_ty = fn_info_decl.val.toType();
             const param_info_decl_index = (try sema.namespaceLookup(
                 block,
                 src,
@@ -15892,8 +15918,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
             try sema.ensureDeclAnalyzed(param_info_decl_index);
             const param_info_decl = sema.mod.declPtr(param_info_decl_index);
-            var param_buffer: Value.ToTypeBuffer = undefined;
-            const param_ty = param_info_decl.val.toType(&param_buffer);
+            const param_ty = param_info_decl.val.toType();
             const new_decl = try params_anon_decl.finish(
                 try Type.Tag.array.create(params_anon_decl.arena(), .{
                     .len = param_vals.len,
@@ -15924,7 +15949,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // calling_convention: CallingConvention,
                 try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)),
                 // alignment: comptime_int,
-                try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)),
+                try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)),
                 // is_generic: bool,
                 Value.makeBool(info.is_generic),
                 // is_var_args: bool,
@@ -15944,7 +15969,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         },
         .Int => {
-            const info = ty.intInfo(target);
+            const info = ty.intInfo(mod);
             const field_values = try sema.arena.alloc(Value, 2);
             // signedness: Signedness,
             field_values[0] = try Value.Tag.enum_field_index.create(
@@ -15965,7 +15990,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Float => {
             const field_values = try sema.arena.alloc(Value, 1);
             // bits: comptime_int,
-            field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target));
+            field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod));

             return sema.addConstant(
                 type_info_ty,
@@ -15980,7 +16005,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const alignment = if (info.@"align" != 0)
                 try Value.Tag.int_u64.create(sema.arena, info.@"align")
             else
-                try info.pointee_type.lazyAbiAlignment(target, sema.arena);
+                try info.pointee_type.lazyAbiAlignment(mod, sema.arena);

             const field_values = try sema.arena.create([8]Value);
             field_values.* = .{
@@ -16072,8 +16097,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
                 const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index);
-                var buffer: Value.ToTypeBuffer = undefined;
-                break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+                break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
             };

             try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena));
@@ -16164,8 +16188,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Enum => {
             // TODO: look into memoizing this result.
-            var int_tag_type_buffer: Type.Payload.Bits = undefined;
-            const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena);
+            const int_tag_ty = try ty.intTagType().copy(sema.arena);

             const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum());
@@ -16182,8 +16205,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
                 const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index);
-                var buffer: Value.ToTypeBuffer = undefined;
-                break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+                break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
             };

             const enum_fields = ty.enumFields();
@@ -16275,8 +16297,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(union_field_ty_decl_index);
                 const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index);
-                var buffer: Value.ToTypeBuffer = undefined;
-                break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+                break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
             };

             const union_ty = try sema.resolveTypeFields(ty);
@@ -16383,8 +16404,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(struct_field_ty_decl_index);
                 const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index);
-                var buffer: Value.ToTypeBuffer = undefined;
-                break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+                break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
             };
             const struct_ty = try sema.resolveTypeFields(ty);
             try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
@@ -16430,7 +16450,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                         // is_comptime: bool,
                         Value.makeBool(is_comptime),
                         // alignment: comptime_int,
-                        try field_ty.lazyAbiAlignment(target, fields_anon_decl.arena()),
+                        try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()),
                     };
                     struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
                 }
@@ -16463,7 +16483,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 else
                     field.default_val;
                 const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
-                const alignment = field.alignment(target, layout);
+                const alignment = field.alignment(mod, layout);

                 struct_field_fields.* = .{
                     // name: []const u8,
@@ -16506,7 +16526,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 if (layout == .Packed) {
                     const struct_obj = struct_ty.castTag(.@"struct").?.data;
                     assert(struct_obj.haveLayout());
-                    assert(struct_obj.backing_int_ty.isInt());
+                    assert(struct_obj.backing_int_ty.isInt(mod));
                    const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
                    break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
                } else {
@@ -16584,8 +16604,7 @@ fn typeInfoDecls(
         try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
         try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
         const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index);
-        var buffer: Value.ToTypeBuffer = undefined;
-        break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena());
+        break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena());
     };
     try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena));
@@ -16632,8 +16651,7 @@ fn typeInfoNamespaceDecls(
         if (decl.kind == .@"usingnamespace") {
             if (decl.analysis == .in_progress) continue;
             try sema.mod.ensureDeclAnalyzed(decl_index);
-            var buf: Value.ToTypeBuffer = undefined;
-            const new_ns = decl.val.toType(&buf).getNamespace().?;
+            const new_ns = decl.val.toType().getNamespace().?;
             try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces);
             continue;
         }
@@ -16709,10 +16727,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
 }

 fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
-    switch (operand.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (operand.zigTypeTag(mod)) {
         .ComptimeInt => return Type.comptime_int,
         .Int => {
-            const bits = operand.bitSize(sema.mod.getTarget());
+            const bits = operand.bitSize(mod);
             const count = if (bits == 0)
                 0
             else blk: {
@@ -16723,10 +16742,10 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
                 }
                 break :blk count;
             };
-            return Module.makeIntType(sema.arena, .unsigned, count);
+            return mod.intType(.unsigned, count);
         },
         .Vector => {
-            const elem_ty = operand.elemType2();
+            const elem_ty = operand.elemType2(mod);
             const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
             return Type.Tag.vector.create(sema.arena, .{
                 .len = operand.vectorLen(),
@@ -16920,9 +16939,10 @@ fn finishCondBr(
 }

 fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Optional, .Null, .Undefined => return,
-        .Pointer => if (ty.isPtrLikeOptional()) return,
+        .Pointer => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
     return sema.failWithExpectedOptionalType(block, src, ty);
@@ -16951,10 +16971,11 @@ fn zirIsNonNullPtr(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ptr = try sema.resolveInst(inst_data.operand);
-    try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2());
+    try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod));
     if ((try sema.resolveMaybeUndefVal(ptr)) == null) {
         return block.addUnOp(.is_non_null_ptr, ptr);
     }
@@ -16963,7 +16984,8 @@ fn zirIsNonNullPtr(
 }

 fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ErrorSet, .ErrorUnion, .Undefined => return,
         else => return sema.fail(block, src, "expected error union type, found '{}'", .{
             ty.fmt(sema.mod),
@@ -16986,10 +17008,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ptr = try sema.resolveInst(inst_data.operand);
-    try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2());
+    try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod));
     const loaded = try sema.analyzeLoad(block, src, ptr, src);
     return sema.analyzeIsNonErr(block, src, loaded);
 }
@@ -17012,6 +17035,7 @@ fn zirCondbr(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
     const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
@@ -17052,7 +17076,7 @@ fn zirCondbr(
         const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
         const err_operand = try sema.resolveInst(err_inst_data.operand);
         const operand_ty = sema.typeOf(err_operand);
-        assert(operand_ty.zigTypeTag() == .ErrorUnion);
+        assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
         const result_ty = operand_ty.errorUnionSet();
         break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
     };
@@ -17079,7 +17103,7 @@ fn zirCondbr(
     return always_noreturn;
 }

-fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref {
+fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -17087,7 +17111,8 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
     const body = sema.code.extra[extra.end..][0..extra.data.body_len];
     const err_union = try sema.resolveInst(extra.data.operand);
     const err_union_ty = sema.typeOf(err_union);
-    if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+    const mod = sema.mod;
+    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
         return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
             err_union_ty.fmt(sema.mod),
         });
@@ -17124,7 +17149,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
return try_inst; } -fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17133,7 +17158,8 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -17275,16 +17301,17 @@ fn zirRetImplicit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = try sema.resolveInst(inst_data.operand); const r_brace_src = inst_data.src(); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const base_tag = sema.fn_ret_ty.baseZigTypeTag(); + const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod); if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17294,7 +17321,7 @@ fn zirRetImplicit( } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17397,17 +17424,19 @@ fn retWithErrTracing( } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return false; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return false; - return fn_ret_ty.isError() and - sema.mod.comp.bin_file.options.error_return_tracing; + return fn_ret_ty.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return; + if (!mod.backendSupportsFeature(.error_return_trace)) return; + if (!mod.comp.bin_file.options.error_return_tracing) return; // This is only relevant at runtime. 
if (block.is_comptime or block.is_typeof) return; @@ -17415,7 +17444,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const save_index = inst_data.operand == .none or b: { const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - break :b operand_ty.isError(); + break :b operand_ty.isError(mod); }; if (save_index) @@ -17467,11 +17496,12 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { - assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion); + const mod = sema.mod; + assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { const op_ty = sema.typeOf(uncasted_operand); - switch (op_ty.zigTypeTag()) { + switch (op_ty.zigTypeTag(mod)) { .ErrorSet => { try payload.data.addErrorSet(sema.gpa, op_ty); }, @@ -17492,7 +17522,8 @@ fn analyzeRet( // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. - if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { + const mod = sema.mod; + if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { @@ -17540,6 +17571,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node }; @@ -17582,7 +17614,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air break :blk 0; } } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); break :blk abi_align; } else 0; @@ -17591,7 +17623,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); - } else if (elem_ty.zigTypeTag() == .Fn and target.cpu.arch == .avr) .flash else .generic; + } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); @@ -17611,9 +17643,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{}); } - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } @@ -17623,7 +17655,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air { 
return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (inst_data.size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{}); } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { @@ -17639,7 +17671,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{}); } } @@ -17666,8 +17698,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const obj_ty = try sema.resolveType(block, src, inst_data.operand); + const mod = sema.mod; - switch (obj_ty.zigTypeTag()) { + switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty), .Void => return sema.addConstant(obj_ty, Value.void), @@ -17696,9 +17729,10 @@ fn structInitEmpty( } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { + const mod = sema.mod; const arr_len = obj_ty.arrayLen(); if (arr_len != 0) { - if (obj_ty.zigTypeTag() == .Array) { + if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); } else { return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); @@ -17766,13 +17800,14 @@ fn zirStructInit( const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = inst_data.src(); + const mod = sema.mod; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type); try sema.resolveTypeLayout(resolved_ty); - if (resolved_ty.zigTypeTag() == .Struct) { + if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. // Maps field index to field_type index of where it was already initialized. 
@@ -17815,7 +17850,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(field_index)) |default_value| { + if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -17827,7 +17862,7 @@ fn zirStructInit( } return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref); - } else if (resolved_ty.zigTypeTag() == .Union) { + } else if (resolved_ty.zigTypeTag(mod) == .Union) { if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } @@ -18014,6 +18049,7 @@ fn zirStructInitAnon( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); @@ -18050,7 +18086,7 @@ fn zirStructInitAnon( const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag() == .Opaque) { + if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); @@ -18148,15 +18184,16 @@ fn zirArrayInit( const array_ty = try sema.resolveType(block, src, args[0]); const sentinel_val = array_ty.sentinel(); + const mod = sema.mod; const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); - const elem_ty = if (array_ty.zigTypeTag() == .Struct) + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) array_ty.structFieldType(i) else - array_ty.elemType2(); + array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); @@ -18169,7 +18206,7 @@ fn zirArrayInit( } if (sentinel_val) |some| { - resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some); + resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some); } const opt_runtime_index: ?u32 = for (resolved_args, 0..) 
|arg, i| { @@ -18227,7 +18264,7 @@ fn zirArrayInit( const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.elemType2(), + .pointee_type = array_ty.elemType2(mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18252,6 +18289,7 @@ fn zirArrayInitAnon( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); + const mod = sema.mod; const types = try sema.arena.alloc(Type, operands.len); const values = try sema.arena.alloc(Value, operands.len); @@ -18262,7 +18300,7 @@ fn zirArrayInitAnon( const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag() == .Opaque) { + if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -18379,11 +18417,12 @@ fn fieldType( field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; var cur_ty = aggregate_ty; while (true) { const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; - switch (cur_ty.zigTypeTag()) { + switch (cur_ty.zigTypeTag(mod)) { .Struct => { if (cur_ty.isAnonStruct()) { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); @@ -18449,14 +18488,14 @@ fn zirFrame( } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); if (ty.isNoReturn()) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiAlignment(target, sema.arena); + const val = try ty.lazyAbiAlignment(mod, sema.arena); if (val.tag() == .lazy_align) { try sema.queueFullTypeResolution(ty); } @@ -18499,16 +18538,17 @@ fn zirUnaryMath( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, .Vector => { - const scalar_ty = operand_ty.scalarType(); - switch (scalar_ty.zigTypeTag()) { + const scalar_ty = operand_ty.scalarType(mod); + switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}), } @@ -18516,9 +18556,9 @@ fn zirUnaryMath( else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}), } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const vec_len = 
operand_ty.vectorLen(); const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -18564,7 +18604,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; try sema.resolveTypeLayout(operand_ty); - const enum_ty = switch (operand_ty.zigTypeTag()) { + const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); const bytes = val.castTag(.enum_literal).?.data; @@ -18654,11 +18694,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const bits_val = struct_val[1]; const signedness = signedness_val.toEnum(std.builtin.Signedness); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); - const ty = switch (signedness) { - .signed => try Type.Tag.int_signed.create(sema.arena, bits), - .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), - }; + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, .Vector => { @@ -18667,9 +18704,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const len_val = struct_val[0]; const child_val = struct_val[1]; - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = child_val.toType(&buffer); + const len = len_val.toUnsignedInt(mod); + const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -18682,7 +18718,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // bits: comptime_int, const bits_val = struct_val[0]; - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -18708,10 +18744,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); - var buffer: Value.ToTypeBuffer = undefined; - const unresolved_elem_ty = child_val.toType(&buffer); + const unresolved_elem_ty = child_val.toType(); const elem_ty = if (abi_align == 0) unresolved_elem_ty else t: { @@ -18723,7 +18758,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); var actual_sentinel: ?Value = null; - if (!sentinel_val.isNull()) { + if (!sentinel_val.isNull(mod)) { if (ptr_size == .One or ptr_size == .C) { return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } @@ -18735,9 +18770,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; } - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } @@ -18747,7 +18782,7 @@ fn 
zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in { return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (ptr_size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{}); } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { @@ -18763,7 +18798,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "C pointers cannot point to opaque types", .{}); } } @@ -18790,9 +18825,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // sentinel: ?*const anyopaque, const sentinel_val = struct_val[2]; - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const len = len_val.toUnsignedInt(mod); + const child_ty = try child_val.toType().copy(sema.arena); const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, @@ -18810,8 +18844,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // child: type, const child_val = struct_val[0]; - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const child_ty = try child_val.toType().copy(sema.arena); const ty = try Type.optional(sema.arena, child_ty); return sema.addType(ty); @@ -18824,11 +18857,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // payload: type, const payload_val = struct_val[1]; - var buffer: Value.ToTypeBuffer = undefined; - const error_set_ty = try error_set_val.toType(&buffer).copy(sema.arena); - const payload_ty = try payload_val.toType(&buffer).copy(sema.arena); + const error_set_ty = try error_set_val.toType().copy(sema.arena); + const payload_ty = try payload_val.toType().copy(sema.arena); - if (error_set_ty.zigTypeTag() != .ErrorSet) { + if (error_set_ty.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } @@ -18839,11 +18871,11 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.addType(ty); }, .ErrorSet => { - const payload_val = union_val.val.optionalValue() orelse + const payload_val = union_val.val.optionalValue(mod) orelse return sema.addType(Type.initTag(.anyerror)); const slice_val = payload_val.castTag(.slice).?.data; - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod.getTarget())); + const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); var names: Module.ErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); var i: usize = 0; @@ -18890,7 +18922,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "reified structs must have no decls", .{}); } - if (layout != .Packed and !backing_int_val.isNull()) { + if (layout != .Packed and !backing_int_val.isNull(mod)) { return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } @@ -18954,10 +18986,9 @@ fn zirReify(sema: *Sema, 
block: *Block, extended: Zir.Inst.Extended.InstData, in }; // Enum tag type - var buffer: Value.ToTypeBuffer = undefined; - const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator); + const int_tag_ty = try tag_type_val.toType().copy(new_decl_arena_allocator); - if (int_tag_ty.zigTypeTag() != .Int) { + if (int_tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); } enum_obj.tag_ty = int_tag_ty; @@ -19090,7 +19121,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const new_decl_arena_allocator = new_decl_arena.allocator(); const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (!tag_type_val.isNull()) + const type_tag = if (!tag_type_val.isNull(mod)) Type.Tag.union_tagged else if (layout != .Auto) Type.Tag.@"union" @@ -19130,11 +19161,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in var tag_ty_field_names: ?Module.EnumFull.NameMap = null; var enum_field_names: ?*Module.EnumNumbered.NameMap = null; const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); - if (tag_type_val.optionalValue()) |payload_val| { - var buffer: Value.ToTypeBuffer = undefined; - union_obj.tag_ty = try payload_val.toType(&buffer).copy(new_decl_arena_allocator); + if (tag_type_val.optionalValue(mod)) |payload_val| { + union_obj.tag_ty = try payload_val.toType().copy(new_decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { + if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}); } tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); @@ -19187,14 +19217,13 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "duplicate union field {s}", .{field_name}); } - var buffer: Value.ToTypeBuffer = undefined; - const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator); + const field_ty = try type_val.toType().copy(new_decl_arena_allocator); gop.value_ptr.* = .{ .ty = field_ty, - .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?), + .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?), }; - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); errdefer msg.destroy(sema.gpa); @@ -19216,7 +19245,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19280,20 +19309,18 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment = @intCast(u29, alignment_val.toUnsignedInt(target)); + const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod)); if (alignment == 
target_util.defaultFunctionAlignment(target)) { break :alignment 0; } else { break :alignment alignment; } }; - const return_type = return_type_val.optionalValue() orelse + const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - var buf: Value.ToTypeBuffer = undefined; - const args_slice_val = args_val.castTag(.slice).?.data; - const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget())); + const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); const param_types = try sema.arena.alloc(Type, args_len); const comptime_params = try sema.arena.alloc(bool, args_len); @@ -19316,12 +19343,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); } - const param_type_val = param_type_opt_val.optionalValue() orelse + const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - const param_type = try param_type_val.toType(&buf).copy(sema.arena); + const param_type = try param_type_val.toType().copy(sema.arena); if (arg_is_noalias) { - if (!param_type.isPtrAtRuntime()) { + if (!param_type.isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse @@ -19336,7 +19363,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in .param_types = param_types, .comptime_params = comptime_params.ptr, .noalias_bits = noalias_bits, - .return_type = try return_type.toType(&buf).copy(sema.arena), + .return_type = try return_type.toType().copy(sema.arena), .alignment = alignment, .cc = cc, .is_var_args = is_var_args, @@ -19396,8 +19423,6 @@ fn reifyStruct( }, }; - const target = mod.getTarget(); - // Fields const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); @@ -19420,7 +19445,7 @@ fn reifyStruct( if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); if (layout == .Packed) { if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{}); @@ -19461,7 +19486,7 @@ fn reifyStruct( return sema.fail(block, src, "duplicate struct field {s}", .{field_name}); } - const default_val = if (default_value_val.optionalValue()) |opt_val| blk: { + const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: { const payload_val = if (opt_val.pointerDecl()) |opt_decl| mod.declPtr(opt_decl).val else @@ -19472,8 +19497,7 @@ fn reifyStruct( return sema.fail(block, src, "comptime field without default initialization value", .{}); } - var buffer: Value.ToTypeBuffer = undefined; - const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator); + const field_ty = try type_val.toType().copy(new_decl_arena_allocator); gop.value_ptr.* = .{ .ty = field_ty, .abi_align = abi_align, @@ -19482,7 +19506,7 @@ fn reifyStruct( .offset = undefined, }; - if (field_ty.zigTypeTag() == .Opaque) { + if 
(field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -19492,7 +19516,7 @@ fn reifyStruct( }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{}); errdefer msg.destroy(sema.gpa); @@ -19514,7 +19538,7 @@ fn reifyStruct( break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19545,20 +19569,15 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(target); + fields_bit_sum += field.ty.bitSize(mod); } - if (backing_int_val.optionalValue()) |payload| { - var buf: Value.ToTypeBuffer = undefined; - const backing_int_ty = payload.toType(&buf); + if (backing_int_val.optionalValue(mod)) |payload| { + const backing_int_ty = payload.toType(); try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator); } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, fields_bit_sum), - }; - struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } struct_obj.status = .have_layout; @@ -19569,6 +19588,7 @@ fn reifyStruct( } fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -19594,7 +19614,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst ptr_info.@"addrspace" = dest_addrspace; const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); - const dest_ty = if (ptr_ty.zigTypeTag() == .Optional) + const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) try Type.optional(sema.arena, dest_ptr_ty) else dest_ptr_ty; @@ -19716,6 +19736,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19730,12 +19751,12 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
if (try sema.resolveMaybeUndefVal(operand)) |val| { const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty); return sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known"); } try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); - if (dest_ty.intInfo(sema.mod.getTarget()).bits == 0) { + if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); @@ -19755,6 +19776,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19769,7 +19791,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (try sema.resolveMaybeUndefVal(operand)) |val| { const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema); return sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -19778,6 +19800,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -19790,9 +19813,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_ty = try sema.resolveType(block, src, extra.lhs); try sema.checkPtrType(block, type_src, ptr_ty); - const elem_ty = ptr_ty.elemType2(); - const target = sema.mod.getTarget(); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema); + const elem_ty = ptr_ty.elemType2(mod); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); if (ptr_ty.isSlice()) { const msg = msg: { @@ -19805,8 +19827,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| { - const addr = val.toUnsignedInt(target); - if (!ptr_ty.isAllowzeroPtr() and addr == 0) + const addr = val.toUnsignedInt(mod); + if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)}); if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)}); @@ -19820,8 +19842,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } try sema.requireRuntimeBlock(block, src, operand_src); - if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag() == .Fn)) { - if (!ptr_ty.isAllowzeroPtr()) { + if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) { + if (!ptr_ty.isAllowzeroPtr(mod)) { const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -19926,6 +19948,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19934,7 +19957,6 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); try sema.checkPtrType(block, dest_ty_src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); @@ -19982,18 +20004,18 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else operand; - const dest_elem_ty = dest_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); try sema.resolveTypeLayout(dest_elem_ty); - const dest_align = dest_ty.ptrAlignment(target); + const dest_align = dest_ty.ptrAlignment(mod); - const operand_elem_ty = operand_ty.elemType2(); + const operand_elem_ty = operand_ty.elemType2(mod); try sema.resolveTypeLayout(operand_elem_ty); - const operand_align = operand_ty.ptrAlignment(target); + const operand_align = operand_ty.ptrAlignment(mod); // If the destination is less aligned than the source, preserve the source alignment 
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result - if (dest_ty.zigTypeTag() == .Optional) { + if (dest_ty.zigTypeTag(mod) == .Optional) { var buf: Type.Payload.ElemType = undefined; var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data; dest_ptr_info.@"align" = operand_align; @@ -20006,8 +20028,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; if (dest_is_slice) { - const operand_elem_size = operand_elem_ty.abiSize(target); - const dest_elem_size = dest_elem_ty.abiSize(target); + const operand_elem_size = operand_elem_ty.abiSize(mod); + const dest_elem_size = dest_elem_ty.abiSize(mod); if (operand_elem_size != dest_elem_size) { return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{}); } @@ -20032,21 +20054,21 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); } - if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } - if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) { + if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); } return sema.addConstant(aligned_dest_ty, operand_val); } try sema.requireRuntimeBlock(block, src, null); - if (block.wantSafety() and operand_ty.ptrAllowsZero() and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const ptr_int = try block.addUnOp(.ptrtoint, ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); @@ -20102,6 +20124,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20112,7 +20135,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); - const is_vector = operand_ty.zigTypeTag() == .Vector; + const is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_ty = if (is_vector) try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty) else @@ -20122,15 +20145,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.coerce(block, dest_ty, operand, operand_src); } - const target = sema.mod.getTarget(); - 
const dest_info = dest_scalar_ty.intInfo(target); + const dest_info = dest_scalar_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(dest_ty)) |val| { return sema.addConstant(dest_ty, val); } - if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) { - const operand_info = operand_ty.intInfo(target); + if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) { + const operand_info = operand_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } @@ -20186,6 +20208,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20199,12 +20222,12 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var ptr_info = ptr_ty.ptrInfo().data; ptr_info.@"align" = dest_align; var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); - if (ptr_ty.zigTypeTag() == .Optional) { + if (ptr_ty.zigTypeTag(mod) == .Optional) { dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| { - if (try val.getUnsignedIntAdvanced(sema.mod.getTarget(), null)) |addr| { + if (try val.getUnsignedIntAdvanced(mod, null)) |addr| { if (addr % dest_align != 0) { return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); } @@ -20247,23 +20270,23 @@ fn zirBitCount( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64, + comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); _ = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = operand_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } - const result_scalar_ty = try Type.smallestUnsignedInt(sema.arena, bits); - switch (operand_ty.zigTypeTag()) { + const result_scalar_ty = try mod.smallestUnsignedInt(bits); + switch (operand_ty.zigTypeTag(mod)) { .Vector => { const vec_len = operand_ty.vectorLen(); const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty); @@ -20272,10 +20295,10 @@ fn zirBitCount( var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - const count = comptimeOp(elem_val, scalar_ty, target); + const count = comptimeOp(elem_val, scalar_ty, mod); elem.* = try Value.Tag.int_u64.create(sema.arena, count); } return sema.addConstant( @@ -20291,7 +20314,7 @@ fn zirBitCount( if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_scalar_ty); try sema.resolveLazyValue(val); - return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, target)); + return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_scalar_ty, operand); @@ -20302,14 +20325,14 @@ fn zirBitCount( } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = scalar_ty.intInfo(target).bits; + const bits = scalar_ty.intInfo(mod).bits; if (bits % 8 != 0) { return sema.fail( block, @@ -20323,11 +20346,11 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant(operand_ty, val); } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.byteSwap(operand_ty, target, sema.arena); + const result_val = try val.byteSwap(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20344,7 +20367,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena); + elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); } return sema.addConstant( operand_ty, @@ -20371,12 +20394,12 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstant(operand_ty, val); } - const target = sema.mod.getTarget(); - switch (operand_ty.zigTypeTag()) { + const mod = sema.mod; + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.bitReverse(operand_ty, target, sema.arena); + const result_val = try val.bitReverse(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20393,7 +20416,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
 |*elem, i| {
             const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
-            elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
+            elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena);
         }

         return sema.addConstant(
             operand_ty,
@@ -20429,10 +20452,10 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     const ty = try sema.resolveType(block, lhs_src, extra.lhs);
     const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known");
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;

     try sema.resolveTypeLayout(ty);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {},
         else => {
             const msg = msg: {
@@ -20464,15 +20487,16 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
                 if (i == field_index) {
                     return bit_sum;
                 }
-                bit_sum += field.ty.bitSize(target);
+                bit_sum += field.ty.bitSize(mod);
             } else unreachable;
         },
-        else => return ty.structFieldOffset(field_index, target) * 8,
+        else => return ty.structFieldOffset(field_index, mod) * 8,
     }
 }

 fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Struct, .Enum, .Union, .Opaque => return,
         else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}),
     }
@@ -20480,7 +20504,8 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com

 /// Returns `true` if the type was a comptime_int.
 fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
-    switch (try ty.zigTypeTagOrPoison()) {
+    const mod = sema.mod;
+    switch (try ty.zigTypeTagOrPoison(mod)) {
         .ComptimeInt => return true,
         .Int => return false,
         else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}),
@@ -20493,7 +20518,8 @@ fn checkInvalidPtrArithmetic(
     src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (try ty.zigTypeTagOrPoison()) {
+    const mod = sema.mod;
+    switch (try ty.zigTypeTagOrPoison(mod)) {
         .Pointer => switch (ty.ptrSize()) {
             .One, .Slice => return,
             .Many, .C => return sema.fail(
@@ -20532,7 +20558,8 @@ fn checkPtrOperand(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Pointer => return,
         .Fn => {
             const msg = msg: {
@@ -20550,7 +20577,7 @@ fn checkPtrOperand(
             };
             return sema.failWithOwnedErrorMsg(msg);
         },
-        .Optional => if (ty.isPtrLikeOptional()) return,
+        .Optional => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
     return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
@@ -20562,7 +20589,8 @@ fn checkPtrType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Pointer => return,
         .Fn => {
             const msg = msg: {
@@ -20580,7 +20608,7 @@ fn checkPtrType(
             };
             return sema.failWithOwnedErrorMsg(msg);
         },
-        .Optional => if (ty.isPtrLikeOptional()) return,
+        .Optional => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
     return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
@@ -20592,9 +20620,10 @@ fn checkVectorElemType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Int, .Float, .Bool => return,
-        else => if (ty.isPtrAtRuntime()) return,
+        else => if (ty.isPtrAtRuntime(mod)) return,
     }
     return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)});
 }
@@ -20605,7 +20634,8 @@ fn checkFloatType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ComptimeInt, .ComptimeFloat, .Float => {},
         else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}),
     }
@@ -20617,9 +20647,10 @@ fn checkNumericType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
-        .Vector => switch (ty.childType().zigTypeTag()) {
+        .Vector => switch (ty.childType().zigTypeTag(mod)) {
             .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
             else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
         },
@@ -20637,9 +20668,9 @@ fn checkAtomicPtrOperand(
     ptr_src: LazySrcLoc,
     ptr_const: bool,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
-    var diag: target_util.AtomicPtrAlignmentDiagnostics = .{};
-    const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) {
+    const mod = sema.mod;
+    var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
+    const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
         error.FloatTooBig => return sema.fail(
             block,
             elem_ty_src,
@@ -20668,7 +20699,7 @@ fn checkAtomicPtrOperand(
     };

     const ptr_ty = sema.typeOf(ptr);
-    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) {
+    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
         .Pointer => ptr_ty.ptrInfo().data,
         else => {
             const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
@@ -20735,12 +20766,13 @@ fn checkIntOrVector(
     operand: Air.Inst.Ref,
     operand_src: LazySrcLoc,
 ) CompileError!Type {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
-    switch (try operand_ty.zigTypeTagOrPoison()) {
+    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int => return operand_ty,
         .Vector => {
             const elem_ty = operand_ty.childType();
-            switch (try elem_ty.zigTypeTagOrPoison()) {
+            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                     elem_ty.fmt(sema.mod),
@@ -20759,11 +20791,12 @@ fn checkIntOrVectorAllowComptime(
     operand_ty: Type,
     operand_src: LazySrcLoc,
 ) CompileError!Type {
-    switch (try operand_ty.zigTypeTagOrPoison()) {
+    const mod = sema.mod;
+    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int, .ComptimeInt => return operand_ty,
         .Vector => {
             const elem_ty = operand_ty.childType();
-            switch (try elem_ty.zigTypeTagOrPoison()) {
+            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
                     elem_ty.fmt(sema.mod),
@@ -20777,7 +20810,8 @@ fn checkIntOrVectorAllowComptime(
 }

 fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ErrorSet => return,
         else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}),
     }
@@ -20805,11 +20839,12 @@ fn checkSimdBinOp(
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
 ) CompileError!SimdBinOp {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(uncasted_lhs);
     const rhs_ty = sema.typeOf(uncasted_rhs);

     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null;
+    var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null;
     const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
@@ -20823,7 +20858,7 @@ fn checkSimdBinOp(
         .lhs_val = try sema.resolveMaybeUndefVal(lhs),
         .rhs_val = try sema.resolveMaybeUndefVal(rhs),
         .result_ty = result_ty,
-        .scalar_ty = result_ty.scalarType(),
+        .scalar_ty = result_ty.scalarType(mod),
     };
 }

@@ -20836,8 +20871,9 @@ fn checkVectorizableBinaryOperands(
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
 ) CompileError!void {
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const mod = sema.mod;
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;

     const lhs_is_vector = switch (lhs_zig_ty_tag) {
@@ -20892,6 +20928,7 @@ fn resolveExportOptions(
     src: LazySrcLoc,
     zir_ref: Zir.Inst.Ref,
 ) CompileError!std.builtin.ExportOptions {
+    const mod = sema.mod;
     const export_options_ty = try sema.getBuiltinType("ExportOptions");
     const air_ref = try sema.resolveInst(zir_ref);
     const options = try sema.coerce(block, export_options_ty, air_ref, src);
@@ -20904,7 +20941,7 @@ fn resolveExportOptions(
     const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
     const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
     const name_ty = Type.initTag(.const_slice_u8);
-    const name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod);
+    const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);

     const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
     const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
@@ -20913,8 +20950,8 @@ fn resolveExportOptions(
     const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
     const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
     const section_ty = Type.initTag(.const_slice_u8);
-    const section = if (section_opt_val.optionalValue()) |section_val|
-        try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod)
+    const section = if (section_opt_val.optionalValue(mod)) |section_val|
+        try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
     else
         null;

@@ -20979,6 +21016,7 @@ fn zirCmpxchg(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
     const air_tag: Air.Inst.Tag = switch (extended.small) {
         0 => .cmpxchg_weak,
         1 => .cmpxchg_strong,
         else => unreachable,
     };
     // zig fmt: on
     const expected_value = try sema.resolveInst(extra.expected_value);
     const elem_ty = sema.typeOf(expected_value);
-    if (elem_ty.zigTypeTag() == .Float) {
+    if (elem_ty.zigTypeTag(mod) == .Float) {
         return sema.fail(
             block,
             elem_ty_src,
@@ -21102,26 +21140,26 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known");
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;

-    if (operand_ty.zigTypeTag() != .Vector) {
-        return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(sema.mod)});
+    if (operand_ty.zigTypeTag(mod) != .Vector) {
+        return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
     }

     const scalar_ty = operand_ty.childType();

     // Type-check depending on operation.
     switch (operation) {
-        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) {
+        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
             .Int, .Bool => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
-                @tagName(operation), operand_ty.fmt(sema.mod),
+                @tagName(operation), operand_ty.fmt(mod),
             }),
         },
-        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) {
+        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
             .Int, .Float => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
-                @tagName(operation), operand_ty.fmt(sema.mod),
+                @tagName(operation), operand_ty.fmt(mod),
             }),
         },
     }
@@ -21136,19 +21174,19 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
         if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty);

-        var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0);
+        var accum: Value = try operand_val.elemValue(mod, sema.arena, 0);
         var elem_buf: Value.ElemValueBuffer = undefined;
         var i: u32 = 1;
         while (i < vec_len) : (i += 1) {
-            const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf);
             switch (operation) {
-                .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod),
-                .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod),
-                .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod),
-                .Min => accum = accum.numberMin(elem_val, target),
-                .Max => accum = accum.numberMax(elem_val, target),
+                .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
+                .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
+                .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
+                .Min => accum = accum.numberMin(elem_val, mod),
+                .Max => accum = accum.numberMax(elem_val, mod),
                 .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
-                .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod),
+                .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
             }
         }
         return sema.addConstant(scalar_ty, accum);
@@ -21165,6 +21203,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 }

 fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
     const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -21177,7 +21216,7 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var mask = try sema.resolveInst(extra.mask);
     var mask_ty = sema.typeOf(mask);

-    const mask_len = switch (sema.typeOf(mask).zigTypeTag()) {
+    const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
         .Array, .Vector => sema.typeOf(mask).arrayLen(),
         else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
     };
@@ -21200,6 +21239,7 @@ fn analyzeShuffle(
     mask: Value,
     mask_len: u32,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node };
     const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node };
     const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node };
@@ -21211,7 +21251,7 @@ fn analyzeShuffle(
         .elem_type = elem_ty,
     });

-    var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) {
+    var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
         .Array, .Vector => sema.typeOf(a).arrayLen(),
         .Undefined => null,
         else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
             sema.typeOf(a).fmt(sema.mod),
         }),
     };
-    var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) {
+    var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
         .Array, .Vector => sema.typeOf(b).arrayLen(),
         .Undefined => null,
         else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
@@ -21255,7 +21295,7 @@ fn analyzeShuffle(
             var buf: Value.ElemValueBuffer = undefined;
             const elem = mask.elemValueBuffer(sema.mod, i, &buf);
             if (elem.isUndef()) continue;
-            const int = elem.toSignedInt(sema.mod.getTarget());
+            const int = elem.toSignedInt(mod);
             var unsigned: u32 = undefined;
             var chosen: u32 = undefined;
             if (int >= 0) {
@@ -21297,7 +21337,7 @@ fn analyzeShuffle(
                     values[i] = Value.undef;
                     continue;
                 }
-                const int = mask_elem_val.toSignedInt(sema.mod.getTarget());
+                const int = mask_elem_val.toSignedInt(mod);
                 const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
                 if (int >= 0) {
                     values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned);
@@ -21356,6 +21396,7 @@ fn analyzeShuffle(
 }

 fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);

@@ -21369,7 +21410,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const pred_uncoerced = try sema.resolveInst(extra.pred);
     const pred_ty = sema.typeOf(pred_uncoerced);

-    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) {
+    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
         .Vector, .Array => pred_ty.arrayLen(),
         else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
     };
@@ -21489,6 +21530,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 }

 fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -21505,7 +21547,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
     const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);

-    switch (elem_ty.zigTypeTag()) {
+    switch (elem_ty.zigTypeTag(mod)) {
         .Enum => if (op != .Xchg) {
             return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
         },
@@ -21536,7 +21578,6 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             break :rs operand_src;
         };
         if (ptr_val.isComptimeMutablePtr()) {
-            const target = sema.mod.getTarget();
             const ptr_ty = sema.typeOf(ptr);
             const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
             const new_val = switch (op) {
@@ -21544,12 +21585,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
                 .Xchg => operand_val,
                 .Add  => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
                 .Sub  => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
-                .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, sema.mod),
-                .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, sema.mod),
-                .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, sema.mod),
-                .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, sema.mod),
-                .Max  => stored_val.numberMax        (operand_val, target),
-                .Min  => stored_val.numberMin        (operand_val, target),
+                .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, mod),
+                .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, mod),
+                .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, mod),
+                .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, mod),
+                .Max  => stored_val.numberMax        (operand_val, mod),
+                .Min  => stored_val.numberMin        (operand_val, mod),
                 // zig fmt: on
             };
             try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
@@ -21623,8 +21664,9 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
     const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
     const maybe_addend = try sema.resolveMaybeUndefVal(addend);
+    const mod = sema.mod;

-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .Vector => {},
         else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
     }
@@ -21743,7 +21785,6 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const callee_ty = sema.typeOf(func);
     const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false);
-
     const ensure_result_used = extra.flags.ensure_result_used;
     return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null);
 }
@@ -21760,13 +21801,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known");
     const field_ptr = try sema.resolveInst(extra.field_ptr);
     const field_ptr_ty = sema.typeOf(field_ptr);
+    const mod = sema.mod;

-    if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) {
+    if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
         return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
     }
     try sema.resolveTypeLayout(parent_ty);

-    const field_index = switch (parent_ty.zigTypeTag()) {
+    const field_index = switch (parent_ty.zigTypeTag(mod)) {
         .Struct => blk: {
             if (parent_ty.isTuple()) {
                 if (mem.eql(u8, field_name, "len")) {
@@ -21781,7 +21823,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         else => unreachable,
     };

-    if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) {
+    if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) {
         return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
     }

@@ -21913,15 +21955,14 @@ fn analyzeMinMax(
 ) CompileError!Air.Inst.Ref {
     assert(operands.len == operand_srcs.len);
     assert(operands.len > 0);
+    const mod = sema.mod;

     if (operands.len == 1) return operands[0];

-    const mod = sema.mod;
-    const target = mod.getTarget();
     const opFunc = switch (air_tag) {
         .min => Value.numberMin,
         .max => Value.numberMax,
-        else => unreachable,
+        else => @compileError("unreachable"),
     };

     // First, find all comptime-known arguments, and get their min/max
@@ -21949,7 +21990,7 @@ fn analyzeMinMax(
         try sema.resolveLazyValue(operand_val);

         const vec_len = simd_op.len orelse {
-            const result_val = opFunc(cur_val, operand_val, target);
+            const result_val = opFunc(cur_val, operand_val, mod);
             cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
             continue;
         };
@@ -21959,7 +22000,7 @@ fn analyzeMinMax(
         var lhs_buf: Value.ElemValueBuffer = undefined;
         var rhs_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
             const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf);
             const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf);
-            elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
+            elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod);
         }
         cur_minmax = try sema.addConstant(
             simd_op.result_ty,
@@ -21984,7 +22025,7 @@ fn analyzeMinMax(
             break :refined orig_ty;
         }

-        const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: {
+        const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
             const elem_ty = orig_ty.childType();
             const len = orig_ty.vectorLen();

@@ -21996,16 +22037,16 @@ fn analyzeMinMax(
             for (1..len) |idx| {
                 const elem_val = try val.elemValue(mod, sema.arena, idx);
                 if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef
-                if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val;
-                if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val;
+                if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val;
+                if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val;
             }

-            const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max);
+            const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
             break :blk try Type.vector(sema.arena, len, refined_elem_ty);
         } else blk: {
             if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
             if (val.isUndef()) break :blk orig_ty; // can't refine undef
-            break :blk try Type.intFittingRange(target, sema.arena, val, val);
+            break :blk try mod.intFittingRange(val, val);
         };

         // Apply the refined type to the current value - this isn't strictly necessary in the
@@ -22061,7 +22102,7 @@ fn analyzeMinMax(
         // Finally, refine the type based on the comptime-known bound.
         if (known_undef) break :refine; // can't refine undef
         const unrefined_ty = sema.typeOf(cur_minmax.?);
-        const is_vector = unrefined_ty.zigTypeTag() == .Vector;
+        const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;

         const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty;
         const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty;
@@ -22069,18 +22110,18 @@ fn analyzeMinMax(
         // Compute the final bounds based on the runtime type and the comptime-known bound type
         const min_val = switch (air_tag) {
-            .min => try unrefined_elem_ty.minInt(sema.arena, target),
-            .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct
+            .min => try unrefined_elem_ty.minInt(sema.arena, mod),
+            .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct
             else => unreachable,
         };
         const max_val = switch (air_tag) {
-            .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct
-            .max => try unrefined_elem_ty.maxInt(sema.arena, target),
+            .min => try comptime_elem_ty.maxInt(sema.arena, mod), // @min(ct, rt) <= ct
+            .max => try unrefined_elem_ty.maxInt(sema.arena, mod),
             else => unreachable,
         };

         // Find the smallest type which can contain these bounds
-        const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val);
+        const final_elem_ty = try mod.intFittingRange(min_val, max_val);

         const final_ty = if (is_vector)
             try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty)
@@ -22132,6 +22173,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
     const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
     const target = sema.mod.getTarget();
+    const mod = sema.mod;

     if (dest_ty.isConstPtr()) {
         return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
@@ -22196,7 +22238,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
         if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src;
         if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
-            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?;
+            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
             const len = try sema.usizeCast(block, dest_src, len_u64);
             for (0..len) |i| {
                 const elem_index = try sema.addIntUnsigned(Type.usize, i);
@@ -22239,12 +22281,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         // lowering. The AIR instruction requires pointers with element types of
         // equal ABI size.

-        if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) {
+        if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) {
             return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
         }

-        const dest_elem_ty = dest_ty.elemType2();
-        const src_elem_ty = src_ty.elemType2();
+        const dest_elem_ty = dest_ty.elemType2(mod);
+        const src_elem_ty = src_ty.elemType2(mod);
         if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) {
             return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
         }
@@ -22255,7 +22297,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     var new_dest_ptr = dest_ptr;
     var new_src_ptr = src_ptr;
     if (len_val) |val| {
-        const len = val.toUnsignedInt(target);
+        const len = val.toUnsignedInt(mod);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
             return;
@@ -22320,6 +22362,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
 }

 fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -22334,14 +22377,13 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         return sema.fail(block, dest_src, "cannot memset constant pointer", .{});
     }

-    const dest_elem_ty = dest_ptr_ty.elemType2();
-    const target = sema.mod.getTarget();
+    const dest_elem_ty = dest_ptr_ty.elemType2(mod);
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
         const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src);
         const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse
             break :rs dest_src;
-        const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?;
+        const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
         const len = try sema.usizeCast(block, dest_src, len_u64);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
@@ -22499,9 +22541,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
-    const target = sema.mod.getTarget();
+    const target = mod.getTarget();

     const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node };
     const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node };
@@ -22535,7 +22578,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.tag() == .generic_poison) {
             break :blk null;
         }
-        const alignment = @intCast(u32, val.toUnsignedInt(target));
+        const alignment = @intCast(u32, val.toUnsignedInt(mod));
         try sema.validateAlign(block, align_src, alignment);
         if (alignment == target_util.defaultFunctionAlignment(target)) {
             break :blk 0;
@@ -22551,7 +22594,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target));
+        const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod));
         try sema.validateAlign(block, align_src, alignment);
         if (alignment == target_util.defaultFunctionAlignment(target)) {
             break :blk 0;
@@ -22642,8 +22685,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         extra_index += body.len;

         const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known");
-        var buffer: Value.ToTypeBuffer = undefined;
-        const ty = try val.toType(&buffer).copy(sema.arena);
+        const ty = try val.toType().copy(sema.arena);
         break :blk ty;
     } else if (extra.data.bits.has_ret_ty_ref) blk: {
         const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -22654,8 +22696,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        var buffer: Value.ToTypeBuffer = undefined;
-        const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena);
+        const ty = try ret_ty_tv.val.toType().copy(sema.arena);
         break :blk ty;
     } else Type.void;

@@ -22727,13 +22768,14 @@ fn zirCDefine(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
     const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

     const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being defined must be comptime-known");
     const rhs = try sema.resolveInst(extra.rhs);
-    if (sema.typeOf(rhs).zigTypeTag() != .Void) {
+    if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) {
         const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being defined must be comptime-known");
         try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
     } else {
@@ -22799,9 +22841,9 @@ fn resolvePrefetchOptions(
     src: LazySrcLoc,
     zir_ref: Zir.Inst.Ref,
 ) CompileError!std.builtin.PrefetchOptions {
+    const mod = sema.mod;
     const options_ty = try sema.getBuiltinType("PrefetchOptions");
     const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
-    const target = sema.mod.getTarget();

     const rw_src = sema.maybeOptionsSrc(block, src, "rw");
     const locality_src = sema.maybeOptionsSrc(block, src, "locality");
@@ -22818,7 +22860,7 @@ fn resolvePrefetchOptions(

     return std.builtin.PrefetchOptions{
         .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw),
-        .locality = @intCast(u2, locality_val.toUnsignedInt(target)),
+        .locality = @intCast(u2, locality_val.toUnsignedInt(mod)),
         .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache),
     };
 }
@@ -22887,7 +22929,7 @@ fn resolveExternOptions(
     const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src);
     const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known");

-    const library_name = if (!library_name_val.isNull()) blk: {
+    const library_name = if (!library_name_val.isNull(mod)) blk: {
         const payload = library_name_val.castTag(.opt_payload).?.data;
         const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
         if (library_name.len == 0) {
@@ -22917,17 +22959,17 @@ fn zirBuiltinExtern(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
     const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };

     var ty = try sema.resolveType(block, ty_src, extra.lhs);
-    if (!ty.isPtrAtRuntime()) {
+    if (!ty.isPtrAtRuntime(mod)) {
         return sema.fail(block, ty_src, "expected (optional) pointer", .{});
     }
     if (!try sema.validateExternType(ty.childType(), .other)) {
         const msg = msg: {
-            const mod = sema.mod;
             const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
             errdefer msg.destroy(sema.gpa);
             const src_decl = sema.mod.declPtr(block.src_decl);
@@ -22945,7 +22987,7 @@ fn zirBuiltinExtern(
         else => |e| return e,
     };

-    if (options.linkage == .Weak and !ty.ptrAllowsZero()) {
+    if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
         ty = try Type.optional(sema.arena, ty);
     }

@@ -23087,7 +23129,7 @@ fn validateVarType(
         const src_decl = mod.declPtr(block.src_decl);
         try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty);

-        if (var_ty.zigTypeTag() == .ComptimeInt or var_ty.zigTypeTag() == .ComptimeFloat) {
+        if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) {
            try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
        }

@@ -23101,8 +23143,9 @@ fn validateRunTimeType(
     var_ty: Type,
     is_extern: bool,
 ) CompileError!bool {
+    const mod = sema.mod;
     var ty = var_ty;
-    while (true) switch (ty.zigTypeTag()) {
+    while (true) switch (ty.zigTypeTag(mod)) {
         .Bool,
         .Int,
         .Float,
@@ -23126,9 +23169,9 @@ fn validateRunTimeType(
         .Pointer => {
             const elem_ty = ty.childType();
-            switch (elem_ty.zigTypeTag()) {
+            switch (elem_ty.zigTypeTag(mod)) {
                 .Opaque => return true,
-                .Fn => return elem_ty.isFnOrHasRuntimeBits(),
+                .Fn => return elem_ty.isFnOrHasRuntimeBits(mod),
                 else => ty = elem_ty,
             }
         },
@@ -23174,7 +23217,7 @@ fn explainWhyTypeIsComptimeInner(
     type_set: *TypeSet,
 ) CompileError!void {
     const mod = sema.mod;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Bool,
         .Int,
         .Float,
@@ -23211,8 +23254,8 @@ fn explainWhyTypeIsComptimeInner(
             try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
         },
         .Pointer => {
-            const elem_ty = ty.elemType2();
-            if (elem_ty.zigTypeTag() == .Fn) {
+            const elem_ty = ty.elemType2(mod);
+            if (elem_ty.zigTypeTag(mod) == .Fn) {
                 const fn_info = elem_ty.fnInfo();
                 if (fn_info.is_generic) {
                     try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{});
@@ -23221,7 +23264,7 @@ fn explainWhyTypeIsComptimeInner(
                     .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
                     else => {},
                 }
-                if (fn_info.return_type.comptimeOnly()) {
+                if (fn_info.return_type.comptimeOnly(mod)) {
                     try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
                 }
                 return;
@@ -23295,7 +23338,8 @@ fn validateExternType(
     ty: Type,
     position: ExternPosition,
 ) !bool {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Type,
         .ComptimeFloat,
         .ComptimeInt,
@@ -23314,7 +23358,7 @@ fn validateExternType(
         .AnyFrame,
         => return true,
         .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)),
-        .Int => switch (ty.intInfo(sema.mod.getTarget()).bits) {
+        .Int => switch (ty.intInfo(mod).bits) {
             8, 16, 32, 64, 128 => return true,
             else => return false,
         },
@@ -23329,14 +23373,12 @@ fn validateExternType(
             return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention());
         },
         .Enum => {
-            var buf: Type.Payload.Bits = undefined;
-            return sema.validateExternType(ty.intTagType(&buf), position);
+            return sema.validateExternType(ty.intTagType(), position);
         },
         .Struct, .Union => switch (ty.containerLayout()) {
             .Extern => return true,
             .Packed => {
-                const target = sema.mod.getTarget();
-                const bit_size = try ty.bitSizeAdvanced(target, sema);
+                const bit_size = try ty.bitSizeAdvanced(mod, sema);
                 switch (bit_size) {
                     8, 16, 32, 64, 128 => return true,
                     else => return false,
@@ -23346,10 +23388,10 @@ fn validateExternType(
         },
         .Array => {
             if (position == .ret_ty or position == .param_ty) return false;
-            return sema.validateExternType(ty.elemType2(), .element);
+            return sema.validateExternType(ty.elemType2(mod), .element);
         },
-        .Vector => return sema.validateExternType(ty.elemType2(), .element),
-        .Optional => return ty.isPtrLikeOptional(),
+        .Vector => return sema.validateExternType(ty.elemType2(mod), .element),
+        .Optional => return ty.isPtrLikeOptional(mod),
     }
 }

@@ -23361,7 +23403,7 @@ fn explainWhyTypeIsNotExtern(
     position: ExternPosition,
 ) CompileError!void {
     const mod = sema.mod;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Opaque,
         .Bool,
         .Float,
@@ -23390,7 +23432,7 @@ fn explainWhyTypeIsNotExtern(
         },
         .Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
         .NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
-        .Int => if (!std.math.isPowerOfTwo(ty.intInfo(sema.mod.getTarget()).bits)) {
+        .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) {
             try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{});
         } else {
             try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{});
@@ -23409,8 +23451,7 @@ fn explainWhyTypeIsNotExtern(
             }
         },
         .Enum => {
-            var buf: Type.Payload.Bits = undefined;
-            const tag_ty = ty.intTagType(&buf);
+            const tag_ty = ty.intTagType();
             try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
             try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
         },
@@ -23422,17 +23463,17 @@ fn explainWhyTypeIsNotExtern(
             } else if (position == .param_ty) {
                 return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{});
             }
-            try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element);
+            try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element);
         },
-        .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element),
+        .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element),
         .Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
     }
 }

 /// Returns true if `ty` is allowed in packed types.
 /// Does *NOT* require `ty` to be resolved in any way.
-fn validatePackedType(ty: Type) bool {
-    switch (ty.zigTypeTag()) {
+fn validatePackedType(ty: Type, mod: *const Module) bool {
+    switch (ty.zigTypeTag(mod)) {
         .Type,
         .ComptimeFloat,
         .ComptimeInt,
@@ -23448,7 +23489,7 @@ fn validatePackedType(ty: Type) bool {
         .Fn,
         .Array,
         => return false,
-        .Optional => return ty.isPtrLikeOptional(),
+        .Optional => return ty.isPtrLikeOptional(mod),
         .Void,
         .Bool,
         .Float,
@@ -23468,7 +23509,7 @@ fn explainWhyTypeIsNotPacked(
     ty: Type,
 ) CompileError!void {
     const mod = sema.mod;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Void,
         .Bool,
         .Float,
@@ -23731,6 +23772,7 @@ fn panicSentinelMismatch(
     sentinel_index: Air.Inst.Ref,
 ) !void {
     assert(!parent_block.is_comptime);
+    const mod = sema.mod;
     const expected_sentinel_val = maybe_sentinel orelse return;
     const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val);

@@ -23743,7 +23785,7 @@ fn panicSentinelMismatch(
         break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
     };

-    const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: {
+    const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: {
         const eql = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
         break :ok try parent_block.addInst(.{
@@ -23753,7 +23795,7 @@ fn panicSentinelMismatch(
                 .operation = .And,
             } },
         });
-    } else if (sentinel_ty.isSelfComparable(true))
+    } else if (sentinel_ty.isSelfComparable(mod, true))
         try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
     else {
         const panic_fn = try sema.getBuiltin("checkNonScalarSentinel");
@@ -23848,6 +23890,7 @@ fn fieldVal(
     // When editing this function, note that there is corresponding logic to be edited
     // in `fieldPtr`. This function takes a value and returns a value.

+    const mod = sema.mod;
     const arena = sema.arena;
     const object_src = src; // TODO better source location
     const object_ty = sema.typeOf(object);
@@ -23862,7 +23905,7 @@ fn fieldVal(
     else
         object_ty;

-    switch (inner_ty.zigTypeTag()) {
+    switch (inner_ty.zigTypeTag(mod)) {
         .Array => {
             if (mem.eql(u8, field_name, "len")) {
                 return sema.addConstant(
@@ -23926,10 +23969,9 @@ fn fieldVal(
                 object;

             const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
-            var to_type_buffer: Value.ToTypeBuffer = undefined;
-            const child_type = val.toType(&to_type_buffer);
+            const child_type = val.toType();

-            switch (try child_type.zigTypeTagOrPoison()) {
+            switch (try child_type.zigTypeTagOrPoison(mod)) {
                 .ErrorSet => {
                     const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
                         if (payload.data.names.getEntry(field_name)) |entry| {
@@ -23997,7 +24039,7 @@ fn fieldVal(
                         const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)});
                         errdefer msg.destroy(sema.gpa);
                         if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{});
-                        if (child_type.zigTypeTag() == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{});
+                        if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{});
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
@@ -24035,9 +24077,10 @@ fn fieldPtr(
     // When editing this function, note that there is corresponding logic to be edited
     // in `fieldVal`. This function takes a pointer and returns a pointer.

+    const mod = sema.mod;
     const object_ptr_src = src; // TODO better source location
     const object_ptr_ty = sema.typeOf(object_ptr);
-    const object_ty = switch (object_ptr_ty.zigTypeTag()) {
+    const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
         .Pointer => object_ptr_ty.elemType(),
         else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}),
     };
@@ -24052,7 +24095,7 @@ fn fieldPtr(
     else
         object_ty;

-    switch (inner_ty.zigTypeTag()) {
+    switch (inner_ty.zigTypeTag(mod)) {
         .Array => {
             if (mem.eql(u8, field_name, "len")) {
                 var anon_decl = try block.startAnonDecl();
@@ -24142,10 +24185,9 @@ fn fieldPtr(
                 result;

             const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
-            var to_type_buffer: Value.ToTypeBuffer = undefined;
-            const child_type = val.toType(&to_type_buffer);
+            const child_type = val.toType();

-            switch (child_type.zigTypeTag()) {
+            switch (child_type.zigTypeTag(mod)) {
                 .ErrorSet => {
                     // TODO resolve inferred error sets
                     const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
@@ -24258,15 +24300,16 @@ fn fieldCallBind(
     // When editing this function, note that there is corresponding logic to be edited
     // in `fieldVal`. This function takes a pointer and returns a pointer.

+    const mod = sema.mod;
     const raw_ptr_src = src; // TODO better source location
     const raw_ptr_ty = sema.typeOf(raw_ptr);
-    const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
+    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
         raw_ptr_ty.childType()
     else
         return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)});

     // Optionally dereference a second pointer to get the concrete type.
-    const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One;
+    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One;
     const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
     const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
     const object_ptr = if (is_double_ptr)
@@ -24275,7 +24318,7 @@ fn fieldCallBind(
         raw_ptr;

     find_field: {
-        switch (concrete_ty.zigTypeTag()) {
+        switch (concrete_ty.zigTypeTag(mod)) {
             .Struct => {
                 const struct_ty = try sema.resolveTypeFields(concrete_ty);
                 if (struct_ty.castTag(.@"struct")) |struct_obj| {
@@ -24321,21 +24364,21 @@ fn fieldCallBind(
     }

     // If we get here, we need to look for a decl in the struct type instead.
-    const found_decl = switch (concrete_ty.zigTypeTag()) {
+    const found_decl = switch (concrete_ty.zigTypeTag(mod)) {
         .Struct, .Opaque, .Union, .Enum => found_decl: {
             if (concrete_ty.getNamespace()) |namespace| {
                 if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| {
                     try sema.addReferencedBy(block, src, decl_idx);
                     const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
                     const decl_type = sema.typeOf(decl_val);
-                    if (decl_type.zigTypeTag() == .Fn and
+                    if (decl_type.zigTypeTag(mod) == .Fn and
                         decl_type.fnParamLen() >= 1)
                     {
                         const first_param_type = decl_type.fnParamType(0);
                         const first_param_tag = first_param_type.tag();
                         // zig fmt: off
                         if (first_param_tag == .generic_poison or (
-                                first_param_type.zigTypeTag() == .Pointer and
+                                first_param_type.zigTypeTag(mod) == .Pointer and
                                 (first_param_type.ptrSize() == .One or
                                 first_param_type.ptrSize() == .C) and
                                 first_param_type.childType().eql(concrete_ty, sema.mod)))
@@ -24356,7 +24399,7 @@ fn fieldCallBind(
                                 .func_inst = decl_val,
                                 .arg0_inst = deref,
                             } };
-                        } else if (first_param_type.zigTypeTag() == .Optional) {
+                        } else if (first_param_type.zigTypeTag(mod) == .Optional) {
                             var opt_buf: Type.Payload.ElemType = undefined;
                             const child = first_param_type.optionalChild(&opt_buf);
                             if (child.eql(concrete_ty, sema.mod)) {
@@ -24365,7 +24408,7 @@ fn fieldCallBind(
                                     .func_inst = decl_val,
                                     .arg0_inst = deref,
                                 } };
-                            } else if (child.zigTypeTag() == .Pointer and
+                            } else if (child.zigTypeTag(mod) == .Pointer and
                                 child.ptrSize() == .One and
                                 child.childType().eql(concrete_ty, sema.mod))
                             {
@@ -24374,7 +24417,7 @@ fn fieldCallBind(
                                     .arg0_inst = object_ptr,
                                 } };
                             }
-                        } else if (first_param_type.zigTypeTag() == .ErrorUnion and
+                        } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and
                             first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod))
                         {
                             const deref = try sema.analyzeLoad(block, src, object_ptr, src);
@@ -24421,9 +24464,10 @@ fn finishFieldCallBind(
         .@"addrspace" = ptr_ty.ptrAddressSpace(),
     });

+    const mod = sema.mod;
     const container_ty = ptr_ty.childType();
-    if (container_ty.zigTypeTag() == .Struct) {
-        if (container_ty.structFieldValueComptime(field_index)) |default_val| {
+    if (container_ty.zigTypeTag(mod) == .Struct) {
+        if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
             return .{ .direct = try sema.addConstant(field_ty, default_val) };
         }
     }
@@ -24504,7 +24548,8 @@ fn structFieldPtr(
     unresolved_struct_ty: Type,
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
-    assert(unresolved_struct_ty.zigTypeTag() == .Struct);
+    const mod = sema.mod;
+    assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);

     const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
     try sema.resolveStructLayout(struct_ty);
@@ -24544,6 +24589,7 @@ fn structFieldPtrByIndex(
         return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
     }

+    const mod = sema.mod;
     const struct_obj = struct_ty.castTag(.@"struct").?.data;
     const field = struct_obj.fields.values()[field_index];
     const struct_ptr_ty = sema.typeOf(struct_ptr);
@@ -24568,7 +24614,7 @@ fn structFieldPtrByIndex(
             if (i == field_index) {
                 ptr_ty_data.bit_offset = running_bits;
             }
-            running_bits += @intCast(u16, f.ty.bitSize(target));
+            running_bits += @intCast(u16, f.ty.bitSize(mod));
         }
         ptr_ty_data.host_size = (running_bits + 7) / 8;

@@ -24582,7 +24628,7 @@ fn structFieldPtrByIndex(
         const parent_align = if (struct_ptr_ty_info.@"align" != 0)
             struct_ptr_ty_info.@"align"
         else
-            struct_ptr_ty_info.pointee_type.abiAlignment(target);
+            struct_ptr_ty_info.pointee_type.abiAlignment(mod);
         ptr_ty_data.@"align" = parent_align;

         // If the field happens to be byte-aligned, simplify the pointer type.
@@ -24596,8 +24642,8 @@ fn structFieldPtrByIndex(
         if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and
             target.cpu.arch.endian() == .Little)
         {
-            const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target);
-            const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target);
+            const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod);
+            const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod);
             if (elem_size_bytes * 8 == elem_size_bits) {
                 const byte_offset = ptr_ty_data.bit_offset / 8;
                 const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align));
@@ -24644,7 +24690,8 @@ fn structFieldVal(
     field_name_src: LazySrcLoc,
     unresolved_struct_ty: Type,
 ) CompileError!Air.Inst.Ref {
-    assert(unresolved_struct_ty.zigTypeTag() == .Struct);
+    const mod = sema.mod;
+    assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);

     const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
     switch (struct_ty.tag()) {
@@ -24728,9 +24775,10 @@ fn tupleFieldValByIndex(
     field_index: u32,
     tuple_ty: Type,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const field_ty = tuple_ty.structFieldType(field_index);

-    if (tuple_ty.structFieldValueComptime(field_index)) |default_value| {
+    if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
         return sema.addConstant(field_ty, default_value);
     }

@@ -24743,7 +24791,7 @@ fn tupleFieldValByIndex(
         return sema.addConstant(field_ty, field_values[field_index]);
     }

-    if (tuple_ty.structFieldValueComptime(field_index)) |default_val| {
+    if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
         return sema.addConstant(field_ty, default_val);
     }

@@ -24762,7 +24810,9 @@ fn unionFieldPtr(
     initializing: bool,
 ) CompileError!Air.Inst.Ref {
     const arena = sema.arena;
-    assert(unresolved_union_ty.zigTypeTag() == .Union);
+    const mod = sema.mod;
+
+    assert(unresolved_union_ty.zigTypeTag(mod) == .Union);

     const union_ptr_ty = sema.typeOf(union_ptr);
     const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
@@ -24777,7 +24827,7 @@ fn unionFieldPtr(
     });
     const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);

-    if (initializing and field.ty.zigTypeTag() == .NoReturn) {
+    if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
             errdefer msg.destroy(sema.gpa);
@@ -24839,7 +24889,7 @@ fn unionFieldPtr(
         const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val);
         try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
     }
-    if (field.ty.zigTypeTag() == .NoReturn) {
+    if (field.ty.zigTypeTag(mod) == .NoReturn) {
         _ = try block.addNoOp(.unreach);
         return Air.Inst.Ref.unreachable_value;
     }
@@ -24855,7 +24905,8 @@ fn unionFieldVal(
     field_name_src: LazySrcLoc,
     unresolved_union_ty: Type,
 ) CompileError!Air.Inst.Ref {
-    assert(unresolved_union_ty.zigTypeTag() == .Union);
+    const mod = sema.mod;
+    assert(unresolved_union_ty.zigTypeTag(mod) == .Union);

     const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
     const union_obj = union_ty.cast(Type.Payload.Union).?.data;
@@ -24911,7 +24962,7 @@ fn unionFieldVal(
         const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
         try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
     }
-    if (field.ty.zigTypeTag() == .NoReturn) {
+    if (field.ty.zigTypeTag(mod) == .NoReturn) {
         _ = try block.addNoOp(.unreach);
         return Air.Inst.Ref.unreachable_value;
     }
@@ -24928,22 +24979,22 @@ fn elemPtr(
     init: bool,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const indexable_ptr_src = src; // TODO better source location
     const indexable_ptr_ty = sema.typeOf(indexable_ptr);
-    const target = sema.mod.getTarget();
-    const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) {
+    const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
         .Pointer => indexable_ptr_ty.elemType(),
         else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
     };
     try checkIndexable(sema, block, src, indexable_ty);

-    switch (indexable_ty.zigTypeTag()) {
+    switch (indexable_ty.zigTypeTag(mod)) {
         .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
         .Struct => {
             // Tuple field access.
             const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
-            const index = @intCast(u32, index_val.toUnsignedInt(target));
+            const index = @intCast(u32, index_val.toUnsignedInt(mod));
             return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
         },
         else => {
@@ -24966,7 +25017,7 @@ fn elemPtrOneLayerOnly(
 ) CompileError!Air.Inst.Ref {
     const indexable_src = src; // TODO better source location
     const indexable_ty = sema.typeOf(indexable);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;

     try checkIndexable(sema, block, src, indexable_ty);

@@ -24978,7 +25029,7 @@ fn elemPtrOneLayerOnly(
             const runtime_src = rs: {
                 const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
                 const index_val = maybe_index_val orelse break :rs elem_index_src;
-                const index = @intCast(usize, index_val.toUnsignedInt(target));
+                const index = @intCast(usize, index_val.toUnsignedInt(mod));
                 const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
                 const result_ty = try sema.elemPtrType(indexable_ty, index);
                 return sema.addConstant(result_ty, elem_ptr);
@@ -24989,7 +25040,7 @@ fn elemPtrOneLayerOnly(
             return block.addPtrElemPtr(indexable, elem_index, result_ty);
         },
         .One => {
-            assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable
+            assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
             return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
         },
     }
@@ -25006,7 +25057,7 @@ fn elemVal(
 ) CompileError!Air.Inst.Ref {
     const indexable_src = src; // TODO better source location
     const indexable_ty = sema.typeOf(indexable);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;

     try checkIndexable(sema, block, src, indexable_ty);

@@ -25014,7 +25065,7 @@ fn elemVal(
     // index is a scalar or vector instead of unconditionally casting to usize.
     const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);

-    switch (indexable_ty.zigTypeTag()) {
+    switch (indexable_ty.zigTypeTag(mod)) {
         .Pointer => switch (indexable_ty.ptrSize()) {
             .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
             .Many, .C => {
@@ -25024,10 +25075,10 @@ fn elemVal(
                 const runtime_src = rs: {
                     const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
                     const index_val = maybe_index_val orelse break :rs elem_index_src;
-                    const index = @intCast(usize, index_val.toUnsignedInt(target));
+                    const index = @intCast(usize, index_val.toUnsignedInt(mod));
                     const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
                     if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| {
-                        return sema.addConstant(indexable_ty.elemType2(), elem_val);
+                        return sema.addConstant(indexable_ty.elemType2(mod), elem_val);
                     }
                     break :rs indexable_src;
                 };
@@ -25036,7 +25087,7 @@ fn elemVal(
                 return block.addBinOp(.ptr_elem_val, indexable, elem_index);
             },
             .One => {
-                assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable
+                assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
                 const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
                 return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
             },
@@ -25049,7 +25100,7 @@ fn elemVal(
         .Struct => {
             // Tuple field access.
             const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
-            const index = @intCast(u32, index_val.toUnsignedInt(target));
+            const index = @intCast(u32, index_val.toUnsignedInt(mod));
             return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
         },
         else => unreachable,
@@ -25093,6 +25144,7 @@ fn tupleFieldPtr(
     field_index: u32,
     init: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const tuple_ptr_ty = sema.typeOf(tuple_ptr);
     const tuple_ty = tuple_ptr_ty.childType();
     _ = try sema.resolveTypeFields(tuple_ty);
@@ -25116,7 +25168,7 @@ fn tupleFieldPtr(
         .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(),
     });

-    if (tuple_ty.structFieldValueComptime(field_index)) |default_val| {
+    if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
         const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
             .field_ty = field_ty,
             .field_val = default_val,
@@ -25151,6 +25203,7 @@ fn tupleField(
     field_index_src: LazySrcLoc,
     field_index: u32,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple));
     const field_count = tuple_ty.structFieldCount();

@@ -25166,13 +25219,13 @@ fn tupleField(
     const field_ty = tuple_ty.structFieldType(field_index);

-    if (tuple_ty.structFieldValueComptime(field_index)) |default_value| {
+    if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
         return sema.addConstant(field_ty, default_value); // comptime field
     }

     if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| {
         if (tuple_val.isUndef()) return sema.addConstUndef(field_ty);
-        return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index));
+        return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, mod, field_index));
     }

     try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
@@ -25191,6 +25244,7 @@ fn elemValArray(
     elem_index: Air.Inst.Ref,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const array_ty = sema.typeOf(array);
     const array_sent = array_ty.sentinel();
     const array_len = array_ty.arrayLen();
@@ -25204,10 +25258,9 @@ fn elemValArray(
     const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array);
     // index must be defined since it can access out of bounds
     const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
-    const target = sema.mod.getTarget();

     if (maybe_index_val) |index_val| {
-        const index = @intCast(usize, index_val.toUnsignedInt(target));
+        const index = @intCast(usize, index_val.toUnsignedInt(mod));
         if (array_sent) |s| {
             if (index == array_len) {
                 return sema.addConstant(elem_ty, s);
@@ -25223,7 +25276,7 @@ fn elemValArray(
             return sema.addConstUndef(elem_ty);
         }
         if (maybe_index_val) |index_val| {
-            const index = @intCast(usize, index_val.toUnsignedInt(target));
+            const index = @intCast(usize, index_val.toUnsignedInt(mod));
             const elem_val = try array_val.elemValue(sema.mod, sema.arena, index);
             return sema.addConstant(elem_ty, elem_val);
         }
@@ -25255,7 +25308,7 @@ fn elemPtrArray(
     init: bool,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     const array_ptr_ty = sema.typeOf(array_ptr);
     const array_ty = array_ptr_ty.childType();
     const array_sent = array_ty.sentinel() != null;
@@ -25269,7 +25322,7 @@ fn elemPtrArray(
     const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr);
     // The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target)); + const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -25290,7 +25343,7 @@ fn elemPtrArray( } if (!init) { - try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(), array_ty, array_ptr_src); + try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src); } const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src; @@ -25316,16 +25369,16 @@ fn elemValSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel() != null; - const elem_ty = slice_ty.elemType2(); + const elem_ty = slice_ty.elemType2(mod); var runtime_src = slice_src; // slice must be defined since it can dereferenced as null const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice); // index must be defined since it can index out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; @@ -25335,7 +25388,7 @@ fn elemValSlice( return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); @@ -25373,14 +25426,14 @@ fn elemPtrSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel() != null; const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice); // The index must not be undefined since it can be out of bounds. const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target)); + const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod)); break :o index; } else null; @@ -25484,6 +25537,7 @@ fn coerceExtra( const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); + const mod = sema.mod; const target = sema.mod.getTarget(); // If the types are the same, we can return the operand. 
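
[Annotation, not part of the patch: the pattern driving most of the churn in these hunks is that Type and Value queries which may need the InternPool now take the Module, while purely ABI-driven queries keep taking the bare target. A schematic with simplified, hypothetical call shapes, not the real declarations:]

    // before: ty.intInfo(target)          after: ty.intInfo(mod)
    // before: val.toUnsignedInt(target)   after: val.toUnsignedInt(mod)
    //
    // The target is still reachable where only ABI data matters:
    //     const target = mod.getTarget();
    //     const bits = dest_ty.floatBits(target); // unchanged by this commit
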
if (dest_ty.eql(inst_ty, sema.mod)) @@ -25502,9 +25556,9 @@ fn coerceExtra( return block.addBitCast(dest_ty, inst); } - const is_undef = inst_ty.zigTypeTag() == .Undefined; + const is_undef = inst_ty.zigTypeTag(mod) == .Undefined; - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Optional => optional: { // undefined sets the optional bit also to undefined. if (is_undef) { @@ -25512,18 +25566,18 @@ fn coerceExtra( } // null to ?T - if (inst_ty.zigTypeTag() == .Null) { + if (inst_ty.zigTypeTag(mod) == .Null) { return sema.addConstant(dest_ty, Value.null); } // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer - if (dest_ty.isPtrLikeOptional() and dest_ty.elemType2().tag() == .anyopaque and - inst_ty.isPtrAtRuntime()) + if (dest_ty.isPtrLikeOptional(mod) and dest_ty.elemType2(mod).tag() == .anyopaque and + inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25532,7 +25586,7 @@ fn coerceExtra( } // Let the logic below handle wrapping the optional now that // it has been checked to correctly coerce. - if (!inst_ty.isPtrLikeOptional()) break :anyopaque_check; + if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check; return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } @@ -25554,7 +25608,7 @@ fn coerceExtra( const dest_info = dest_ty.ptrInfo().data; // Function body to function pointer. 
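
[Annotation, not part of the patch: the double_ptr_to_anyopaque guard above corresponds to this user-visible behavior. A standalone test; the commented-out lines show the rejected case:]

    const std = @import("std");

    test "single pointer coerces to ?*anyopaque" {
        var x: u32 = 123;
        const p: ?*anyopaque = &x; // *u32 -> ?*anyopaque: allowed
        try std.testing.expect(p != null);
        // var inner: *u32 = &x;
        // const bad: ?*anyopaque = &inner; // **u32: rejected as a double pointer
    }
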
- if (inst_ty.zigTypeTag() == .Fn) { + if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); const fn_decl = fn_val.pointerDecl().?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); @@ -25568,7 +25622,7 @@ fn coerceExtra( if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const ptr_elem_ty = inst_ty.childType(); const array_ty = dest_info.pointee_type; - if (array_ty.zigTypeTag() != .Array) break :single_item; + if (array_ty.zigTypeTag(mod) != .Array) break :single_item; const array_elem_ty = array_ty.childType(); if (array_ty.arrayLen() != 1) break :single_item; const dest_is_mut = dest_info.mutable; @@ -25584,7 +25638,7 @@ fn coerceExtra( if (!inst_ty.isSinglePointer()) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const array_ty = inst_ty.childType(); - if (array_ty.zigTypeTag() != .Array) break :src_array_ptr; + if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr; const array_elem_type = array_ty.childType(); const dest_is_mut = dest_info.mutable; @@ -25656,10 +25710,10 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25679,7 +25733,7 @@ fn coerceExtra( switch (dest_info.size) { // coercion to C pointer - .C => switch (inst_ty.zigTypeTag()) { + .C => switch (inst_ty.zigTypeTag(mod)) { .Null => { return sema.addConstant(dest_ty, Value.null); }, @@ -25691,7 +25745,7 @@ fn coerceExtra( return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Int => { - const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) { + const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) { .signed => Type.isize, .unsigned => Type.usize, }; @@ -25733,7 +25787,7 @@ fn coerceExtra( }, else => {}, }, - .One => switch (dest_info.pointee_type.zigTypeTag()) { + .One => switch (dest_info.pointee_type.zigTypeTag(mod)) { .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer() and @@ -25767,7 +25821,7 @@ fn coerceExtra( else => {}, }, .Slice => to_slice: { - if (inst_ty.zigTypeTag() == .Array) { + if (inst_ty.zigTypeTag(mod) == .Array) { return sema.fail( block, inst_src, @@ -25789,7 +25843,7 @@ fn coerceExtra( .ptr = if (dest_info.@"align" != 0) try Value.Tag.int_u64.create(sema.arena, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(target, sema.arena), + try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), .len = Value.zero, }); return sema.addConstant(dest_ty, slice_val); @@ -25834,13 +25888,13 @@ fn coerceExtra( }, } }, - .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) { + .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) { .Float, .ComptimeFloat => float: { if (is_undef) { return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if 
(dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } @@ -25870,15 +25924,15 @@ fn coerceExtra( } return try sema.addConstant(dest_ty, val); } - if (dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; if (opts.no_cast_to_comptime_int) return inst; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } // integer widening - const dst_info = dest_ty.intInfo(target); - const src_info = inst_ty.intInfo(target); + const dst_info = dest_ty.intInfo(mod); + const src_info = inst_ty.intInfo(mod); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) @@ -25892,7 +25946,7 @@ fn coerceExtra( }, else => {}, }, - .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) { + .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const result_val = try val.floatCast(sema.arena, dest_ty, target); @@ -25913,7 +25967,7 @@ fn coerceExtra( ); } return try sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25931,7 +25985,7 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if (dest_ty.zigTypeTag() == .ComptimeFloat) { + if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25955,7 +26009,7 @@ fn coerceExtra( }, else => {}, }, - .Enum => switch (inst_ty.zigTypeTag()) { + .Enum => switch (inst_ty.zigTypeTag(mod)) { .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); @@ -25991,7 +26045,7 @@ fn coerceExtra( }, else => {}, }, - .ErrorUnion => switch (inst_ty.zigTypeTag()) { + .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { switch (inst_val.tag()) { @@ -26031,7 +26085,7 @@ fn coerceExtra( }; }, }, - .Union => switch (inst_ty.zigTypeTag()) { + .Union => switch (inst_ty.zigTypeTag(mod)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isAnonStruct()) { @@ -26043,7 +26097,7 @@ fn coerceExtra( }, else => {}, }, - .Array => switch (inst_ty.zigTypeTag()) { + .Array => switch (inst_ty.zigTypeTag(mod)) { .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst == .empty_struct) { @@ -26058,7 +26112,7 @@ fn coerceExtra( }, else => {}, }, - .Vector => switch (inst_ty.zigTypeTag()) { + .Vector => switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isTuple()) { @@ -26093,7 +26147,7 @@ fn 
coerceExtra( if (!opts.report_err) return error.NotCoercible; - if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) { + if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{}); errdefer msg.destroy(sema.gpa); @@ -26111,7 +26165,7 @@ fn coerceExtra( errdefer msg.destroy(sema.gpa); // E!T to T - if (inst_ty.zigTypeTag() == .ErrorUnion and + if (inst_ty.zigTypeTag(mod) == .ErrorUnion and (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{}); @@ -26120,7 +26174,7 @@ fn coerceExtra( // ?T to T var buf: Type.Payload.ElemType = undefined; - if (inst_ty.zigTypeTag() == .Optional and + if (inst_ty.zigTypeTag(mod) == .Optional and (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{}); @@ -26133,7 +26187,7 @@ fn coerceExtra( if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) { const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - if (inst_ty.isError() and !dest_ty.isError()) { + if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{}); } else { try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{}); @@ -26264,6 +26318,7 @@ const InMemoryCoercionResult = union(enum) { } fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void { + const mod = sema.mod; var cur = res; while (true) switch (cur.*) { .ok => unreachable, @@ -26445,8 +26500,8 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_allowzero => |pair| { - const wanted_allow_zero = pair.wanted.ptrAllowsZero(); - const actual_allow_zero = pair.actual.ptrAllowsZero(); + const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod); + const actual_allow_zero = pair.actual.ptrAllowsZero(mod); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{ pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), @@ -26522,13 +26577,15 @@ fn coerceInMemoryAllowed( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) CompileError!InMemoryCoercionResult { - if (dest_ty.eql(src_ty, sema.mod)) + const mod = sema.mod; + + if (dest_ty.eql(src_ty, mod)) return .ok; // Differently-named integers with the same number of bits. - if (dest_ty.zigTypeTag() == .Int and src_ty.zigTypeTag() == .Int) { - const dest_info = dest_ty.intInfo(target); - const src_info = src_ty.intInfo(target); + if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) { + const dest_info = dest_ty.intInfo(mod); + const src_info = src_ty.intInfo(mod); if (dest_info.signedness == src_info.signedness and dest_info.bits == src_info.bits) @@ -26551,7 +26608,7 @@ fn coerceInMemoryAllowed( } // Differently-named floats with the same number of bits. 
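
[Annotation, not part of the patch: before the float case below, here is a standalone paraphrase of the integer widening rule from coerceExtra above. `IntInfo` is a stand-in for the real intInfo result, not the compiler's type:]

    const std = @import("std");

    const IntInfo = struct { signedness: std.builtin.Signedness, bits: u16 };

    // Paraphrase of the widening check in coerceExtra, not the real API:
    fn intWideningOk(dst: IntInfo, src: IntInfo) bool {
        return (src.signedness == dst.signedness and dst.bits >= src.bits) or
            // small enough unsigned ints can be cast into larger signed ints
            (dst.signedness == .signed and dst.bits > src.bits);
    }

    test "u8 widens into i16 but not into i8" {
        const u8_info = IntInfo{ .signedness = .unsigned, .bits = 8 };
        try std.testing.expect(intWideningOk(.{ .signedness = .signed, .bits = 16 }, u8_info));
        try std.testing.expect(!intWideningOk(.{ .signedness = .signed, .bits = 8 }, u8_info));
    }
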
- if (dest_ty.zigTypeTag() == .Float and src_ty.zigTypeTag() == .Float) { + if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { @@ -26575,8 +26632,8 @@ fn coerceInMemoryAllowed( return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } - const dest_tag = dest_ty.zigTypeTag(); - const src_tag = src_ty.zigTypeTag(); + const dest_tag = dest_ty.zigTypeTag(mod); + const src_tag = src_ty.zigTypeTag(mod); // Functions if (dest_tag == .Fn and src_tag == .Fn) { @@ -26624,7 +26681,7 @@ fn coerceInMemoryAllowed( } const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod)); + dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod)); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), @@ -26646,8 +26703,8 @@ fn coerceInMemoryAllowed( } }; } - const dest_elem_ty = dest_ty.scalarType(); - const src_elem_ty = src_ty.scalarType(); + const dest_elem_ty = dest_ty.scalarType(mod); + const src_elem_ty = src_ty.scalarType(mod); const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return InMemoryCoercionResult{ .vector_elem = .{ @@ -26923,6 +26980,7 @@ fn coerceInMemoryAllowedPtrs( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { + const mod = sema.mod; const dest_info = dest_ptr_ty.ptrInfo().data; const src_info = src_ptr_ty.ptrInfo().data; @@ -26964,8 +27022,8 @@ fn coerceInMemoryAllowedPtrs( } }; } - const dest_allow_zero = dest_ty.ptrAllowsZero(); - const src_allow_zero = src_ty.ptrAllowsZero(); + const dest_allow_zero = dest_ty.ptrAllowsZero(mod); + const src_allow_zero = src_ty.ptrAllowsZero(mod); const ok_allows_zero = (dest_allow_zero and (src_allow_zero or !dest_is_mut)) or @@ -27013,12 +27071,12 @@ fn coerceInMemoryAllowedPtrs( const src_align = if (src_info.@"align" != 0) src_info.@"align" else - src_info.pointee_type.abiAlignment(target); + src_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > src_align) { return InMemoryCoercionResult{ .ptr_alignment = .{ @@ -27041,8 +27099,9 @@ fn coerceVarArgParam( ) !Air.Inst.Ref { if (block.is_typeof) return inst; + const mod = sema.mod; const uncasted_ty = sema.typeOf(inst); - const coerced = switch (uncasted_ty.zigTypeTag()) { + const coerced = switch (uncasted_ty.zigTypeTag(mod)) { // TODO consider casting to c_int/f64 if they fit .ComptimeInt, .ComptimeFloat => return sema.fail( block, @@ -27124,7 +27183,8 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag() == .Array) { + const mod = sema.mod; + if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { const field_count = operand_ty.structFieldCount(); var i: u32 = 0; while (i < field_count) : (i += 1) { @@ -27225,7 +27285,8 @@ fn storePtr2( /// lengths match. 
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const array_ty = sema.typeOf(ptr).childType(); - if (array_ty.zigTypeTag() != .Array) return null; + const mod = sema.mod; + if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_inst = Air.refToIndex(ptr) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); @@ -27237,7 +27298,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, else => return null, }; - if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr; + if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; ptr_inst = Air.refToIndex(prev_ptr) orelse return null; } else return null; @@ -27263,6 +27324,7 @@ fn storePtrVal( operand_val: Value, operand_ty: Type, ) !void { + const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); @@ -27281,8 +27343,7 @@ fn storePtrVal( val_ptr.* = try operand_val.copy(arena); }, .reinterpret => |reinterpret| { - const target = sema.mod.getTarget(); - const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { @@ -27354,7 +27415,7 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; switch (ptr_val.tag()) { .decl_ref_mut => { const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; @@ -27375,7 +27436,7 @@ fn beginComptimePtrMutation( var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag()) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { const check_len = parent.ty.arrayLenIncludingSentinel(); if (elem_ptr.index >= check_len) { @@ -27570,7 +27631,7 @@ fn beginComptimePtrMutation( }, }, .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout()) { + if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { // Even though the parent value type has well-defined memory layout, our // pointer type does not. 
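
[Annotation, not part of the patch: as a user-level anchor for what storePtrVal and beginComptimePtrMutation implement, a standalone test; the comments name the compiler paths above:]

    const std = @import("std");

    fn setViaPointer() [3]u8 {
        var arr = [_]u8{ 1, 2, 3 };
        const p = &arr[1]; // at comptime this is an elem_ptr Value
        p.* = 9; // store routed through storePtrVal -> beginComptimePtrMutation
        return arr;
    }

    test "comptime pointer mutation" {
        const arr = comptime setViaPointer();
        try std.testing.expectEqual(@as(u8, 9), arr[1]);
    }
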
return ComptimePtrMutationKit{ @@ -27608,7 +27669,7 @@ fn beginComptimePtrMutation( const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - switch (parent.ty.zigTypeTag()) { + switch (parent.ty.zigTypeTag(mod)) { .Struct => { const fields = try arena.alloc(Value, parent.ty.structFieldCount()); @memset(fields, Value.undef); @@ -27746,7 +27807,7 @@ fn beginComptimePtrMutation( else => unreachable, }, .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, @@ -27872,7 +27933,8 @@ fn beginComptimePtrMutationInner( ptr_elem_ty: Type, decl_ref_mut: Value.Payload.DeclRefMut.Data, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; if (coerce_ok) { return ComptimePtrMutationKit{ @@ -27883,7 +27945,7 @@ fn beginComptimePtrMutationInner( } // Handle the case that the decl is an array and we're actually trying to point to an element. - if (decl_ty.isArrayOrVector()) { + if (decl_ty.isArrayOrVector(mod)) { const decl_elem_ty = decl_ty.childType(); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ @@ -27894,14 +27956,14 @@ fn beginComptimePtrMutationInner( } } - if (!decl_ty.hasWellDefinedLayout()) { + if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .pointee = .{ .bad_decl_ty = {} }, .ty = decl_ty, }; } - if (!ptr_elem_ty.hasWellDefinedLayout()) { + if (!ptr_elem_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .pointee = .{ .bad_ptr_ty = {} }, @@ -27951,6 +28013,7 @@ fn beginComptimePtrLoad( ptr_val: Value, maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { + const mod = sema.mod; const target = sema.mod.getTarget(); var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { .decl_ref, @@ -27966,7 +28029,7 @@ fn beginComptimePtrLoad( const decl_tv = try decl.typedValue(); if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; - const layout_defined = decl.ty.hasWellDefinedLayout(); + const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, @@ -27988,7 +28051,7 @@ fn beginComptimePtrLoad( } if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout()) { + if (elem_ty.hasWellDefinedLayout(mod)) { if (deref.parent) |*parent| { // Update the byte offset (in-place) const elem_size = try sema.typeAbiSize(elem_ty); @@ -28003,7 +28066,7 @@ fn beginComptimePtrLoad( // If we're loading an elem_ptr that was derived from a different type // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector()) x: { + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { const deref_elem_ty = deref.pointee.?.ty.childType(); break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or (try 
sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; @@ -28018,7 +28081,7 @@ fn beginComptimePtrLoad( if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) { + if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), @@ -28058,7 +28121,7 @@ fn beginComptimePtrLoad( const field_index = @intCast(u32, field_ptr.field_index); var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - if (field_ptr.container_ty.hasWellDefinedLayout()) { + if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { const struct_ty = field_ptr.container_ty.castTag(.@"struct"); if (struct_ty != null and struct_ty.?.data.layout == .Packed) { // packed structs are not byte addressable @@ -28066,7 +28129,7 @@ fn beginComptimePtrLoad( } else if (deref.parent) |*parent| { // Update the byte offset (in-place) try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { @@ -28103,7 +28166,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, field_index), + .val = tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -28146,7 +28209,7 @@ fn beginComptimePtrLoad( return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); }, .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull()) return sema.fail(block, src, "attempt to use null value", .{}); + if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); break :opt tv.val; }, else => unreachable, @@ -28181,7 +28244,7 @@ fn beginComptimePtrLoad( }; if (deref.pointee) |tv| { - if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { + if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) { deref.parent = .{ .tv = tv, .byte_offset = 0 }; } } @@ -28196,15 +28259,15 @@ fn bitCast( inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); try sema.resolveTypeLayout(dest_ty); const old_ty = try sema.resolveTypeFields(sema.typeOf(inst)); try sema.resolveTypeLayout(old_ty); - const target = sema.mod.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const old_bits = old_ty.bitSize(target); + const dest_bits = dest_ty.bitSize(mod); + const old_bits = old_ty.bitSize(mod); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ @@ -28233,20 +28296,20 @@ fn bitCastVal( new_ty: Type, buffer_offset: usize, ) !?Value { - const target = sema.mod.getTarget(); - if (old_ty.eql(new_ty, sema.mod)) return val; + const mod = sema.mod; + if (old_ty.eql(new_ty, 
mod)) return val; // For types with well-defined memory layouts, we serialize them a byte buffer, // then deserialize to the new type. - const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) { + val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) { error.ReinterpretDeclRef => return null, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}), }; - return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena); + return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena); } fn coerceArrayPtrToSlice( @@ -28272,7 +28335,8 @@ fn coerceArrayPtrToSlice( fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { const dest_info = dest_ty.ptrInfo().data; const inst_info = inst_ty.ptrInfo().data; - const len0 = (inst_info.pointee_type.zigTypeTag() == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or + const mod = sema.mod; + const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); @@ -28298,17 +28362,16 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul } if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true; if (len0) return true; - const target = sema.mod.getTarget(); const inst_align = if (inst_info.@"align" != 0) inst_info.@"align" else - inst_info.pointee_type.abiAlignment(target); + inst_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > inst_align) { in_memory_result.* = .{ .ptr_alignment = .{ @@ -28327,18 +28390,19 @@ fn coerceCompatiblePtrs( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); if (try sema.resolveMaybeUndefVal(inst)) |val| { - if (!val.isUndef() and val.isNull() and !dest_ty.isAllowzeroPtr()) { + if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. 
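
[Annotation, not part of the patch: the serialize-to-bytes-then-deserialize strategy in bitCastVal above has a directly observable analogue. A standalone test; 0x3f800000 is the bit pattern of 1.0 as f32:]

    const std = @import("std");

    test "@bitCast matches a byte-buffer roundtrip" {
        const x: u32 = 0x3f800000;
        const direct = @bitCast(f32, x);
        const via_bytes = std.mem.bytesToValue(f32, std.mem.asBytes(&x));
        try std.testing.expectEqual(direct, via_bytes);
        try std.testing.expectEqual(@as(f32, 1.0), direct);
    }
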
return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src, null); - const inst_allows_zero = inst_ty.zigTypeTag() != .Pointer or inst_ty.ptrAllowsZero(); - if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); + if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const actual_ptr = if (inst_ty.isSlice()) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) @@ -28364,6 +28428,7 @@ fn coerceEnumToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType() orelse { @@ -28396,7 +28461,7 @@ fn coerceEnumToUnion( const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(field.ty); - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); @@ -28449,7 +28514,7 @@ fn coerceEnumToUnion( errdefer if (msg) |some| some.destroy(sema.gpa); for (union_obj.fields.values(), 0..) |field, i| { - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { const err_msg = msg orelse try sema.errMsg( block, inst_src, @@ -28469,7 +28534,7 @@ fn coerceEnumToUnion( } // If the union has all fields 0 bits, the union value is just the enum value. 
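
[Annotation, not part of the patch: the all-zero-bit case just noted, implemented in the next hunk via unionHasAllZeroBitFieldTypes, is visible from user code. A standalone test:]

    const std = @import("std");

    const U = union(enum) { a, b }; // both payloads are zero-bit

    test "enum-to-union coercion with zero-bit payloads is just the tag" {
        const u: U = .a; // handled by the coerceEnumToUnion path above
        try std.testing.expect(u == .a);
    }
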
- if (union_ty.unionHasAllZeroBitFieldTypes()) { + if (union_ty.unionHasAllZeroBitFieldTypes(mod)) { return block.addBitCast(union_ty, enum_tag); } @@ -28487,7 +28552,7 @@ fn coerceEnumToUnion( while (it.next()) |field| : (field_index += 1) { const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; + if (!(try sema.typeHasRuntimeBits(field_ty))) continue; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); } try sema.addDeclaredHereNote(msg, union_ty); @@ -29066,12 +29131,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo } fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); const tv = try decl.typedValue(); - if (tv.ty.zigTypeTag() != .Fn) return; + if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try sema.mod.ensureFuncBodyAnalysisQueued(func.data); + try mod.ensureFuncBodyAnalysisQueued(func.data); } fn analyzeRef( @@ -29124,8 +29190,9 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - const elem_ty = switch (ptr_ty.zigTypeTag()) { + const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ty.childType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29196,12 +29263,13 @@ fn analyzeIsNull( operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { if (opt_val.isUndef()) { return sema.addConstUndef(result_ty); } - const is_null = opt_val.isNull(); + const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; if (bool_value) { return Air.Inst.Ref.bool_true; @@ -29213,10 +29281,10 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } - if (operand_ty.zigTypeTag() != .Optional and !operand_ty.isPtrLikeOptional()) { + if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { return inverted_non_null_res; } try sema.requireRuntimeBlock(block, src, null); @@ -29230,11 +29298,12 @@ fn analyzePtrIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(operand); - assert(ptr_ty.zigTypeTag() == .Pointer); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); const child_ty = ptr_ty.childType(); - const child_tag = child_ty.zigTypeTag(); + const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false; assert(child_tag == .ErrorUnion); @@ -29251,14 +29320,15 @@ fn analyzeIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) 
CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - const ot = operand_ty.zigTypeTag(); + const ot = operand_ty.zigTypeTag(mod); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); const payload_ty = operand_ty.errorUnionPayload(); - if (payload_ty.zigTypeTag() == .NoReturn) { + if (payload_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -29375,22 +29445,21 @@ fn analyzeSlice( end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); - const target = sema.mod.getTarget(); - const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { + const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ptr_ty.elemType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), }; - const mod = sema.mod; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty: Type = undefined; var ptr_sentinel: ?Value = null; - switch (ptr_ptr_child_ty.zigTypeTag()) { + switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { ptr_sentinel = ptr_ptr_child_ty.sentinel(); elem_ty = ptr_ptr_child_ty.childType(); @@ -29398,7 +29467,7 @@ fn analyzeSlice( .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { .One => { const double_child_ty = ptr_ptr_child_ty.childType(); - if (double_child_ty.zigTypeTag() == .Array) { + if (double_child_ty.zigTypeTag(mod) == .Array) { ptr_sentinel = double_child_ty.sentinel(); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; @@ -29417,7 +29486,7 @@ fn analyzeSlice( if (ptr_ptr_child_ty.ptrSize() == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { - if (ptr_val.isNull()) { + if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); } } @@ -29448,7 +29517,7 @@ fn analyzeSlice( // we might learn of the length because it is a comptime-known slice value. 
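
[Annotation, not part of the patch: for orientation, the comptime-known length case that analyzeSlice probes for below. A standalone test:]

    const std = @import("std");

    test "slicing a pointer to an array has comptime-known bounds" {
        var arr = [_]u8{ 1, 2, 3, 4 };
        const s = arr[1..3]; // *[2]u8: the length is part of the type
        comptime std.debug.assert(@TypeOf(s) == *[2]u8);
        try std.testing.expectEqual(@as(usize, 2), s.len);
    }
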
var end_is_len = uncasted_end_opt == .none; const end = e: { - if (array_ty.zigTypeTag() == .Array) { + if (array_ty.zigTypeTag(mod) == .Array) { const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); if (!end_is_len) { @@ -29587,8 +29656,8 @@ fn analyzeSlice( } if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; - const start_int = start_val.getUnsignedInt(sema.mod.getTarget()).?; - const end_int = end_val.getUnsignedInt(sema.mod.getTarget()).?; + const start_int = start_val.getUnsignedInt(mod).?; + const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod); @@ -29641,7 +29710,7 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = new_len_val.toUnsignedInt(target); + const new_len_int = new_len_val.toUnsignedInt(mod); const return_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod), @@ -29724,7 +29793,7 @@ fn analyzeSlice( } // requirement: end <= len - const opt_len_inst = if (array_ty.zigTypeTag() == .Array) + const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) else if (slice_ty.isSlice()) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { @@ -29778,14 +29847,15 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - assert(lhs_ty.isNumeric()); - assert(rhs_ty.isNumeric()); + assert(lhs_ty.isNumeric(mod)); + assert(rhs_ty.isNumeric(mod)); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); const target = sema.mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to @@ -29805,14 +29875,14 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. 
undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) { + if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) { try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) { + } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) { try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29827,16 +29897,16 @@ fn cmpNumeric( return Air.Inst.Ref.bool_false; } } - if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema)) { + if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) { + if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29844,10 +29914,10 @@ fn cmpNumeric( } } else { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) { + if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. 
const try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29901,11 +29971,11 @@ fn cmpNumeric( const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); + (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); + (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; @@ -29926,7 +29996,7 @@ fn cmpNumeric( .lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, }; if (!rhs_is_signed) { - switch (lhs_val.orderAgainstZero()) { + switch (lhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return Air.Inst.Ref.bool_true, @@ -29959,13 +30029,13 @@ fn cmpNumeric( } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { - lhs_bits = lhs_val.intBitCountTwosComp(target); + lhs_bits = lhs_val.intBitCountTwosComp(mod); } lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { - const int_info = lhs_ty.intInfo(target); + const int_info = lhs_ty.intInfo(mod); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -29985,7 +30055,7 @@ fn cmpNumeric( .lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, }; if (!lhs_is_signed) { - switch (rhs_val.orderAgainstZero()) { + switch (rhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return Air.Inst.Ref.bool_true, @@ -30018,13 +30088,13 @@ fn cmpNumeric( } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { - rhs_bits = rhs_val.intBitCountTwosComp(target); + rhs_bits = rhs_val.intBitCountTwosComp(mod); } rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { - const int_info = rhs_ty.intInfo(target); + const int_info = rhs_ty.intInfo(mod); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -30032,7 +30102,7 @@ fn cmpNumeric( const max_bits = std.math.max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; - break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); + break :blk try mod.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); @@ -30040,13 +30110,20 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs); } -/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int. 
-/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result. +/// Asserts that LHS value is an int or comptime int and not undefined, and +/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to +/// determine whether `op` has a guaranteed result. /// If it cannot be determined, returns null. /// Otherwise returns a bool for the guaranteed comparison operation. -fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool { - const rhs_info = rhs_ty.intInfo(target); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable; +fn compareIntsOnlyPossibleResult( + sema: *Sema, + lhs_val: Value, + op: std.math.CompareOperator, + rhs_ty: Type, +) Allocator.Error!?bool { + const mod = sema.mod; + const rhs_info = rhs_ty.intInfo(mod); + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -30078,7 +30155,7 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value }; const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj; + const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj; // No sized type can have more than 65535 bits. // The RHS type operand is either a runtime value or sized (but undefined) constant. @@ -30111,12 +30188,11 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value .max = false, }; - var ty_buffer: Type.Payload.Bits = .{ - .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned }, - .data = @intCast(u16, req_bits), - }; - const ty = Type.initPayload(&ty_buffer.base); - const pop_count = lhs_val.popCount(ty, target); + const ty = try mod.intType( + if (is_negative) .signed else .unsigned, + @intCast(u16, req_bits), + ); + const pop_count = lhs_val.popCount(ty, mod); if (is_negative) { break :edge .{ @@ -30152,10 +30228,11 @@ fn cmpVector( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - assert(lhs_ty.zigTypeTag() == .Vector); - assert(rhs_ty.zigTypeTag() == .Vector); + assert(lhs_ty.zigTypeTag(mod) == .Vector); + assert(rhs_ty.zigTypeTag(mod) == .Vector); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } }); @@ -30296,16 +30373,17 @@ fn resolvePeerTypes( instructions: []const Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { + const mod = sema.mod; switch (instructions.len) { 0 => return Type.initTag(.noreturn), 1 => return sema.typeOf(instructions[0]), else => {}, } - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var chosen = instructions[0]; - // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(). + // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod). 
// * ErrorSet: this is an override // * ErrorUnion: this is an override of the error set only // * other: at the end we make an ErrorUnion with the other thing and this @@ -30318,8 +30396,8 @@ fn resolvePeerTypes( const candidate_ty = sema.typeOf(candidate); const chosen_ty = sema.typeOf(chosen); - const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(); - const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(); + const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod); + const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod); // If the candidate can coerce into our chosen type, we're done. // If the chosen type can coerce into the candidate, use that. @@ -30347,8 +30425,8 @@ fn resolvePeerTypes( continue; }, .Int => { - const chosen_info = chosen_ty.intInfo(target); - const candidate_info = candidate_ty.intInfo(target); + const chosen_info = chosen_ty.intInfo(mod); + const candidate_info = candidate_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; @@ -30537,7 +30615,7 @@ fn resolvePeerTypes( // *[N]T to []T if ((cand_info.size == .Many or cand_info.size == .Slice) and chosen_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` convert_to_slice = false; @@ -30546,7 +30624,7 @@ fn resolvePeerTypes( continue; } if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` @@ -30559,8 +30637,8 @@ fn resolvePeerTypes( // Keep the one whose element type can be coerced into. if (chosen_info.size == .One and cand_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array and - cand_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array) { const chosen_elem_ty = chosen_info.pointee_type.childType(); const cand_elem_ty = cand_info.pointee_type.childType(); @@ -30631,7 +30709,7 @@ fn resolvePeerTypes( .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo().data; seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30639,7 +30717,7 @@ fn resolvePeerTypes( // *[N]T to ?![*]T // *[N]T to ?![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30648,7 +30726,7 @@ fn resolvePeerTypes( }, .ErrorUnion => { const chosen_ptr_ty = chosen_ty.errorUnionPayload(); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo().data; seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30656,7 +30734,7 @@ fn resolvePeerTypes( // *[N]T to E![*]T // *[N]T to E![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30664,7 +30742,7 @@ fn resolvePeerTypes( } }, .Fn => { - if (!cand_info.mutable 
and cand_info.pointee_type.zigTypeTag() == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { + if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30697,16 +30775,16 @@ fn resolvePeerTypes( const chosen_child_ty = chosen_ty.childType(); const candidate_child_ty = candidate_ty.childType(); - if (chosen_child_ty.zigTypeTag() == .Int and candidate_child_ty.zigTypeTag() == .Int) { - const chosen_info = chosen_child_ty.intInfo(target); - const candidate_info = candidate_child_ty.intInfo(target); + if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { + const chosen_info = chosen_child_ty.intInfo(mod); + const candidate_info = candidate_child_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; chosen_i = candidate_i + 1; } continue; } - if (chosen_child_ty.zigTypeTag() == .Float and candidate_child_ty.zigTypeTag() == .Float) { + if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) { if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; @@ -30725,7 +30803,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag() == .Fn) { + .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { continue; } @@ -30790,27 +30868,27 @@ fn resolvePeerTypes( // the source locations. 
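
[Annotation, not part of the patch: the integer branch of resolvePeerTypes above, which keeps the candidate with more bits, is the rule behind this standalone test:]

    const std = @import("std");

    test "peer type resolution picks the wider integer" {
        var cond = true;
        const a: u8 = 1;
        const b: u16 = 256;
        const r = if (cond) a else b; // peers u8 and u16 resolve to u16
        comptime std.debug.assert(@TypeOf(r) == u16);
        try std.testing.expect(r == 1);
    }
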
const chosen_src = candidate_srcs.resolve( sema.gpa, - sema.mod.declPtr(block.src_decl), + mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( sema.gpa, - sema.mod.declPtr(block.src_decl), + mod.declPtr(block.src_decl), candidate_i + 1, ); const msg = msg: { const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ - chosen_ty.fmt(sema.mod), - candidate_ty.fmt(sema.mod), + chosen_ty.fmt(mod), + candidate_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (chosen_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)}); if (candidate_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)}); break :msg msg; }; @@ -30826,72 +30904,73 @@ fn resolvePeerTypes( info.data.sentinel = chosen_child_ty.sentinel(); info.data.size = .Slice; info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(); + info.data.pointee_type = chosen_child_ty.elemType2(mod); - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); } if (seen_const) { // turn []T => []const T - switch (chosen_ty.zigTypeTag()) { + switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { const ptr_ty = chosen_ty.errorUnionPayload(); var info = ptr_ty.ptrInfo(); info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, .Pointer => { var info = chosen_ty.ptrInfo(); info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, else => return chosen_ty, } } if (any_are_null) { - const opt_ty = switch (chosen_ty.zigTypeTag()) { + const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, else => try Type.optional(sema.arena, chosen_ty), }; const set_ty = err_set_ty orelse return opt_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); } - if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) { + if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { .ErrorSet => return ty, .ErrorUnion => { const payload_ty = chosen_ty.errorUnionPayload(); - return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod); + return try Type.errorUnion(sema.arena, 
ty, payload_ty, mod); }, - else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod), + else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod), }; return chosen_ty; } pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void { + const mod = sema.mod; try sema.resolveTypeFully(fn_info.return_type); - if (sema.mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError()) { + if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } @@ -30943,7 +31022,8 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { } pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Struct => return sema.resolveStructLayout(ty), .Union => return sema.resolveUnionLayout(ty), .Array => { @@ -31021,7 +31101,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { struct_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, struct_obj.srcLoc(sema.mod), @@ -31043,7 +31123,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { }; for (struct_obj.fields.values(), 0..) |field, i| { - optimized_order[i] = if (field.ty.hasRuntimeBits()) + optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) @intCast(u32, i) else Module.Struct.omitted_field; @@ -31054,11 +31134,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { sema: *Sema, fn lessThan(ctx: @This(), a: u32, b: u32) bool { + const m = ctx.sema.mod; if (a == Module.Struct.omitted_field) return false; if (b == Module.Struct.omitted_field) return true; - const target = ctx.sema.mod.getTarget(); - return ctx.struct_obj.fields.values()[a].ty.abiAlignment(target) > - ctx.struct_obj.fields.values()[b].ty.abiAlignment(target); + return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) > + ctx.struct_obj.fields.values()[b].ty.abiAlignment(m); } }; mem.sort(u32, optimized_order, AlignSortContext{ @@ -31073,11 +31153,10 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; - const target = mod.getTarget(); var fields_bit_sum: u64 = 0; for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(target); + fields_bit_sum += field.ty.bitSize(mod); } const decl_index = struct_obj.owner_decl; @@ -31178,32 +31257,29 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, fields_bit_sum), - }; - struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { - const target = sema.mod.getTarget(); + const mod = sema.mod; - if
(!backing_int_ty.isInt()) { + if (!backing_int_ty.isInt(mod)) { return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)}); } - if (backing_int_ty.bitSize(target) != fields_bit_sum) { + if (backing_int_ty.bitSize(mod) != fields_bit_sum) { return sema.fail( block, src, "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}", - .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum }, + .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum }, ); } } fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (!ty.isIndexable()) { + const mod = sema.mod; + if (!ty.isIndexable(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -31215,12 +31291,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { } fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (ty.zigTypeTag() == .Pointer) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Pointer) { switch (ty.ptrSize()) { .Slice, .Many, .C => return, .One => { const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() == .Array) return; + if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; }, @@ -31270,7 +31347,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { union_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (union_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, union_obj.srcLoc(sema.mod), @@ -31285,6 +31362,23 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // for hasRuntimeBits() of each field, so we need "requires comptime" // to be known already before this function returns. pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { + const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + return switch (ty.tag()) { .u1, .u8, @@ -31349,8 +31443,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -31360,11 +31452,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .comptime_float, .enum_literal, .type_info, - // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -31387,7 +31474,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); @@ -31474,7 +31561,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => { const child_ty = try sema.resolveTypeFields(ty.childType()); return sema.resolveTypeFully(child_ty); @@ -31840,7 +31928,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void type_body_len: u32 = 0, align_body_len: u32 = 0, init_body_len: u32 = 0, - type_ref: Air.Inst.Ref = .none, + type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); var any_inits = false; @@ -31967,7 +32055,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; field.ty = try field_ty.copy(decl_arena_allocator); - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -31981,7 +32069,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32010,7 +32098,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32191,7 +32279,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; - if (int_tag_ty.zigTypeTag() != .Int and int_tag_ty.zigTypeTag() != .ComptimeInt) { + if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); } @@ -32220,7 +32308,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { + if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); } // The fields of the union must match the enum exactly. 
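The next hunk in semaUnionFields retypes `tag_ref` from `Zir.Inst.Ref` to `Air.Inst.Ref`: the raw ref read out of `zir.extra` is a ZIR ref, and `sema.resolveInst` maps it to an AIR ref, which are now distinct enum types. Both encodings reserve the low integer tag values for InternPool constants, so converting between a `Ref` and an instruction `Index` is a fixed offset by `InternPool.static_len`. As a minimal sketch (assuming the `.none` sentinel is never passed), the inverse of the `indexToRef` shown later in this patch would look like:

    pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
        // Values below ref_start_index name interned constants, not ZIR
        // instructions; the `.none` sentinel is assumed to be handled by
        // the caller before reaching here.
        const ref_int = @enumToInt(inst);
        if (ref_int >= ref_start_index) {
            return ref_int - ref_start_index;
        } else {
            return null;
        }
    }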
@@ -32281,7 +32369,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk align_ref; } else .none; - const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { + const tag_ref: Air.Inst.Ref = if (has_tag) blk: { const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(tag_ref); @@ -32391,7 +32479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32420,7 +32508,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32673,6 +32761,29 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// that the types are already resolved. /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { + const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return Value.zero; + } else { + return null; + } + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + switch (ty.tag()) { .f16, .f32, @@ -32712,10 +32823,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set, .error_set_merged, .error_union, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .single_const_pointer_to_comptime_int, .array_sentinel, @@ -32803,7 +32910,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const resolved_ty = try sema.resolveTypeFields(ty); const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; // An explicit tag type is always provided for enum_numbered. 
- if (enum_obj.tag_ty.hasRuntimeBits()) { + if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) { return null; } if (enum_obj.fields.count() == 1) { @@ -32819,7 +32926,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .enum_full => { const resolved_ty = try sema.resolveTypeFields(ty); const enum_obj = resolved_ty.castTag(.enum_full).?.data; - if (enum_obj.tag_ty.hasRuntimeBits()) { + if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) { return null; } switch (enum_obj.fields.count()) { @@ -32843,7 +32950,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (tag_ty.zigTypeTag() != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { + if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { return Value.zero; } else { return null; @@ -32883,13 +32990,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .null => return Value.null, .undefined => return Value.initTag(.undef), - .int_unsigned, .int_signed => { - if (ty.cast(Type.Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } - }, .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) return Value.initTag(.empty_array); @@ -32919,6 +33019,89 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { + switch (ty.ip_index) { + .u1_type => return .u1_type, + .u8_type => return .u8_type, + .i8_type => return .i8_type, + .u16_type => return .u16_type, + .i16_type => return .i16_type, + .u29_type => return .u29_type, + .u32_type => return .u32_type, + .i32_type => return .i32_type, + .u64_type => return .u64_type, + .i64_type => return .i64_type, + .u80_type => return .u80_type, + .u128_type => return .u128_type, + .i128_type => return .i128_type, + .usize_type => return .usize_type, + .isize_type => return .isize_type, + .c_char_type => return .c_char_type, + .c_short_type => return .c_short_type, + .c_ushort_type => return .c_ushort_type, + .c_int_type => return .c_int_type, + .c_uint_type => return .c_uint_type, + .c_long_type => return .c_long_type, + .c_ulong_type => return .c_ulong_type, + .c_longlong_type => return .c_longlong_type, + .c_ulonglong_type => return .c_ulonglong_type, + .c_longdouble_type => return .c_longdouble_type, + .f16_type => return .f16_type, + .f32_type => return .f32_type, + .f64_type => return .f64_type, + .f80_type => return .f80_type, + .f128_type => return .f128_type, + .anyopaque_type => return .anyopaque_type, + .bool_type => return .bool_type, + .void_type => return .void_type, + .type_type => return .type_type, + .anyerror_type => return .anyerror_type, + .comptime_int_type => return .comptime_int_type, + .comptime_float_type => return .comptime_float_type, + .noreturn_type => return .noreturn_type, + .anyframe_type => return .anyframe_type, + .null_type => return .null_type, + .undefined_type => return .undefined_type, + .enum_literal_type => return .enum_literal_type, + .atomic_order_type => return .atomic_order_type, + .atomic_rmw_op_type => return .atomic_rmw_op_type, + .calling_convention_type => return .calling_convention_type, + .address_space_type => return .address_space_type, + .float_mode_type => return .float_mode_type, + .reduce_op_type => return .reduce_op_type, + .call_modifier_type => return .call_modifier_type, + .prefetch_options_type => return .prefetch_options_type, + .export_options_type => return
.export_options_type, + .extern_options_type => return .extern_options_type, + .type_info_type => return .type_info_type, + .manyptr_u8_type => return .manyptr_u8_type, + .manyptr_const_u8_type => return .manyptr_const_u8_type, + .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type => return .const_slice_u8_type, + .anyerror_void_error_union_type => return .anyerror_void_error_union_type, + .generic_poison_type => return .generic_poison_type, + .var_args_param_type => return .var_args_param_type, + .empty_struct_type => return .empty_struct_type, + + // values + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .one => unreachable, + .one_usize => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + _ => {}, + + .none => unreachable, + } switch (ty.tag()) { .u1 => return .u1_type, .u8 => return .u8_type, @@ -32934,6 +33117,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i128 => return .i128_type, .usize => return .usize_type, .isize => return .isize_type, + .c_char => return .c_char_type, .c_short => return .c_short_type, .c_ushort => return .c_ushort_type, .c_int => return .c_int_type, @@ -32966,17 +33150,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .address_space => return .address_space_type, .float_mode => return .float_mode_type, .reduce_op => return .reduce_op_type, - .modifier => return .modifier_type, + .modifier => return .call_modifier_type, .prefetch_options => return .prefetch_options_type, .export_options => return .export_options_type, .extern_options => return .extern_options_type, .type_info => return .type_info_type, .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, - .fn_noreturn_no_args => return .fn_noreturn_no_args_type, - .fn_void_no_args => return .fn_void_no_args_type, - .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, .const_slice_u8 => return .const_slice_u8_type, .anyerror_void_error_union => return .anyerror_void_error_union_type, @@ -33186,7 +33366,8 @@ const DerefResult = union(enum) { }; fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { error.RuntimeLoad => return DerefResult{ .runtime_load = {} }, else => |e| return e, @@ -33211,7 +33392,7 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value // The type is not in-memory coercible or the direct dereference failed, so it must // be bitcast according to the pointer type we are performing the load through. 
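Bitcasting the loaded bytes is only meaningful when the result type has a well-defined in-memory layout, which the check below enforces. As an illustrative example of the distinction (not part of this change):

    const WellDefined = extern struct { a: u16, b: u16 }; // fixed field order and offsets
    const AutoLayout = struct { a: u16, b: u16 }; // compiler may reorder fields; layout unspecified

A load through a pointer to `AutoLayout` that would require a bitcast falls into the `needed_well_defined` result below.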
- if (!load_ty.hasWellDefinedLayout()) { + if (!load_ty.hasWellDefinedLayout(mod)) { return DerefResult{ .needed_well_defined = load_ty }; } @@ -33253,6 +33434,7 @@ fn typePtrOrOptionalPtrTy( ty: Type, buf: *Type.Payload.ElemType, ) !?Type { + const mod = sema.mod; switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -33281,7 +33463,7 @@ fn typePtrOrOptionalPtrTy( .optional => { const child_type = ty.optionalChild(buf); - if (child_type.zigTypeTag() != .Pointer) return null; + if (child_type.zigTypeTag(mod) != .Pointer) return null; const info = child_type.ptrInfo().data; switch (info.size) { @@ -33310,6 +33492,23 @@ fn typePtrOrOptionalPtrTy( /// TODO merge these implementations together with the "advanced"/opt_sema pattern seen /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { + const mod = sema.mod; + if (ty.ip_index != .none) { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + } + } return switch (ty.tag()) { .u1, .u8, @@ -33374,8 +33573,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -33385,11 +33582,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .comptime_float, .enum_literal, .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -33412,7 +33604,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { return sema.typeRequiresComptime(child_ty); @@ -33504,7 +33696,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) { + const mod = sema.mod; + return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; @@ -33512,19 +33705,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { fn typeAbiSize(sema: *Sema, ty: Type) !u64 { try sema.resolveTypeLayout(ty); - const target = sema.mod.getTarget(); - return ty.abiSize(target); + return ty.abiSize(sema.mod); } fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { - const target = sema.mod.getTarget(); - return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar; + return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; } /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.Union.Field.normalAlignment`. 
fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { - if (field.ty.zigTypeTag() == .NoReturn) { + const mod = sema.mod; + if (field.ty.zigTypeTag(mod) == .NoReturn) { return @as(u32, 0); } else if (field.abi_align == 0) { return sema.typeAbiAlignment(field.ty); @@ -33605,13 +33797,14 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { } fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -33620,13 +33813,13 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { } fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -33645,7 +33838,8 @@ fn numberAddWrapScalar( ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty); } @@ -33663,7 +33857,8 @@ fn intSub( rhs: Value, ty: Type, ) !Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; @@ -33678,13 +33873,13 @@ fn intSub( } fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
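The TODO above (repeated from `intAddScalar`) suggests attempting native machine arithmetic before allocating big integers. One possible shape for such a fast path, purely illustrative: `asSmallInt` is a hypothetical accessor that yields the value as an `i64` when it fits, and callers would take the BigInt route below whenever this returns null:

    fn intSubScalarFast(sema: *Sema, lhs: Value, rhs: Value) !?Value {
        // Hypothetical accessor: returns the value as an i64 if it fits.
        const lhs_small = lhs.asSmallInt() orelse return null;
        const rhs_small = rhs.asSmallInt() orelse return null;
        // On overflow, signal the caller to fall back to the BigInt path.
        const result = std.math.sub(i64, lhs_small, rhs_small) catch return null;
        return try Value.Tag.int_i64.create(sema.arena, result);
    }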
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -33703,7 +33898,8 @@ fn numberSubWrapScalar( ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty); } @@ -33721,14 +33917,15 @@ fn floatAdd( rhs: Value, float_type: Type, ) !Value { - if (float_type.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType()); + scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33778,14 +33975,15 @@ fn floatSub( rhs: Value, float_type: Type, ) !Value { - if (float_type.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType()); + scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33835,7 +34033,8 @@ fn intSubWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -33843,7 +34042,7 @@ fn intSubWithOverflow( var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); + const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -33861,13 +34060,13 @@ fn intSubWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -33889,13 +34088,14 @@ fn floatToInt( float_ty: Type, int_ty: Type, ) CompileError!Value { - if (float_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_ty.zigTypeTag(mod) == .Vector) { const elem_ty = float_ty.childType(); const result_data = try sema.arena.alloc(Value, float_ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(sema.mod, i, &buf); - scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType()); + scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33976,7 +34176,8 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); switch (val.tag()) { .zero, .undef, @@ -33985,9 +34186,9 @@ fn intFitsInType( .one, .bool_true, - => switch (ty.zigTypeTag()) { + => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return switch (info.signedness) { .signed => info.bits >= 2, .unsigned => info.bits >= 1, @@ -33997,9 +34198,9 @@ fn intFitsInType( else => unreachable, }, - .lazy_align => switch (ty.zigTypeTag()) { + .lazy_align => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); // If it is u16 or bigger we know the alignment fits without resolving it. if (info.bits >= max_needed_bits) return true; @@ -34011,9 +34212,9 @@ fn intFitsInType( .ComptimeInt => return true, else => unreachable, }, - .lazy_size => switch (ty.zigTypeTag()) { + .lazy_size => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); // If it is u64 or bigger we know the size fits without resolving it. 
if (info.bits >= max_needed_bits) return true; @@ -34026,41 +34227,41 @@ fn intFitsInType( else => unreachable, }, - .int_u64 => switch (ty.zigTypeTag()) { + .int_u64 => switch (ty.zigTypeTag(mod)) { .Int => { const x = val.castTag(.int_u64).?.data; if (x == 0) return true; - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); return info.bits >= needed_bits; }, .ComptimeInt => return true, else => unreachable, }, - .int_i64 => switch (ty.zigTypeTag()) { + .int_i64 => switch (ty.zigTypeTag(mod)) { .Int => { const x = val.castTag(.int_i64).?.data; if (x == 0) return true; - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.signedness == .unsigned and x < 0) return false; var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, target, sema)).fitsInTwosComp(info.signedness, info.bits); + return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, }, - .int_big_positive => switch (ty.zigTypeTag()) { + .int_big_positive => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, }, - .int_big_negative => switch (ty.zigTypeTag()) { + .int_big_negative => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, @@ -34068,7 +34269,7 @@ fn intFitsInType( }, .the_only_possible_value => { - assert(ty.intInfo(target).bits == 0); + assert(ty.intInfo(mod).bits == 0); return true; }, @@ -34077,9 +34278,9 @@ fn intFitsInType( .decl_ref, .function, .variable, - => switch (ty.zigTypeTag()) { + => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { .signed => info.bits > ptr_bits, @@ -34091,9 +34292,9 @@ fn intFitsInType( }, .aggregate => { - assert(ty.zigTypeTag() == .Vector); + assert(ty.zigTypeTag(mod) == .Vector); for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) { + if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { if (vector_index) |some| some.* = i; return false; } @@ -34122,11 +34323,8 @@ fn intInRange( } /// Asserts the type is an enum. 
-fn enumHasInt( - sema: *Sema, - ty: Type, - int: Value, -) CompileError!bool { +fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { + const mod = sema.mod; switch (ty.tag()) { .enum_nonexhaustive => unreachable, .enum_full => { @@ -34157,11 +34355,7 @@ fn enumHasInt( const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); + const tag_ty = try mod.intType(.unsigned, bits); return sema.intInRange(tag_ty, int, fields_len); }, .atomic_order, @@ -34186,7 +34380,8 @@ fn intAddWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { @@ -34194,7 +34389,7 @@ fn intAddWithOverflow( var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); + const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -34212,13 +34407,13 @@ fn intAddWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -34243,14 +34438,15 @@ fn compareAll( rhs: Value, ty: Type, ) CompileError!bool { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) { + if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } } @@ -34270,7 +34466,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(lhs, rhs, ty), .neq => return !(try sema.valuesEqual(lhs, rhs, ty)), - else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema), + else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod, sema), } } @@ -34291,14 +34487,15 @@ fn compareVector( rhs: Value, ty: Type, ) !Value { - assert(ty.zigTypeTag() == .Vector); + const mod = sema.mod; + assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()); + const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34312,10 +34509,10 @@ fn compareVector( /// Handles const-ness and address spaces in particular. /// This code is duplicated in `analyzePtrArithmetic`. fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { + const mod = sema.mod; const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = ptr_ty.elemType2(); + const elem_ty = ptr_ty.elemType2(mod); const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0; - const target = sema.mod.getTarget(); const parent_ty = ptr_ty.childType(); const VI = Type.Payload.Pointer.Data.VectorIndex; @@ -34325,14 +34522,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { alignment: u32 = 0, vector_index: VI = .none, } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: { - const elem_bits = elem_ty.bitSize(target); + const elem_bits = elem_ty.bitSize(mod); if (elem_bits == 0) break :blk .{}; const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); if (!is_packed) break :blk .{}; break :blk .{ .host_size = @intCast(u16, parent_ty.arrayLen()), - .alignment = @intCast(u16, parent_ty.abiAlignment(target)), + .alignment = @intCast(u16, parent_ty.abiAlignment(mod)), .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime, }; } else .{}; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d74fbda93e..dc556942c3 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -71,7 +71,6 @@ pub fn print( level: u8, mod: *Module, ) @TypeOf(writer).Error!void { - const target = mod.getTarget(); var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -117,10 +116,6 @@ pub fn print( .noreturn_type => return writer.writeAll("noreturn"), .null_type => return writer.writeAll("@Type(.Null)"), .undefined_type => return writer.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"), - .fn_void_no_args_type => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), .anyframe_type => return writer.writeAll("anyframe"), .const_slice_u8_type => return writer.writeAll("[]const u8"), @@ -147,7 +142,7 @@ pub fn print( if (level == 0) { return writer.writeAll(".{ ... 
}"); } - if (ty.zigTypeTag() == .Struct) { + if (ty.zigTypeTag(mod) == .Struct) { try writer.writeAll(".{"); const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); @@ -160,7 +155,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, i), + .val = val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -168,7 +163,7 @@ pub fn print( } return writer.writeAll("}"); } else { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); const len = ty.arrayLen(); if (elem_ty.eql(Type.u8, mod)) str: { @@ -177,9 +172,9 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, i); + const elem = val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } const truncated = if (len > max_string_len) " (truncated)" else ""; @@ -194,7 +189,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, i), + .val = val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { @@ -232,25 +227,18 @@ pub fn print( .bool_true => return writer.writeAll("true"), .bool_false => return writer.writeAll("false"), .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_type => { - const int_type = val.castTag(.int_type).?.data; - return writer.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), .lazy_align => { const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(target); + const x = sub_ty.abiAlignment(mod); return writer.print("{d}", .{x}); }, .lazy_size => { const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(target); + const x = sub_ty.abiSize(mod); return writer.print("{d}", .{x}); }, .function => return writer.print("(function '{s}')", .{ @@ -315,7 +303,7 @@ pub fn print( }, writer, level - 1, mod); } - if (field_ptr.container_ty.zigTypeTag() == .Struct) { + if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { switch (field_ptr.container_ty.tag()) { .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), else => { @@ -323,7 +311,7 @@ pub fn print( return writer.print(".{s}", .{field_name}); }, } - } else if (field_ptr.container_ty.zigTypeTag() == .Union) { + } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); } else if (field_ptr.container_ty.isSlice()) { @@ -352,7 +340,7 @@ pub fn print( var i: u32 = 0; try writer.writeAll(".{ "); const elem_tv = TypedValue{ - .ty = ty.elemType2(), + .ty = ty.elemType2(mod), .val = val.castTag(.repeated).?.data, }; const len = ty.arrayLen(); @@ -372,7 +360,7 @@ pub fn print( } try writer.writeAll(".{ "); try print(.{ - .ty = ty.elemType2(), + .ty = ty.elemType2(mod), .val = ty.sentinel().?, }, writer, level - 1, mod); return writer.writeAll(" }"); @@ -382,8 +370,8 @@ pub fn 
print( return writer.writeAll(".{ ... }"); } const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(); - const len = payload.len.toUnsignedInt(target); + const elem_ty = ty.elemType2(mod); + const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { const max_len = @intCast(usize, std.math.min(len, max_string_len)); @@ -394,7 +382,7 @@ pub fn print( var elem_buf: Value.ElemValueBuffer = undefined; const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); if (elem_val.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } // TODO would be nice if this had a bit of unicode awareness. diff --git a/src/Zir.zig b/src/Zir.zig index 2bd5b21f79..1063377fc7 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @This(); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; @@ -2041,448 +2042,95 @@ pub const Inst = struct { /// The position of a ZIR instruction within the `Zir` instructions array. pub const Index = u32; - /// A reference to a TypedValue or ZIR instruction. + /// A reference to a ZIR instruction, or to an InternPool index, or neither. /// - /// If the Ref has a tag in this enum, it refers to a TypedValue. - /// - /// If the value of a Ref does not have a tag, it refers to a ZIR instruction. - /// - /// The first values after the the last tag refer to ZIR instructions which may - /// be derived by subtracting `typed_value_map.len`. - /// - /// When adding a tag to this enum, consider adding a corresponding entry to - /// `primitives` in astgen. + /// If the integer tag value is < InternPool.static_len, then it + /// corresponds to an InternPool index. Otherwise, this refers to a ZIR + /// instruction. /// /// The tag type is specified so that it is safe to bitcast between `[]u32` /// and `[]Ref`.
pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + const_slice_u8_type = 
@enumToInt(InternPool.Index.const_slice_u8_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + /// This Ref does not correspond to any ZIR instruction or constant /// value and may instead be used as a sentinel to indicate null. - none, - - u1_type, - u8_type, - i8_type, - u16_type, - i16_type, - u29_type, - u32_type, - i32_type, - u64_type, - i64_type, - u128_type, - i128_type, - usize_type, - isize_type, - c_char_type, - c_short_type, - c_ushort_type, - c_int_type, - c_uint_type, - c_long_type, - c_ulong_type, - c_longlong_type, - c_ulonglong_type, - c_longdouble_type, - f16_type, - f32_type, - f64_type, - f80_type, - f128_type, - anyopaque_type, - bool_type, - void_type, - type_type, - anyerror_type, - comptime_int_type, - comptime_float_type, - noreturn_type, - anyframe_type, - null_type, - undefined_type, - enum_literal_type, - atomic_order_type, - atomic_rmw_op_type, - calling_convention_type, - address_space_type, - float_mode_type, - reduce_op_type, - modifier_type, - prefetch_options_type, - export_options_type, - extern_options_type, - type_info_type, - manyptr_u8_type, - manyptr_const_u8_type, - fn_noreturn_no_args_type, - fn_void_no_args_type, - fn_naked_noreturn_no_args_type, - fn_ccc_void_no_args_type, - single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - anyerror_void_error_union_type, - generic_poison_type, - - /// `undefined` (untyped) - undef, - /// `0` (comptime_int) - zero, - /// `1` (comptime_int) - one, - /// `{}` - void_value, - /// `unreachable` (noreturn type) - unreachable_value, - /// `null` (untyped) - null_value, - /// `true` - bool_true, - /// `false` - bool_false, - /// `.{}` (untyped) - empty_struct, - /// `0` (usize) - zero_usize, - /// `1` (usize) - one_usize, - /// `std.builtin.CallingConvention.C` - calling_convention_c, - /// `std.builtin.CallingConvention.Inline` - calling_convention_inline, - /// Used for generic parameters where the type and value - /// is not known until generic function instantiation. - generic_poison, - /// This is a special type for variadic parameters of a function call. - /// Casts to it will validate that the type can be passed to a c - /// calling convention function. 
-        var_args_param,
-
+        none = std.math.maxInt(u32),
         _,
-
-        pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
-            .none = undefined,
-
-            .u1_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u1_type),
-            },
-            .u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u8_type),
-            },
-            .i8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i8_type),
-            },
-            .u16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u16_type),
-            },
-            .i16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i16_type),
-            },
-            .u29_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u29_type),
-            },
-            .u32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u32_type),
-            },
-            .i32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i32_type),
-            },
-            .u64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u64_type),
-            },
-            .i64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i64_type),
-            },
-            .u128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u128_type),
-            },
-            .i128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i128_type),
-            },
-            .usize_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.usize_type),
-            },
-            .isize_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.isize_type),
-            },
-            .c_char_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_char_type),
-            },
-            .c_short_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_short_type),
-            },
-            .c_ushort_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ushort_type),
-            },
-            .c_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_int_type),
-            },
-            .c_uint_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_uint_type),
-            },
-            .c_long_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_long_type),
-            },
-            .c_ulong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ulong_type),
-            },
-            .c_longlong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_longlong_type),
-            },
-            .c_ulonglong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ulonglong_type),
-            },
-            .c_longdouble_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_longdouble_type),
-            },
-            .f16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f16_type),
-            },
-            .f32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f32_type),
-            },
-            .f64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f64_type),
-            },
-            .f80_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f80_type),
-            },
-            .f128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f128_type),
-            },
-            .anyopaque_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyopaque_type),
-            },
-            .bool_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.bool_type),
-            },
-            .void_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.void_type),
-            },
-            .type_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.type_type),
-            },
-            .anyerror_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyerror_type),
-            },
-            .comptime_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.comptime_int_type),
-            },
-            .comptime_float_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.comptime_float_type),
-            },
-            .noreturn_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.noreturn_type),
-            },
-            .anyframe_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyframe_type),
-            },
-            .null_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.null_type),
-            },
-            .undefined_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.undefined_type),
-            },
-            .fn_noreturn_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_noreturn_no_args_type),
-            },
-            .fn_void_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_void_no_args_type),
-            },
-            .fn_naked_noreturn_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_naked_noreturn_no_args_type),
-            },
-            .fn_ccc_void_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_ccc_void_no_args_type),
-            },
-            .single_const_pointer_to_comptime_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
-            },
-            .const_slice_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.const_slice_u8_type),
-            },
-            .anyerror_void_error_union_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyerror_void_error_union_type),
-            },
-            .generic_poison_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.generic_poison_type),
-            },
-            .enum_literal_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.enum_literal_type),
-            },
-            .manyptr_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.manyptr_u8_type),
-            },
-            .manyptr_const_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.manyptr_const_u8_type),
-            },
-            .atomic_order_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.atomic_order_type),
-            },
-            .atomic_rmw_op_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.atomic_rmw_op_type),
-            },
-            .calling_convention_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.calling_convention_type),
-            },
-            .address_space_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.address_space_type),
-            },
-            .float_mode_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.float_mode_type),
-            },
-            .reduce_op_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.reduce_op_type),
-            },
-            .modifier_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.modifier_type),
-            },
-            .prefetch_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.prefetch_options_type),
-            },
-            .export_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.export_options_type),
-            },
-            .extern_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.extern_options_type),
-            },
-            .type_info_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.type_info_type),
-            },
-
-            .undef = .{
-                .ty = Type.initTag(.undefined),
-                .val = Value.initTag(.undef),
-            },
-            .zero = .{
-                .ty = Type.initTag(.comptime_int),
-                .val = Value.initTag(.zero),
-            },
-            .zero_usize = .{
-                .ty = Type.initTag(.usize),
-                .val = Value.initTag(.zero),
-            },
-            .one = .{
-                .ty = Type.initTag(.comptime_int),
-                .val = Value.initTag(.one),
-            },
-            .one_usize = .{
-                .ty = Type.initTag(.usize),
-                .val = Value.initTag(.one),
-            },
-            .void_value = .{
-                .ty = Type.initTag(.void),
-                .val = Value.initTag(.void_value),
-            },
-            .unreachable_value = .{
-                .ty = Type.initTag(.noreturn),
-                .val = Value.initTag(.unreachable_value),
-            },
-            .null_value = .{
-                .ty = Type.initTag(.null),
-                .val = Value.initTag(.null_value),
-            },
-            .bool_true = .{
-                .ty = Type.initTag(.bool),
-                .val = Value.initTag(.bool_true),
-            },
-            .bool_false = .{
-                .ty = Type.initTag(.bool),
-                .val = Value.initTag(.bool_false),
-            },
-            .empty_struct = .{
-                .ty = Type.initTag(.empty_struct_literal),
-                .val = Value.initTag(.empty_struct_value),
-            },
-            .calling_convention_c = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
-            },
-            .calling_convention_inline = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
-            },
-            .generic_poison = .{
-                .ty = Type.initTag(.generic_poison),
-                .val = Value.initTag(.generic_poison),
-            },
-            .var_args_param = undefined,
-        });
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_c_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.C),
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_inline_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.Inline),
     };
 
     /// All instructions have an 8-byte payload, which is contained within
@@ -4163,7 +3811,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
     };
 }
 
-const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+const ref_start_index: u32 = InternPool.static_len;
 
 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
     return @intToEnum(Inst.Ref, ref_start_index + inst);
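The deleted typed_value_map above hard-coded a TypedValue for every statically known Ref. After this change those low Ref indices alias the InternPool's statically reserved entries instead, and ref_start_index (now InternPool.static_len) marks where real instruction indices begin. A minimal sketch of the resulting index arithmetic, assuming a bare u32 index space and a made-up static_len value; the in-tree helpers are indexToRef above and refToIndex, which the backend hunks below rely on:

const std = @import("std");

// Hypothetical stand-in for InternPool.static_len, the number of
// statically reserved InternPool entries.
const static_len: u32 = 84;
const ref_start_index: u32 = static_len;

fn indexToRef(inst: u32) u32 {
    // Instruction indices live above the reserved constant range.
    return ref_start_index + inst;
}

fn refToIndex(ref: u32) ?u32 {
    // Refs below ref_start_index name interned constants, not instructions.
    // (The `none` sentinel, maxInt(u32), would need separate handling.)
    return if (ref >= ref_start_index) ref - ref_start_index else null;
}

test "instruction refs round-trip" {
    try std.testing.expectEqual(@as(?u32, 7), refToIndex(indexToRef(7)));
    try std.testing.expectEqual(@as(?u32, null), refToIndex(static_len - 1));
}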
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 971ed4749d..4370977272 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -471,6 +471,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }
 
 fn gen(self: *Self) !void {
+    const mod = self.bin_file.options.module.?;
     const cc = self.fn_type.fnCallingConvention();
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
@@ -522,8 +523,8 @@ fn gen(self: *Self) !void {
 
                 const ty = self.air.typeOfIndex(inst);
 
-                const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                const abi_align = ty.abiAlignment(self.target.*);
+                const abi_size = @intCast(u32, ty.abiSize(mod));
+                const abi_align = ty.abiAlignment(mod);
                 const stack_offset = try self.allocMem(abi_size, abi_align, inst);
 
                 try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -951,8 +952,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1026,31 +1027,31 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = self.air.typeOfIndex(inst).elemType();
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
         // allocations will always have an offset > 0.
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1177,13 +1178,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
+    const mod = self.bin_file.options.module.?;
     const operand = ty_op.operand;
     const operand_mcv = try self.resolveInst(operand);
     const operand_ty = self.air.typeOf(operand);
-    const operand_info = operand_ty.intInfo(self.target.*);
+    const operand_info = operand_ty.intInfo(mod);
 
     const dest_ty = self.air.typeOfIndex(inst);
-    const dest_info = dest_ty.intInfo(self.target.*);
+    const dest_info = dest_ty.intInfo(mod);
 
     const result: MCValue = result: {
         const operand_lock: ?RegisterLock = switch (operand_mcv) {
@@ -1257,8 +1259,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 64) {
         const operand_reg = switch (operand) {
@@ -1319,6 +1322,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1327,7 +1331,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         // TODO convert this to mvn + and
                         const op_reg = switch (operand) {
@@ -1361,7 +1365,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                         if (int_info.bits <= 64) {
                             const op_reg = switch (operand) {
                                 .register => |r| r,
@@ -1413,13 +1417,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
         .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
         .Int => {
-            const mod = self.bin_file.options.module.?;
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -1907,12 +1911,12 @@ fn addSub(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO binary operations on floats", .{}),
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -1968,11 +1972,11 @@ fn mul(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // TODO add optimisations for multiplication
                 // with immediates, for example a * 2 can be
@@ -1999,7 +2003,8 @@ fn divFloat(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div_float", .{}),
         .Vector => return self.fail("TODO div_float on vectors", .{}),
         else => unreachable,
@@ -2015,12 +2020,12 @@ fn divTrunc(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2049,12 +2054,12 @@ fn divFloor(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2082,12 +2087,12 @@ fn divExact(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2118,12 +2123,12 @@ fn rem(
     _ = maybe_inst;
 
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO rem/mod on floats", .{}),
         .Vector => return self.fail("TODO rem/mod on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -2188,7 +2193,8 @@ fn modulo(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO mod on floats", .{}),
         .Vector => return self.fail("TODO mod on vectors", .{}),
         .Int => return self.fail("TODO mod on ints", .{}),
@@ -2205,10 +2211,11 @@ fn wrappingArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // Generate an add/sub/mul
                 const result: MCValue = switch (tag) {
@@ -2240,11 +2247,11 @@ fn bitwise(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // TODO implement bitwise operations with immediates
                 const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2274,10 +2281,11 @@ fn shiftExact(
 ) InnerError!MCValue {
     _ = rhs_ty;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
 
@@ -2323,10 +2331,11 @@ fn shiftNormal(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // Generate a shl_exact/shr_exact
                 const result: MCValue = switch (tag) {
@@ -2362,7 +2371,8 @@ fn booleanOp(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Bool => {
             assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
             assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2388,9 +2398,9 @@ fn ptrArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
-            const mod = self.bin_file.options.module.?;
             assert(rhs_ty.eql(Type.usize, mod));
 
             const ptr_ty = lhs_ty;
@@ -2398,7 +2408,7 @@ fn ptrArithmetic(
                 .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
                 else => ptr_ty.childType(),
             };
-            const elem_size = elem_ty.abiSize(self.target.*);
+            const elem_size = elem_ty.abiSize(mod);
 
             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
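The CodeGen hunks above and below repeat one mechanical change: layout and classification queries on Type (abiSize, abiAlignment, intInfo, zigTypeTag, structFieldOffset, and friends) now take the Module, through which the InternPool is reachable, rather than a bare std.Target. The shape of the rewrite, extracted from the hunks themselves rather than from any new API:

// Before: layout queries consulted the target directly.
//     const abi_size = @intCast(u32, ty.abiSize(self.target.*));
// After: fetch the Module once per function and pass it through.
//     const mod = self.bin_file.options.module.?;
//     const abi_size = @intCast(u32, ty.abiSize(mod));

Hoisting mod to the top of each function also lets previously nested `const mod = ...` declarations inside switch arms be deleted, which is why several hunks move the line rather than add it.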
@@ -2511,6 +2521,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2518,16 +2529,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 switch (int_info.bits) {
                     1...31, 33...63 => {
                         const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2639,24 +2649,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
-        const mod = self.bin_file.options.module.?;
-
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
         const lhs_ty = self.air.typeOf(extra.lhs);
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -2864,6 +2873,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2871,14 +2881,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 64) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -3011,10 +3021,11 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
+    const mod = self.bin_file.options.module.?;
     var opt_buf: Type.Payload.ElemType = undefined;
     const payload_ty = optional_ty.optionalChild(&opt_buf);
-    if (!payload_ty.hasRuntimeBits()) return MCValue.none;
-    if (optional_ty.isPtrLikeOptional()) {
+    if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
+    if (optional_ty.isPtrLikeOptional(mod)) {
         // TODO should we reuse the operand here?
         const raw_reg = try self.register_manager.allocReg(inst, gp);
         const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3055,16 +3066,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
         return MCValue{ .immediate = 0 };
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3086,7 +3098,7 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -3134,16 +3146,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3165,10 +3178,10 @@ fn errUnionPayload(
             );
 
             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
 
             _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{
                     .rr_lsb_width = .{
                         // Set both registers to the X variant to get the full width
@@ -3245,6 +3258,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     if (self.liveness.isUnused(inst)) {
@@ -3253,7 +3267,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 
     const result: MCValue = result: {
         const payload_ty = self.air.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits()) {
+        if (!payload_ty.hasRuntimeBits(mod)) {
             break :result MCValue{ .immediate = 1 };
         }
 
@@ -3265,7 +3279,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-        if (optional_ty.isPtrLikeOptional()) {
+        if (optional_ty.isPtrLikeOptional(mod)) {
             // TODO should we check if we can reuse the operand?
             const raw_reg = try self.register_manager.allocReg(inst, gp);
             const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3273,9 +3287,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             break :result MCValue{ .register = reg };
         }
 
-        const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
-        const optional_abi_align = optional_ty.abiAlignment(self.target.*);
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+        const optional_abi_align = optional_ty.abiAlignment(mod);
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
 
         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
         try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3289,19 +3303,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.air.getRefType(ty_op.ty);
         const error_ty = error_union_ty.errorUnionSet();
         const payload_ty = error_union_ty.errorUnionPayload();
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
 
@@ -3314,17 +3329,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const error_union_ty = self.air.getRefType(ty_op.ty);
         const error_ty = error_union_ty.errorUnionSet();
         const payload_ty = error_union_ty.errorUnionPayload();
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
 
@@ -3440,8 +3456,9 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.childType();
-    const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
     // TODO optimize for elem_sizes of 1, 2, 4, 8
     switch (elem_size) {
@@ -3597,8 +3614,9 @@ fn reuseOperand(
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.elemType();
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const elem_size = elem_ty.abiSize(mod);
 
     switch (ptr) {
         .none => unreachable,
@@ -3846,9 +3864,10 @@ fn genInlineMemsetCode(
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const elem_ty = self.air.typeOfIndex(inst);
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const elem_size = elem_ty.abiSize(mod);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits())
+        if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
@@ -3874,11 +3893,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
-        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
+        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
+        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
         4 => .ldr_immediate,
         8 => .ldr_immediate,
         3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
@@ -3896,7 +3916,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }
 
 fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
         1 => .strb_immediate,
@@ -3917,8 +3938,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     log.debug("store: storing {} to {}", .{ value, ptr });
-    const abi_size = value_ty.abiSize(self.target.*);
+    const abi_size = value_ty.abiSize(mod);
 
     switch (ptr) {
         .none => unreachable,
@@ -4069,10 +4091,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.air.typeOf(operand);
         const struct_ty = ptr_ty.childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4093,10 +4116,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const operand = extra.struct_operand;
     const index = extra.field_index;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.air.typeOf(operand);
         const struct_field_ty = struct_ty.structFieldType(index);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -4142,12 +4166,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
         const struct_ty = self.air.getRefType(ty_pl.ty).childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4223,8 +4248,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
     const ty = self.air.typeOf(callee);
+    const mod = self.bin_file.options.module.?;
 
-    const fn_ty = switch (ty.zigTypeTag()) {
+    const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
         .Pointer => ty.childType(),
        else => unreachable,
     };
@@ -4246,8 +4272,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
         const ret_ty = fn_ty.fnReturnType();
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4289,8 +4315,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     // Due to incremental compilation, how function calls are generated depends
     // on linking.
-    const mod = self.bin_file.options.module.?;
-    if (self.air.value(callee)) |func_value| {
+    if (self.air.value(callee, mod)) |func_value| {
         if (func_value.castTag(.function)) |func_payload| {
             const func = func_payload.data;
 
@@ -4369,7 +4394,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             return self.fail("TODO implement calling bitcasted functions", .{});
         }
     } else {
-        assert(ty.zigTypeTag() == .Pointer);
+        assert(ty.zigTypeTag(mod) == .Pointer);
         const mcv = try self.resolveInst(callee);
 
         try self.genSetReg(ty, .x30, mcv);
@@ -4410,11 +4435,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
     const ret_ty = self.fn_type.fnReturnType();
+    const mod = self.bin_file.options.module.?;
 
     switch (self.ret_mcv) {
         .none => {},
         .immediate => {
-            assert(ret_ty.isError());
+            assert(ret_ty.isError(mod));
         },
         .register => |reg| {
             // Return result by value
@@ -4465,8 +4491,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-                const abi_align = ret_ty.abiAlignment(self.target.*);
+                const mod = self.bin_file.options.module.?;
+                const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+                const abi_align = ret_ty.abiAlignment(mod);
 
                 const offset = try self.allocMem(abi_size, abi_align, null);
 
@@ -4501,21 +4528,21 @@ fn cmp(
     lhs_ty: Type,
     op: math.CompareOperator,
 ) !MCValue {
-    var int_buffer: Type.Payload.Bits = undefined;
-    const int_ty = switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
             var opt_buffer: Type.Payload.ElemType = undefined;
             const payload_ty = lhs_ty.optionalChild(&opt_buffer);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.initTag(.u1);
-            } else if (lhs_ty.isPtrLikeOptional()) {
+            } else if (lhs_ty.isPtrLikeOptional(mod)) {
                 break :blk Type.usize;
             } else {
                 return self.fail("TODO ARM cmp non-pointer optionals", .{});
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(&int_buffer),
+        .Enum => lhs_ty.intTagType(),
         .Int => lhs_ty,
         .Bool => Type.initTag(.u1),
         .Pointer => Type.usize,
@@ -4523,7 +4550,7 @@ fn cmp(
         else => unreachable,
     };
 
-    const int_info = int_ty.intInfo(self.target.*);
+    const int_info = int_ty.intInfo(mod);
     if (int_info.bits <= 64) {
         try self.spillCompareFlagsIfOccupied();
 
@@ -4687,8 +4714,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     // whether it needs to be spilled in the branches
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @enumToInt(pl_op.operand);
-        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int >= Air.ref_start_index) {
+            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
             self.processDeath(op_index);
         }
     }
@@ -4819,13 +4846,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
-    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
+    const mod = self.bin_file.options.module.?;
+    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = operand_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };
 
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
         const operand_mcv = try operand_bind.resolveToMcv(self);
         const new_mcv: MCValue = switch (operand_mcv) {
             .register => |source_reg| new: {
@@ -4838,7 +4866,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
                     try self.genSetReg(payload_ty, dest_reg, operand_mcv);
                 } else {
                     _ = try self.addInst(.{
-                        .tag = if (payload_ty.isSignedInt())
+                        .tag = if (payload_ty.isSignedInt(mod))
                             Mir.Inst.Tag.asr_immediate
                         else
                             Mir.Inst.Tag.lsr_immediate,
@@ -5210,9 +5238,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+    const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.air.typeOf(operand).hasRuntimeBits()) {
+    if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5386,7 +5415,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 }
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5445,7 +5475,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
             const overflow_bit_ty = ty.structFieldType(1);
-            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const raw_cond_reg = try self.register_manager.allocReg(null, gp);
             const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
 
@@ -5559,6 +5589,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 }
 
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5669,13 +5700,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             try self.genLdrRegister(reg, reg.toX(), ty);
         },
         .stack_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
                         4, 8 => .ldr_stack,
                         else => unreachable, // unexpected abi size
                     };
@@ -5693,13 +5724,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
        .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                         4, 8 => .ldr_stack_argument,
                         else => unreachable, // unexpected abi size
                     };
@@ -5720,7 +5751,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 }
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5728,7 +5760,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
             if (!self.wantSafety())
                 return; // The already existing value will do just fine.
             // TODO Upgrade this to a memset call when we have that available.
-            switch (ty.abiSize(self.target.*)) {
+            switch (ty.abiSize(mod)) {
                 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -6087,14 +6119,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const extra = self.air.extraData(Air.Try, pl_op.payload);
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
         const error_union_ty = self.air.typeOf(pl_op.operand);
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const error_union_align = error_union_ty.abiAlignment(self.target.*);
+        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_align = error_union_ty.abiAlignment(mod);
 
         // The error union will die in the body. However, we need the
         // error union after the body in order to extract the payload
@@ -6123,22 +6156,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
-    // First section of indexes correspond to a set number of constant values.
-    const ref_int = @enumToInt(inst);
-    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
-        const tv = Air.Inst.Ref.typed_value_map[ref_int];
-        if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
-            return MCValue{ .none = {} };
-        }
-        return self.genTypedValue(tv);
-    }
+    const mod = self.bin_file.options.module.?;
 
     // If the type has no codegen bits, no need to store it.
     const inst_ty = self.air.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
         return MCValue{ .none = {} };
 
-    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+    const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+        .ty = inst_ty,
+        .val = self.air.value(inst, mod).?,
+    });
+
     switch (self.air.instructions.items(.tag)[inst_index]) {
         .constant => {
             // Constants have static lifetimes, so they are always memoized in the outer most table.
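The calling-convention hunks below round the stacked-argument offset up to each parameter's alignment, via std.mem.alignForwardGeneric on the general path and the manual `nsaa += 8 - (nsaa % 8)` form in the Apple case. The underlying arithmetic, as a standalone sketch assuming power-of-two alignments:

const std = @import("std");

fn alignForward(offset: u32, alignment: u32) u32 {
    // Round offset up to the next multiple of alignment (a power of two).
    return (offset + alignment - 1) & ~(alignment - 1);
}

test "stack argument rounding" {
    try std.testing.expectEqual(@as(u32, 16), alignForward(9, 8)); // rounds up
    try std.testing.expectEqual(@as(u32, 16), alignForward(16, 8)); // already aligned
}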
@@ -6222,6 +6251,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     errdefer self.gpa.free(result.args);
 
     const ret_ty = fn_ty.fnReturnType();
+    const mod = self.bin_file.options.module.?;
 
     switch (cc) {
         .Naked => {
@@ -6236,14 +6266,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var ncrn: usize = 0; // Next Core Register Number
             var nsaa: u32 = 0; // Next stacked argument address
 
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) };
@@ -6253,7 +6283,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(self.target.*));
+                const param_size = @intCast(u32, ty.abiSize(mod));
                 if (param_size == 0) {
                     result.args[i] = .{ .none = {} };
                     continue;
@@ -6261,7 +6291,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
                 // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                 // values to spread across odd-numbered registers.
-                if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
+                if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) {
                     // Round up NCRN to the next even number
                     ncrn += ncrn % 2;
                 }
@@ -6279,7 +6309,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     ncrn = 8;
                     // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                     // that the entire stack space consumed by the arguments is 8-byte aligned.
-                    if (ty.abiAlignment(self.target.*) == 8) {
+                    if (ty.abiAlignment(mod) == 8) {
                         if (nsaa % 8 != 0) {
                             nsaa += 8 - (nsaa % 8);
                         }
@@ -6294,14 +6324,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             result.stack_align = 16;
         },
         .Unspecified => {
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) };
@@ -6318,9 +6348,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var stack_offset: u32 = 0;
 
             for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(self.target.*) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const param_alignment = ty.abiAlignment(self.target.*);
+                if (ty.abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.abiSize(mod));
+                    const param_alignment = ty.abiAlignment(mod);
 
                     stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                     result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -6371,7 +6401,8 @@ fn parseRegName(name: []const u8) ?Register {
 }
 
 fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     switch (reg.class()) {
         .general_purpose => {
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 0c48f33ea1..cbfd6a1171 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -4,6 +4,7 @@ const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
 
 pub const Class = union(enum) {
     memory,
@@ -14,40 +15,40 @@ pub const Class = union(enum) {
 };
 
 /// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, target: std.Target) Class {
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *const Module) Class {
+    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
 
     var maybe_float_bits: ?u16 = null;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Union => {
             if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
         .Vector => {
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             // TODO is this controlled by a cpu feature?
            if (bit_size > 128) return .memory;
             return .byval;
         },
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            std.debug.assert(ty.isPtrLikeOptional(mod));
             return .byval;
         },
         .Pointer => {
@@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
 }
 
 const sret_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
+    const target = mod.getTarget();
     const invalid = std.math.maxInt(u8);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
             const fields = ty.unionFields();
             var max_count: u8 = 0;
             for (fields.values()) |field| {
-                const field_count = countFloats(field.ty, target, maybe_float_bits);
+                const field_count = countFloats(field.ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 if (field_count > max_count) max_count = field_count;
                 if (max_count > sret_float_count) return invalid;
@@ -93,7 +95,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
                 const field_ty = ty.structFieldType(i);
-                const field_count = countFloats(field_ty, target, maybe_float_bits);
+                const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
                 if (count > sret_float_count) return invalid;
@@ -113,12 +115,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
     }
 }
 
-pub fn getFloatArrayType(ty: Type) ?Type {
-    switch (ty.zigTypeTag()) {
+pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
             const fields = ty.unionFields();
             for (fields.values()) |field| {
-                if (getFloatArrayType(field.ty)) |some| return some;
+                if (getFloatArrayType(field.ty, mod)) |some| return some;
             }
             return null;
         },
@@ -127,7 +129,7 @@ pub fn getFloatArrayType(ty: Type) ?Type {
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
                 const field_ty = ty.structFieldType(i);
-                if (getFloatArrayType(field_ty)) |some| return some;
+                if (getFloatArrayType(field_ty, mod)) |some| return some;
             }
             return null;
         },
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index bdc1627bd6..4c7151cd47 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -520,8 +520,9 @@ fn gen(self: *Self) !void {
 
                 const ty = self.air.typeOfIndex(inst);
 
-                const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                const abi_align = ty.abiAlignment(self.target.*);
+                const mod = self.bin_file.options.module.?;
+                const abi_size = @intCast(u32, ty.abiSize(mod));
+                const abi_align = ty.abiAlignment(mod);
                 const stack_offset = try self.allocMem(abi_size, abi_align, inst);
 
                 try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -937,8 +938,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1006,9 +1007,10 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = self.air.typeOfIndex(inst).elemType();
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
@@ -1016,22 +1018,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1158,10 +1159,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const operand_ty = self.air.typeOf(ty_op.operand);
     const dest_ty = self.air.typeOfIndex(inst);
 
-    const operand_abi_size = operand_ty.abiSize(self.target.*);
-    const dest_abi_size = dest_ty.abiSize(self.target.*);
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const operand_abi_size = operand_ty.abiSize(mod);
+    const dest_abi_size = dest_ty.abiSize(mod);
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     const dst_mcv: MCValue = blk: {
         if (info_a.bits == info_b.bits) {
@@ -1215,8 +1217,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 32) {
         if (info_a.bits > 32) {
@@ -1278,6 +1281,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1286,7 +1290,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         var op_reg: Register = undefined;
                         var dest_reg: Register = undefined;
@@ -1319,7 +1323,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                    .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                         if (int_info.bits <= 32) {
                             var op_reg: Register = undefined;
                             var dest_reg: Register = undefined;
@@ -1373,13 +1377,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
         .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
         .Int => {
-            const mod = self.bin_file.options.module.?;
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -1582,6 +1586,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1589,16 +1594,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits < 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -1695,6 +1699,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1702,16 +1707,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 16) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -1859,19 +1863,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
         const lhs_ty = self.air.typeOf(extra.lhs);
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -2017,7 +2022,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const optional_ty = self.air.typeOfIndex(inst);
-        const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
+        const mod = self.bin_file.options.module.?;
+        const abi_size = @intCast(u32, optional_ty.abiSize(mod));
 
         // Optional with a zero-bit payload type is just a boolean true
         if (abi_size == 1) {
@@ -2036,16 +2042,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
         return MCValue{ .immediate = 0 };
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2067,7 +2074,7 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -2112,16 +2119,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2143,10 +2151,10 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2221,19 +2229,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); @@ -2244,19 +2253,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = 
errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); @@ -2361,7 +2371,8 @@ fn ptrElemVal( maybe_inst: ?Air.Inst.Index, ) !MCValue { const elem_ty = ptr_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (elem_size) { 1, 4 => { @@ -2647,7 +2658,8 @@ fn reuseOperand( fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const elem_ty = ptr_ty.elemType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2722,10 +2734,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2734,7 +2747,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.dead; const dest_mcv: MCValue = blk: { - const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4; + const ptr_fits_dest = elem_ty.abiSize(mod) <= 4; if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
break :blk ptr; @@ -2750,7 +2763,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const elem_size = @intCast(u32, value_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, value_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2869,10 +2883,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2892,10 +2907,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); const struct_field_ty = struct_ty.structFieldType(index); switch (mcv) { @@ -2959,10 +2975,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8; + const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2981,17 +2997,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = self.air.getRefType(ty_pl.ty).childType(); - if (struct_ty.zigTypeTag() == .Union) { + if (struct_ty.zigTypeTag(mod) == .Union) { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3375,12 +3392,12 @@ fn addSub( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch 
(lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3431,12 +3448,12 @@ fn mul( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // TODO add optimisations for multiplication // with immediates, for example a * 2 can be @@ -3463,7 +3480,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), else => unreachable, @@ -3479,12 +3497,12 @@ fn divTrunc( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3522,12 +3540,12 @@ fn divFloor( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3569,7 +3587,8 @@ fn divExact( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM div_exact", .{}), @@ -3586,12 +3605,12 @@ fn rem( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3654,7 +3673,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - switch 
(lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM mod", .{}), @@ -3671,10 +3691,11 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate an add/sub/mul const result: MCValue = switch (tag) { @@ -3708,12 +3729,12 @@ fn bitwise( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3753,16 +3774,17 @@ fn shiftExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const rhs_immediate = try rhs_bind.resolveToImmediate(self); const mir_tag: Mir.Inst.Tag = switch (tag) { .shl_exact => .lsl, - .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { + .shr_exact => switch (lhs_ty.intInfo(mod).signedness) { .signed => Mir.Inst.Tag.asr, .unsigned => Mir.Inst.Tag.lsr, }, @@ -3791,10 +3813,11 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate a shl_exact/shr_exact const result: MCValue = switch (tag) { @@ -3833,7 +3856,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3866,9 +3890,9 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { - const mod = self.bin_file.options.module.?; assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; @@ -3876,7 +3900,7 @@ fn ptrArithmetic( .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = @intCast(u32, 
elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -3903,11 +3927,12 @@ fn ptrArithmetic( } fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; @@ -3924,7 +3949,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } }; const data: Mir.Inst.Data = switch (abi_size) { - 1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset, + 1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset, 2 => rr_extra_offset, 3, 4 => rr_offset, else => unreachable, @@ -3937,7 +3962,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -4197,8 +4223,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.air.typeOf(callee); + const mod = self.bin_file.options.module.?; - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -4226,8 +4253,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); + const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -4270,7 +4297,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4294,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .lr, mcv); @@ -4356,11 +4383,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (self.ret_mcv) { .none => {}, .immediate => { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); }, .register => |reg| { // Return result by value @@ -4411,8 +4439,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const abi_align = ret_ty.abiAlignment(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4448,21 +4477,21 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO ARM cmp non-pointer optionals", .{}); } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(), .Int => lhs_ty, .Bool => Type.initTag(.u1), .Pointer => Type.usize, @@ -4470,7 +4499,7 @@ fn cmp( else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 32) { try self.spillCompareFlagsIfOccupied(); @@ -4636,8 +4665,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -4772,8 +4801,9 @@ fn isNull( operand_bind: ReadArg.Bind, operand_ty: Type, ) !MCValue { - if (operand_ty.isPtrLikeOptional()) { - assert(operand_ty.abiSize(self.target.*) == 4); + const mod = self.bin_file.options.module.?; + if (operand_ty.isPtrLikeOptional(mod)) { + assert(operand_ty.abiSize(mod) == 4); const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; return self.cmp(operand_bind, imm_bind, 
Type.usize, .eq); @@ -5131,9 +5161,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { + const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5301,7 +5332,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5382,7 +5414,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5466,6 +5498,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5640,17 +5673,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; const extra_offset = switch (abi_size) { - 1 => ty.isSignedInt(), + 1 => ty.isSignedInt(mod), 2 => true, 3, 4 => false, else => unreachable, @@ -5691,11 +5724,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(self.target.*); + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, 3, 4 => .ldr_stack_argument, else => unreachable, }; @@ -5712,7 +5745,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .none, 
.unreach => return, @@ -6039,8 +6073,9 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const error_union_align = error_union_ty.abiAlignment(self.target.*); + const mod = self.bin_file.options.module.?; + const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6069,22 +6104,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.value(inst, mod).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { .constant => { // Constants have static lifetimes, so they are always memoized in the outer most table. @@ -6166,6 +6197,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -6180,12 +6212,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var ncrn: usize = 0; // Next Core Register Number var nsaa: u32 = 0; // Next stacked argument address - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6200,10 +6232,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (param_types, 0..) 
|ty, i| { - if (ty.abiAlignment(self.target.*) == 8) + if (ty.abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6215,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(self.target.*) == 8) + if (ty.abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6227,14 +6259,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { result.stack_align = 8; }, .Unspecified => { - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size == 0) { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; } else if (ret_ty_size <= 4) { result.return_value = .{ .register = .r0 }; @@ -6250,9 +6282,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; for (param_types, 0..) |ty, i| { - if (ty.abiSize(self.target.*) > 0) { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - const param_alignment = ty.abiAlignment(self.target.*); + if (ty.abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.abiSize(mod)); + const param_alignment = ty.abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 8b9ec45e24..ca7fff7d08 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -1,8 +1,10 @@ const std = @import("std"); +const assert = std.debug.assert; const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = union(enum) { memory, @@ -22,28 +24,28 @@ pub const Class = union(enum) { pub const Context = enum { ret, arg }; -pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class { + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; const max_byval_size = 512; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return 
.byval; const fields = ty.structFieldCount(); var i: u32 = 0; while (i < fields) : (i += 1) { const field_ty = ty.structFieldType(i); - const field_alignment = ty.structFieldAlign(i, target); - const field_size = field_ty.bitSize(target); + const field_alignment = ty.structFieldAlign(i, mod); + const field_size = field_ty.bitSize(mod); if (field_size > 32 or field_alignment > 32) { return Class.arrSize(bit_size, 64); } @@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { return Class.arrSize(bit_size, 32); }, .Union => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; for (ty.unionFields().values()) |field| { - if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) { + if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) { return Class.arrSize(bit_size, 64); } } @@ -71,28 +73,28 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { .Int => { // TODO this is incorrect for _BitInt(128) but implementing // this correctly makes implementing compiler-rt impossible. - // const bit_size = ty.bitSize(target); + // const bit_size = ty.bitSize(mod); // if (bit_size > 64) return .memory; return .byval; }, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > 64) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); // TODO is this controlled by a cpu feature? 
if (ctx == .ret and bit_size > 128) return .memory; if (bit_size > 512) return .memory; return .byval; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + assert(!ty.isSlice()); return .byval; }, .ErrorUnion, @@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { } const byval_float_count = 4; -fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { +fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 { + const target = mod.getTarget(); const invalid = std.math.maxInt(u32); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Union => { const fields = ty.unionFields(); var max_count: u32 = 0; for (fields.values()) |field| { - const field_count = countFloats(field.ty, target, maybe_float_bits); + const field_count = countFloats(field.ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > byval_float_count) return invalid; @@ -134,7 +137,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { var i: u32 = 0; while (i < fields_len) : (i += 1) { const field_ty = ty.structFieldType(i); - const field_count = countFloats(field_ty, target, maybe_float_bits); + const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > byval_float_count) return invalid; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5fb07c5fdc..75d5a87bf2 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -755,8 +755,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -805,22 +805,22 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -893,10 +893,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.air.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1068,18 +1069,18 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; switch (tag) { // Arithmetic operations on integers and floats .add, .sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO immediate operands return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); @@ -1093,14 +1094,14 @@ fn binOp( .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; const elem_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Air.Inst.Tag = switch (tag) { @@ -1331,10 +1332,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const optional_ty = self.air.typeOfIndex(inst); // Optional with a zero-bit payload type 
is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -1526,7 +1528,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + const mod = self.bin_file.options.module.?; + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -1698,6 +1701,7 @@ fn airFence(self: *Self) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; const fn_ty = self.air.typeOf(pl_op.operand); @@ -1736,7 +1740,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -1828,7 +1832,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const ty = self.air.typeOf(bin_op.lhs); const mod = self.bin_file.options.module.?; assert(ty.eql(self.air.typeOf(bin_op.rhs), mod)); - if (ty.zigTypeTag() == .ErrorSet) + if (ty.zigTypeTag(mod) == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); @@ -2107,7 +2111,8 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -2533,22 +2538,18 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBits(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.value(inst, mod).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { .constant => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
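Every backend in this series receives the same two mechanical rewrites, and the riscv64 resolveInst above mirrors the arm version earlier in the patch. Below is a minimal sketch of the pattern, reusing identifiers from the hunks above; it is an illustration only, not an additional hunk. `self` stands for any of the per-backend CodeGen structs, `ty` for a Type, and `inst` for an Air.Inst.Ref.

    // 1. Layout queries take the Module rather than a bare std.Target;
    //    code that still needs the target triple goes through mod.getTarget(),
    //    as the abi.zig hunks above do.
    const mod = self.bin_file.options.module.?;
    const abi_size = ty.abiSize(mod); // was: ty.abiSize(self.target.*)
    const int_info = ty.intInfo(mod); // was: ty.intInfo(self.target.*)

    // 2. The fixed typed_value_map prefix of Air.Inst.Ref is gone: liveness
    //    index math uses Air.ref_start_index, and constant operands resolve
    //    through Air.refToIndex plus the now Module-aware Air.value.
    const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
        .ty = self.air.typeOf(inst),
        .val = self.air.value(inst, mod).?,
    });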
@@ -2630,6 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -2650,7 +2652,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2680,14 +2682,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), } - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index bec1b49a4e..c9e0873bce 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -3,16 +3,18 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = enum { memory, byval, integer, double_integer }; -pub fn classifyType(ty: Type, target: std.Target) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *const Module) Class { + const target = mod.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const max_byval_size = target.ptrBitWidth() * 2; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; @@ -23,7 +25,7 @@ pub fn classifyType(ty: Type, target: std.Target) Class { return .integer; }, .Union => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; @@ -36,17 +38,17 @@ pub fn classifyType(ty: Type, target: std.Target) Class { .Bool => return .integer, .Float => return .byval, .Int, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .integer; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b70bc0f73d..63b604857e 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ 
-758,18 +758,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 32, 64 => { // Only say yes if the operation is @@ -1018,7 +1018,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { switch (arg) { .stack_offset => |off| { const mod = self.bin_file.options.module.?; - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; const offset = off + abi_size; @@ -1203,6 +1203,7 @@ fn airBreakpoint(self: *Self) !void { } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; // We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you. @@ -1218,14 +1219,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO byteswap for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits == 8) break :result operand; const abi_size = int_info.bits >> 3; - const abi_align = operand_ty.abiAlignment(self.target.*); + const abi_align = operand_ty.abiAlignment(mod); const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { Endian.Big => ASI.asi_primary_little, Endian.Little => ASI.asi_primary, @@ -1294,7 +1295,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -1337,7 +1339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -1374,7 +1376,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(ty, .o7, mcv); @@ -1422,15 +1424,15 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(), .Int => lhs_ty, .Bool => Type.initTag(.u1), .Pointer => Type.usize, @@ -1438,9 +1440,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{}); @@ -1450,7 +1452,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 64) { _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{ .lhs = bin_op.lhs, @@ -1512,8 +1514,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -1752,10 +1754,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.air.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1814,9 +1817,10 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airLoad(self: 
*Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2037,18 +2041,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 1...32 => { try self.spillConditionFlagsIfOccupied(); @@ -2101,6 +2105,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); @@ -2116,7 +2121,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }; }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { const op_reg = switch (operand) { .register => |r| r, @@ -2150,7 +2155,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 64) { const op_reg = switch (operand) { .register => |r| r, @@ -2332,16 +2337,17 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { try self.spillConditionFlagsIfOccupied(); @@ -2449,7 +2455,8 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.air.typeOf(bin_op.lhs); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + 
const mod = self.bin_file.options.module.?; + const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -2564,9 +2571,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .dead, .unreach => unreachable, @@ -2701,7 +2709,8 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -2713,7 +2722,8 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -2727,7 +2737,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -2747,7 +2758,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const optional_ty = self.air.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + const mod = self.bin_file.options.module.?; + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -2784,7 +2796,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - if (!elem_ty.hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. 
Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -2792,22 +2805,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -2860,12 +2872,12 @@ fn binOp( .xor, .cmp_eq, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Only say yes if the operation is // commutative, i.e. we can swap both of the @@ -2934,10 +2946,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const result_reg = result.register; try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); @@ -2951,11 +2963,11 @@ fn binOp( }, .div_trunc => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = switch (tag) { .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), @@ -2984,14 +2996,14 @@ fn binOp( }, .ptr_add => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; const elem_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Mir.Inst.Tag = switch (tag) { @@ -3016,7 +3028,7 @@ fn binOp( .bool_and, .bool_or, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert(lhs != .immediate); // should have been handled by Sema assert(rhs != .immediate); // should have been handled by Sema @@ -3046,10 +3058,10 @@ fn binOp( const result = try 
self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // 32 and 64 bit operands doesn't need truncating if (int_info.bits == 32 or int_info.bits == 64) return result; @@ -3068,10 +3080,10 @@ fn binOp( .shl_exact, .shr_exact, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = rhs == .immediate; @@ -3393,7 +3405,8 @@ fn binOpRegister( fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3512,16 +3525,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return error_union_mcv; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3555,8 +3569,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -3730,6 +3744,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3928,19 +3943,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
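// Note that the load below reuses `reg` as its own base register: the
// address is materialized first and then immediately overwritten by the
// loaded value, so no scratch register is required.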
try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod)); }, .stack_offset => |off| { const real_offset = realStackOffset(off); const simm13 = math.cast(i13, real_offset) orelse return self.fail("TODO larger stack offsets: {}", .{real_offset}); - try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod)); }, } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3948,7 +3964,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(self.target.*)) { + switch (ty.abiSize(mod)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -3978,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4152,13 +4168,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!error_type.hasRuntimeBits()) { + if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { + } else if (!payload_type.hasRuntimeBits(mod)) { + if (error_type.abiSize(mod) <= 8) { const reg_mcv: MCValue = switch (operand) { .register => operand, else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, @@ -4249,8 +4266,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { + const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.elemType(); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4321,11 +4339,11 @@ fn minMax( ) InnerError!MCValue { const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO min/max on floats", .{}), .Vector => return self.fail("TODO min/max on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO skip register setting when one of the operands // is a small (fits in i13) immediate. 
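The hunks above and below repeat one mechanical rewrite: Type queries such as
abiSize, abiAlignment, intInfo, and zigTypeTag used to take a std.Target and
now take the *Module, because answering them may require the InternPool. A
minimal sketch of the pattern, using a hypothetical exampleAbiSize helper that
is not part of this commit:

fn exampleAbiSize(self: *Self, ty: Type) u64 {
    // The Module is always present while machine code is being generated,
    // hence the unconditional `.?` unwrap used throughout this file.
    const mod = self.bin_file.options.module.?;
    // Before this commit this was `ty.abiSize(self.target.*)`; the target
    // remains reachable through `mod.getTarget()` where it is still needed.
    return ty.abiSize(mod);
}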
@@ -4455,6 +4473,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -4478,7 +4497,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4505,12 +4524,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) result.stack_byte_count = next_stack_offset; result.stack_align = 16; - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4528,40 +4547,37 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) return result; } -fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } +fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; + const ty = self.air.typeOf(ref); // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) - return MCValue{ .none = {} }; - - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); - switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { - // Constants have static lifetimes, so they are always memoized in the outer most table. - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); - if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; - gop.value_ptr.* = try self.genTypedValue(.{ - .ty = inst_ty, - .val = self.air.values[ty_pl.payload], - }); - } - return gop.value_ptr.*; - }, - .const_ty => unreachable, - else => return self.getResolvedInstValue(inst_index), + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + + if (Air.refToIndex(ref)) |inst| { + switch (self.air.instructions.items(.tag)[inst]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. 
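+ // The outermost table is branch_stack.items[0], which lives for the
+ // whole function, so each constant is lowered at most once and then
+ // reused by every later branch.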
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst), + } } + + return self.genTypedValue(.{ + .ty = ty, + .val = self.air.value(ref, mod).?, + }); } fn ret(self: *Self, mcv: MCValue) !void { @@ -4666,7 +4682,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const abi_size = value_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = value_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4707,10 +4724,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4748,8 +4766,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 64) { const operand_reg = switch (operand) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d4be9bf139..b592ffcb2a 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -788,9 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); assert(!gop.found_existing); - const val = func.air.value(ref).?; + const mod = func.bin_file.base.options.module.?; + const val = func.air.value(ref, mod).?; const ty = func.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -801,7 +802,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). 
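// Which of the two paths applies is decided by isByRef below: by-ref types
// get a symbol in the binary via lowerUnnamedConst and are referenced by
// memory address, while by-value types become a single immediate through
// lowerConstant.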
- const result = if (isByRef(ty, func.target)) blk: {
+ const result = if (isByRef(ty, mod)) blk: {
 const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index);
 break :blk WValue{ .memory = sym_index };
 } else try func.lowerConstant(val, ty);
@@ -987,8 +988,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
 }
/// Using a given `Type`, returns the corresponding `wasm.Valtype`
-fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
- return switch (ty.zigTypeTag()) {
+fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
+ const target = mod.getTarget();
+ return switch (ty.zigTypeTag(mod)) {
 .Float => blk: {
 const bits = ty.floatBits(target);
 if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16
@@ -998,7 +1000,7 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
 return wasm.Valtype.i32; // represented as pointer to stack
 },
 .Int, .Enum => blk: {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
 if (info.bits <= 32) break :blk wasm.Valtype.i32;
 if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
 break :blk wasm.Valtype.i32; // represented as pointer to stack
@@ -1006,22 +1008,18 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
 .Struct => switch (ty.containerLayout()) {
 .Packed => {
 const struct_obj = ty.castTag(.@"struct").?.data;
- return typeToValtype(struct_obj.backing_int_ty, target);
+ return typeToValtype(struct_obj.backing_int_ty, mod);
 },
 else => wasm.Valtype.i32,
 },
- .Vector => switch (determineSimdStoreStrategy(ty, target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
 .direct => wasm.Valtype.v128,
 .unrolled => wasm.Valtype.i32,
 },
 .Union => switch (ty.containerLayout()) {
 .Packed => {
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.bitSize(target)),
- };
- const int_ty = Type.initPayload(&int_ty_payload.base);
- return typeToValtype(int_ty, target);
+ const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
+ return typeToValtype(int_ty, mod);
 },
 else => wasm.Valtype.i32,
 },
@@ -1030,17 +1028,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
 }
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, target: std.Target) u8 {
- return wasm.valtype(typeToValtype(ty, target));
+fn genValtype(ty: Type, mod: *Module) u8 {
+ return wasm.valtype(typeToValtype(ty, mod));
 }
/// Using a given `Type`, returns the corresponding wasm value type
/// Unlike `genValtype`, this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, target: std.Target) u8 {
+fn genBlockType(ty: Type, mod: *Module) u8 {
 return switch (ty.tag()) {
 .void, .noreturn => wasm.block_empty,
- else => genValtype(ty, target),
+ else => genValtype(ty, mod),
 };
 }
@@ -1101,7 +1099,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Creates one local for a given `Type`.
/// Returns a corresponding `WValue` with `local` as the active tag
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const valtype = typeToValtype(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const valtype = typeToValtype(ty, mod);
 switch (valtype) {
 .i32 => if (func.free_locals_i32.popOrNull()) |index| {
 log.debug("reusing local ({d}) of type {}", .{ index, valtype });
@@ -1132,7 +1131,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Ensures a new local will be created. This is useful when a
/// zero-initialized local is needed.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- try func.locals.append(func.gpa, genValtype(ty, func.target));
+ const mod = func.bin_file.base.options.module.?;
+ try func.locals.append(func.gpa, genValtype(ty, mod));
 const initial_index = func.local_index;
 func.local_index += 1;
 return WValue{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1140,48 +1140,54 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Generates a `wasm.Type` from a given function type.
/// Memory is owned by the caller.
-fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type {
+fn genFunctype(
+ gpa: Allocator,
+ cc: std.builtin.CallingConvention,
+ params: []const Type,
+ return_type: Type,
+ mod: *Module,
+) !wasm.Type {
 var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
 defer temp_params.deinit();
 var returns = std.ArrayList(wasm.Valtype).init(gpa);
 defer returns.deinit();
- if (firstParamSRet(cc, return_type, target)) {
+ if (firstParamSRet(cc, return_type, mod)) {
 try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime()) {
+ } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) {
 if (cc == .C) {
- const res_classes = abi.classifyType(return_type, target);
+ const res_classes = abi.classifyType(return_type, mod);
 assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, target);
- try returns.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(return_type, mod);
+ try returns.append(typeToValtype(scalar_type, mod));
 } else {
- try returns.append(typeToValtype(return_type, target));
+ try returns.append(typeToValtype(return_type, mod));
 }
- } else if (return_type.isError()) {
+ } else if (return_type.isError(mod)) {
 try returns.append(.i32);
 }
 // param types
 for (params) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
 switch (cc) {
 .C => {
- const param_classes = abi.classifyType(param_type, target);
+ const param_classes = abi.classifyType(param_type, mod);
 for (param_classes) |class| {
 if (class == .none) continue;
 if (class == .direct) {
- const scalar_type = abi.scalarType(param_type, target);
- try temp_params.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(param_type, mod);
+ try temp_params.append(typeToValtype(scalar_type, mod));
 } else {
- try temp_params.append(typeToValtype(param_type, target));
+ try temp_params.append(typeToValtype(param_type, mod));
 }
 }
 },
- else => if (isByRef(param_type, target))
+ else => if (isByRef(param_type, mod))
 try temp_params.append(.i32)
 else
- try temp_params.append(typeToValtype(param_type, target)),
+ try temp_params.append(typeToValtype(param_type, mod)),
 }
} @@ -1227,7 +1233,8 @@ pub fn generate( fn genFunc(func: *CodeGen) InnerError!void { const fn_info = func.decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + const mod = func.bin_file.base.options.module.?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1254,7 +1261,7 @@ fn genFunc(func: *CodeGen) InnerError!void { if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); const last_inst_ty = func.air.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) { + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { try func.addTag(.@"unreachable"); } } @@ -1335,6 +1342,7 @@ const CallWValues = struct { }; fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { + const mod = func.bin_file.base.options.module.?; const cc = fn_ty.fnCallingConvention(); const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); defer func.gpa.free(param_types); @@ -1351,7 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1361,7 +1369,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1371,7 +1379,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV }, .C => { for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, func.target); + const ty_classes = abi.classifyType(ty, mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -1385,11 +1393,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, target), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => { - const ty_classes = abi.classifyType(return_type, target); + const ty_classes = abi.classifyType(return_type, mod); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1405,16 +1413,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } - const ty_classes = abi.classifyType(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const ty_classes = abi.classifyType(ty, mod); assert(ty_classes[0] != .none); - switch (ty.zigTypeTag()) { 
+ switch (ty.zigTypeTag(mod)) { .Struct, .Union => { if (ty_classes[0] == .indirect) { return func.lowerToStack(value); } assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, func.target); - const abi_size = scalar_type.abiSize(func.target); + const scalar_type = abi.scalarType(ty, mod); + const abi_size = scalar_type.abiSize(mod); try func.emitWValue(value); // When the value lives in the virtual stack, we must load it onto the actual stack @@ -1422,12 +1431,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: const opcode = buildOpcode(.{ .op = .load, .width = @intCast(u8, abi_size), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = value.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); } }, @@ -1436,7 +1445,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); - assert(ty.abiSize(func.target) == 16); + assert(ty.abiSize(mod) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. try func.emitWValue(value); try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); @@ -1503,18 +1512,18 @@ fn restoreStackPointer(func: *CodeGen) !void { /// /// Asserts Type has codegenbits fn allocStack(func: *CodeGen, ty: Type) !WValue { - assert(ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); if (func.initial_stack_value == .none) { try func.initializeStack(); } - const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(module), ty.abiSize(func.target), + ty.fmt(mod), ty.abiSize(mod), }); }; - const abi_align = ty.abiAlignment(func.target); + const abi_align = ty.abiAlignment(mod); if (abi_align > func.stack_alignment) { func.stack_alignment = abi_align; @@ -1531,6 +1540,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { + const mod = func.bin_file.base.options.module.?; const ptr_ty = func.air.typeOfIndex(inst); const pointee_ty = ptr_ty.childType(); @@ -1538,15 +1548,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { try func.initializeStack(); } - if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.allocStack(Type.usize); // create a value containing just the stack pointer. 
}
- const abi_alignment = ptr_ty.ptrAlignment(func.target);
- const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_alignment = ptr_ty.ptrAlignment(mod);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse {
 return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- pointee_ty.fmt(module), pointee_ty.abiSize(func.target),
+ pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
 });
 };
 if (abi_alignment > func.stack_alignment) {
@@ -1704,8 +1713,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, returns true when the type will be passed
/// by reference rather than by value
-fn isByRef(ty: Type, target: std.Target) bool {
- switch (ty.zigTypeTag()) {
+fn isByRef(ty: Type, mod: *const Module) bool {
+ const target = mod.getTarget();
+ switch (ty.zigTypeTag(mod)) {
 .Type,
 .ComptimeInt,
 .ComptimeFloat,
@@ -1726,40 +1736,40 @@ fn isByRef(ty: Type, target: std.Target) bool {
 .Array,
 .Frame,
- => return ty.hasRuntimeBitsIgnoreComptime(),
+ => return ty.hasRuntimeBitsIgnoreComptime(mod),
 .Union => {
 if (ty.castTag(.@"union")) |union_ty| {
 if (union_ty.data.layout == .Packed) {
- return ty.abiSize(target) > 8;
+ return ty.abiSize(mod) > 8;
 }
 }
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
 },
 .Struct => {
 if (ty.castTag(.@"struct")) |struct_ty| {
 const struct_obj = struct_ty.data;
 if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
- return isByRef(struct_obj.backing_int_ty, target);
+ return isByRef(struct_obj.backing_int_ty, mod);
 }
 }
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
 },
- .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled,
- .Int => return ty.intInfo(target).bits > 64,
+ .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled,
+ .Int => return ty.intInfo(mod).bits > 64,
 .Float => return ty.floatBits(target) > 64,
 .ErrorUnion => {
 const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
 return false;
 }
 return true;
 },
 .Optional => {
- if (ty.isPtrLikeOptional()) return false;
+ if (ty.isPtrLikeOptional(mod)) return false;
 var buf: Type.Payload.ElemType = undefined;
 const pl_type = ty.optionalChild(&buf);
- if (pl_type.zigTypeTag() == .ErrorSet) return false;
- return pl_type.hasRuntimeBitsIgnoreComptime();
+ if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime(mod);
 },
 .Pointer => {
 // Slices act like structs and will be passed by reference
@@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum {
/// This means when a given type is 128 bits and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This allows storing
/// it with a single instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy { - std.debug.assert(ty.zigTypeTag() == .Vector); - if (ty.bitSize(target) != 128) return .unrolled; +fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy { + std.debug.assert(ty.zigTypeTag(mod) == .Vector); + if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; + const target = mod.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -2084,32 +2095,33 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const fn_info = func.decl.ty.fnInfo(); const ret_ty = fn_info.return_type; + const mod = func.bin_file.base.options.module.?; // result must be stored in the stack and we return a pointer // to the stack instead if (func.return_value != .none) { try func.store(func.return_value, operand, ret_ty, 0); - } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { - switch (ret_ty.zigTypeTag()) { + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + switch (ret_ty.zigTypeTag(mod)) { // Aggregate types can be lowered as a singular value .Struct, .Union => { - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(func.target) * 8), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); }, else => try func.emitWValue(operand), } } else { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) { try func.addImm32(0); } else { try func.emitWValue(operand); @@ -2123,14 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const child_type = func.air.typeOfIndex(inst).childType(); + const mod = func.bin_file.base.options.module.?; var result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { break :result try func.allocStack(Type.usize); // create pointer to void } const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { break :result func.return_value; } @@ -2141,16 +2154,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const ret_ty = func.air.typeOf(un_op).childType(); const fn_info = func.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if 
(ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2167,26 +2181,26 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); const ty = func.air.typeOf(pl_op.operand); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, }; const ret_ty = fn_ty.fnReturnType(); const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target); + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand) orelse break :blk null; - const module = func.bin_file.base.options.module.?; + const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); break :blk function.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = module.declPtr(extern_fn.data.owner_decl); + const ext_decl = mod.declPtr(extern_fn.data.owner_decl); const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2215,7 +2229,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_val = try func.resolveInst(arg); const arg_ty = func.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); } @@ -2226,11 +2240,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else { // in this case we call a function pointer // so load its value onto the stack - std.debug.assert(ty.zigTypeTag() == .Pointer); + std.debug.assert(ty.zigTypeTag(mod) == .Pointer); const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2238,7 +2252,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } const result_value = result_value: { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; } else if (ret_ty.isNoReturn()) { try func.addTag(.@"unreachable"); @@ -2246,10 +2260,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (first_param_sret) { break :result_value sret; 
// TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) { + } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); const result = try func.allocStack(scalar_type); try func.store(result, result_local, scalar_type, 0); break :result_value result; @@ -2272,6 +2286,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -2290,17 +2305,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); - if (isByRef(int_elem_ty, func.target)) { + if (isByRef(int_elem_ty, mod)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1); + var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); mask <<= @intCast(u6, ptr_info.bit_offset); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.host_size <= 4) @@ -2329,11 +2340,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const abi_size = ty.abiSize(func.target); - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const abi_size = ty.abiSize(mod); + switch (ty.zigTypeTag(mod)) { .ErrorUnion => { const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -2341,26 +2353,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { return func.store(lhs, rhs, Type.usize, 0); } var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.u8, 0); } - if (pl_ty.zigTypeTag() == .ErrorSet) { + if (pl_ty.zigTypeTag(mod) == .ErrorSet) { return func.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, func.target)) { + .Struct, .Array, .Union => if (isByRef(ty, mod)) { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, func.target)) { + .Vector => switch 
(determineSimdStoreStrategy(ty, mod)) { .unrolled => { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2374,7 +2386,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), offset + lhs.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, @@ -2404,7 +2416,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); }, else => if (abi_size > 8) { return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2418,7 +2430,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, func.target); + const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @intCast(u8, abi_size * 8), @@ -2428,21 +2440,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // store rhs value at stack pointer's location in memory try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) }, ); } fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); const ptr_ty = func.air.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo().data; - if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, func.target)) { + if (isByRef(ty, mod)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2455,11 +2468,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // at this point we have a non-natural alignment, we must // shift the value to obtain the correct bit. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); const shift_val = if (ptr_info.host_size <= 4) WValue{ .imm32 = ptr_info.bit_offset } else if (ptr_info.host_size <= 8) @@ -2479,25 +2488,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
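/// The given `offset` is folded into the instruction's memarg together with
/// the operand's own offset, so no separate address arithmetic is emitted.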
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // load local's value from memory by its stack position try func.emitWValue(operand); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes const extra_index = @intCast(u32, func.mir_extra.items.len); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), offset + operand.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(func.target)); + const abi_size = @intCast(u8, ty.abiSize(mod)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, .signedness = .unsigned, @@ -2505,7 +2515,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) }, ); return WValue{ .stack = {} }; @@ -2516,8 +2526,9 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const arg = func.args[arg_index]; const cc = func.decl.ty.fnInfo().cc; const arg_ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; if (cc == .C) { - const arg_classes = abi.classifyType(arg_ty, func.target); + const arg_classes = abi.classifyType(arg_ty, mod); for (arg_classes) |class| { if (class != .none) { func.arg_index += 1; @@ -2527,7 +2538,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When we have an argument that's passed using more than a single parameter, // we combine them into a single stack value if (arg_classes[0] == .direct and arg_classes[1] == .direct) { - if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) { + if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) { return func.fail( "TODO: Implement C-ABI argument for type '{}'", .{arg_ty.fmt(func.bin_file.base.options.module.?)}, @@ -2557,6 +2568,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -2570,10 +2582,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
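// Shifts additionally require both operands in a single wasm value type; when
// the lhs and rhs were lowered to different widths, the rhs is first intcast
// to the lhs type below.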
const result = switch (op) {
 .shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
 return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
 };
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
 const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
 const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
 break :blk try tmp.toLocal(func, lhs_ty);
@@ -2593,6 +2605,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
/// Performs a binary operation on the given `WValue`s
/// NOTE: This leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
 assert(!(lhs != .stack and rhs == .stack));
 if (ty.isAnyFloat()) {
@@ -2600,8 +2613,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
 return func.floatOp(float_op, ty, &.{ lhs, rhs });
 }
- if (isByRef(ty, func.target)) {
- if (ty.zigTypeTag() == .Int) {
+ if (isByRef(ty, mod)) {
+ if (ty.zigTypeTag(mod) == .Int) {
 return func.binOpBigInt(lhs, rhs, ty, op);
 } else {
 return func.fail(
@@ -2613,8 +2626,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
 const opcode: wasm.Opcode = buildOpcode(.{
 .op = op,
- .valtype1 = typeToValtype(ty, func.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(ty, mod),
+ .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
 });
 try func.emitWValue(lhs);
 try func.emitWValue(rhs);
@@ -2625,7 +2638,8 @@
} fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - if (ty.intInfo(func.target).bits > 128) { + const mod = func.bin_file.base.options.module.?; + if (ty.intInfo(mod).bits > 128) { return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } @@ -2763,7 +2777,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError } fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement floatOps for vectors", .{}); } @@ -2773,7 +2788,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2827,6 +2842,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In } fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); @@ -2834,7 +2850,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const lhs_ty = func.air.typeOf(bin_op.lhs); const rhs_ty = func.air.typeOf(bin_op.rhs); - if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } @@ -2845,10 +2861,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2877,8 +2893,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack. 
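// Annotation for the wrapOperand function that follows: it discards the bits
// above the type's bit width so that e.g. a u3 held in an i32 local stays in
// the range 0..7. The body is elided from this hunk; the usual technique for
// an unsigned type, shown here only as an assumed sketch, masks with
// (1 << bits) - 1 (signed types would need sign-extension instead):
if (wasm_bits == 32 and bitsize < 32) {
    const mask = (@as(u32, 1) << @intCast(u5, bitsize)) - 1;
    try func.addImm32(@bitCast(i32, mask));
    try func.addTag(.i32_and); // wrapped result stays on the stack
}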
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - assert(ty.abiSize(func.target) <= 16); - const bitsize = @intCast(u16, ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + assert(ty.abiSize(mod) <= 16); + const bitsize = @intCast(u16, ty.bitSize(mod)); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2915,6 +2932,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { } fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; switch (ptr_val.tag()) { .decl_ref_mut => { const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; @@ -2932,15 +2950,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const field_ptr = ptr_val.castTag(.field_ptr).?.data; const parent_ty = field_ptr.container_ty; - const field_offset = switch (parent_ty.zigTypeTag()) { + const field_offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout()) { - .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target), - else => parent_ty.structFieldOffset(field_ptr.field_index, func.target), + .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), + else => parent_ty.structFieldOffset(field_ptr.field_index, mod), }, .Union => switch (parent_ty.containerLayout()) { .Packed => 0, else => blk: { - const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target); + const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) break :blk 0; if (layout.payload_align > layout.tag_align) break :blk 0; @@ -2964,7 +2982,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target); + const elem_offset = index * elem_ptr.elem_ty.abiSize(mod); return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); }, .opt_payload_ptr => { @@ -2976,9 +2994,9 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue } fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - module.markDeclAlive(decl); + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = decl.ty, @@ -2992,18 +3010,18 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = 
func.bin_file.getAtom(atom_index); const target_sym_index = atom.sym_index; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try func.bin_file.addTableFunction(target_sym_index); return WValue{ .function_index = target_sym_index }; } else if (offset == 0) { @@ -3041,31 +3059,31 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const decl_index = decl_ref_mut.data.decl_index; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } - const target = func.target; - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Void => return WValue{ .none = {} }, .Int => { - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u6, int_info.bits), )) }, 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u7, int_info.bits), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) }, + 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, + .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .Float => switch (ty.floatBits(func.target)) { 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, 32 => return WValue{ .float32 = val.toFloat(f32) }, @@ -3074,7 +3092,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Pointer => switch (val.tag()) { .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, + .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .zero, .null_value => return WValue{ .imm32 = 0 }, else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), }, @@ -3100,8 +3118,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); + const int_tag_ty = ty.intTagType(); return func.lowerConstant(val, int_tag_ty); } }, @@ -3115,7 +3132,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { .ErrorUnion => { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
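// Annotation: in lowerConstant above, signed immediates are emitted in two's
// complement form at the type's exact bit width. A sketch of what a helper
// like toTwosComplement computes (its body is not part of this hunk):
fn toTwosComplementSketch(value: i64, bits: u7) u64 {
    const unsigned = @bitCast(u64, value);
    if (bits == 64) return unsigned;
    return unsigned & ((@as(u64, 1) << @intCast(u6, bits)) - 1); // keep `bits` bits
}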
const is_pl = val.errorUnionIsPayload(); const err_val = if (!is_pl) val else Value.initTag(.zero); @@ -3123,12 +3140,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload()) { + .Optional => if (ty.optionalReprIsPayload(mod)) { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); if (val.castTag(.opt_payload)) |payload| { return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull()) { + } else if (val.isNull(mod)) { return WValue{ .imm32 = 0 }; } else { return func.lowerConstant(val, pl_ty); @@ -3150,7 +3167,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return func.lowerConstant(int_val, struct_obj.backing_int_ty); }, .Vector => { - assert(determineSimdStoreStrategy(ty, target) == .direct); + assert(determineSimdStoreStrategy(ty, mod) == .direct); var buf: [16]u8 = undefined; val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; return func.storeSimdImmd(buf); @@ -3176,9 +3193,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { } fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, - .Int, .Enum => switch (ty.intInfo(func.target).bits) { + .Int, .Enum => switch (ty.intInfo(mod).bits) { 0...32 => return WValue{ .imm32 = 0xaaaaaaaa }, 33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, @@ -3197,7 +3215,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { .Optional => { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return func.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -3210,7 +3228,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, - else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}), + else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}), } } @@ -3218,8 +3236,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
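// Annotation: the Vector case above shows how constants under the `direct`
// SIMD store strategy are lowered: the value is serialized into a 16-byte
// buffer and emitted as a single v128 immediate.
var buf: [16]u8 = undefined;
val.writeToMemory(ty, mod, &buf) catch unreachable; // serialize the vector bytes
return func.storeSimdImmd(buf); // becomes a v128.const-style immediate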
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { - const target = func.target; - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Enum => { if (val.castTag(.enum_field_index)) |field_index| { switch (ty.tag()) { @@ -3239,35 +3257,35 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { else => unreachable, } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); + const int_tag_ty = ty.intTagType(); return func.valueAsI32(val, int_tag_ty); } }, - .Int => switch (ty.intInfo(func.target).signedness) { - .signed => return @truncate(i32, val.toSignedInt(target)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))), + .Int => switch (ty.intInfo(mod).signedness) { + .signed => return @truncate(i32, val.toSignedInt(mod)), + .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))), }, .ErrorSet => { const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, - .Bool => return @intCast(i32, val.toSignedInt(target)), - .Pointer => return @intCast(i32, val.toSignedInt(target)), + .Bool => return @intCast(i32, val.toSignedInt(mod)), + .Pointer => return @intCast(i32, val.toSignedInt(mod)), else => unreachable, // Programmer called this function for an illegal type } } fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const block_ty = func.air.getRefType(ty_pl.ty); - const wasm_block_ty = genBlockType(block_ty, func.target); + const wasm_block_ty = genBlockType(block_ty, mod); const extra = func.air.extraData(Air.Block, ty_pl.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; // if wasm_block_ty is non-empty, we create a register to store the temporary value const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty; + const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty; break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else WValue.none; @@ -3379,16 +3397,17 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In /// NOTE: This leaves the result on top of the stack, rather than a new local. fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(!(lhs != .stack and rhs == .stack)); - if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for // both lhs and rhs, as well as checking the payload are matching of lhs and rhs return func.cmpOptionals(lhs, rhs, ty, op); } - } else if (isByRef(ty, func.target)) { + } else if (isByRef(ty, mod)) { return func.cmpBigInt(lhs, rhs, ty, op); } else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) { return func.cmpFloat16(lhs, rhs, op); @@ -3401,13 +3420,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (ty.zigTypeTag() != .Int) break :blk .unsigned; + if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned; // incase of an actual integer, we emit the correct signedness - break :blk ty.intInfo(func.target).signedness; + break :blk ty.intInfo(mod).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .op = switch (op) { .lt => .lt, .lte => .le, @@ -3464,11 +3483,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const br = func.air.instructions.items(.data)[inst].br; const block = func.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) { + if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) { const operand = try func.resolveInst(br.operand); try func.lowerToStack(operand); @@ -3490,16 +3510,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const operand_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (operand_ty.zigTypeTag() == .Bool) { + if (operand_ty.zigTypeTag(mod) == .Bool) { try func.emitWValue(operand); try func.addTag(.i32_eqz); const not_tmp = try func.allocLocal(operand_ty); try func.addLabel(.local_set, not_tmp.local.value); break :result not_tmp; } else { - const operand_bits = operand_ty.intInfo(func.target).bits; + const operand_bits = operand_ty.intInfo(mod).bits; const wasm_bits = toWasmBits(operand_bits) orelse { return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits}); }; @@ -3566,16 +3587,17 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand; - if (wanted_ty.bitSize(func.target) > 64) return operand; - assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt())); + if (wanted_ty.bitSize(mod) > 64) return operand; + assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod))); const opcode = buildOpcode(.{ .op = .reinterpret, - .valtype1 = typeToValtype(wanted_ty, func.target), - .valtype2 = typeToValtype(given_ty, func.target), + .valtype1 = typeToValtype(wanted_ty, mod), + .valtype2 = typeToValtype(given_ty, mod), }); try 
func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3609,19 +3631,20 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; const result_ty = func.air.typeOfIndex(inst); const offset = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { if (result_ty.ptrInfo().data.host_size != 0) { break :offset @as(u32, 0); } - break :offset struct_ty.packedStructFieldByteOffset(index, func.target); + break :offset struct_ty.packedStructFieldByteOffset(index, mod); }, .Union => 0, else => unreachable, }, - else => struct_ty.structFieldOffset(index, func.target), + else => struct_ty.structFieldOffset(index, mod), }; // save a load and store when we can simply reuse the operand if (offset == 0) { @@ -3636,6 +3659,7 @@ fn structFieldPtr( } fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; @@ -3643,15 +3667,15 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); const result = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { const struct_obj = struct_ty.castTag(.@"struct").?.data; - const offset = struct_obj.packedFieldBitOffset(func.target, field_index); + const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; - const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse { + const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); }; const const_wvalue = if (wasm_bits == 32) @@ -3667,25 +3691,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else try func.binOp(operand, const_wvalue, backing_ty, .shr); - if (field_ty.zigTypeTag() == .Float) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) { + } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) { // In this case we do not have to perform any transformations, // we can simply reuse the operand. 
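// Annotation: the replacement above is the central InternPool API change for
// ad-hoc integer types. Before, a temporary Type was backed by a
// stack-allocated payload, valid only while that payload lived:
//
//     var payload: Type.Payload.Bits = .{
//         .base = .{ .tag = .int_unsigned },
//         .data = bits,
//     };
//     const int_ty = Type.initPayload(&payload.base);
//
// Now the type is interned through the Module, which can fail and therefore
// takes `try` (`bits` below is the field's bit count):
const int_ty = try mod.intType(.unsigned, bits);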
break :result func.reuseOperand(struct_field.struct_operand, operand); - } else if (field_ty.isPtrAtRuntime()) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3693,8 +3709,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - if (isByRef(struct_ty, func.target)) { - if (!isByRef(field_ty, func.target)) { + if (isByRef(struct_ty, mod)) { + if (!isByRef(field_ty, mod)) { const val = try func.load(operand, field_ty, 0); break :result try val.toLocal(func, field_ty); } else { @@ -3704,26 +3720,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, struct_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field_ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3733,11 +3737,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }, else => result: { - const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)}); + const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse { + return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)}); }; - if (isByRef(field_ty, func.target)) { + if (isByRef(field_ty, mod)) { switch (operand) { .stack_offset => |stack_offset| { break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -3754,6 +3757,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; // result type is always 'noreturn' const blocktype = wasm.block_empty; const pl_op = 
func.air.instructions.items(.data)[inst].pl_op; @@ -3787,7 +3791,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref).?; + const item_val = func.air.value(ref, mod).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -3810,7 +3814,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the target is an integer size larger than u32, we have no way to use the value // as an index, therefore we also use an if/else-chain for those cases. // TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'. - const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32; + const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32; const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len]; const has_else_body = else_body.len != 0; @@ -3855,7 +3859,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // for errors that are not present in any branch. This is fine as this default // case will never be hit for those cases but we do save runtime cost and size // by using a jump table for this instead of if-else chains. - break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable; + break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable; }; func.mir_extra.appendAssumeCapacity(idx); } else if (has_else_body) { @@ -3866,10 +3870,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; + if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned; // incase of an actual integer, we emit the correct signedness - break :blk target_ty.intInfo(func.target).signedness; + break :blk target_ty.intInfo(mod).signedness; }; try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body)); @@ -3882,7 +3886,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(case.values[0].value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition. 
.signedness = signedness, }); @@ -3896,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .eq, .signedness = signedness, }); @@ -3933,6 +3937,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const err_union_ty = func.air.typeOf(un_op); @@ -3948,10 +3953,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } try func.emitWValue(operand); - if (pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)), - .alignment = Type.anyerror.abiAlignment(func.target), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -3967,6 +3972,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -3975,15 +3981,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const payload_ty = err_ty.errorUnionPayload(); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (op_is_ptr) { break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)); - if (op_is_ptr or isByRef(payload_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -3994,6 +4000,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4006,17 +4013,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) break :result WValue{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target))); + const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod 
= func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4024,18 +4032,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void const pl_ty = func.air.typeOf(ty_op.operand); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4043,6 +4051,7 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4050,17 +4059,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_ty = err_ty.errorUnionPayload(); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target))); + try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target)); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4074,15 +4083,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); const operand_ty = func.air.typeOf(ty_op.operand); - if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); } - if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?; - const wanted_bits = 
toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?; + const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4096,8 +4106,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Asserts type's bitsize <= 128 /// NOTE: May leave the result on the top of the stack. fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bitsize = @intCast(u16, given.bitSize(func.target)); - const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bitsize = @intCast(u16, given.bitSize(mod)); + const wanted_bitsize = @intCast(u16, wanted.bitSize(mod)); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4110,7 +4121,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro try func.addTag(.i32_wrap_i64); } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) { try func.emitWValue(operand); - try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u); + try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u); } else if (wanted_bits == 128) { // for 128bit integers we store the integer in the virtual stack, rather than a local const stack_ptr = try func.allocStack(wanted); @@ -4119,14 +4130,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it // meaning less store operations are required. const lhs = if (op_bits == 32) blk: { - break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64); + break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64); } else operand; // store msb first try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value - if (wanted.isSignedInt()) { + if (wanted.isSignedInt(mod)) { try func.emitWValue(stack_ptr); const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset()); @@ -4154,16 +4165,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: /// For a given type and operand, checks if it's considered `null`. 
/// NOTE: Leaves the result on the stack fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); - if (!optional_ty.optionalReprIsPayload()) { + if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)}); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)}); }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } @@ -4183,18 +4194,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod } fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const opt_ty = func.air.typeOf(ty_op.operand); const payload_ty = func.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } const result = result: { const operand = try func.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand); + if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, func.target)) { + if (isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4209,10 +4221,11 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const mod = func.bin_file.base.options.module.?; const result = result: { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4222,22 +4235,22 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.air.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { return 
func.finishAir(inst, operand, &.{ty_op.operand}); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)}); }; try func.emitWValue(operand); @@ -4251,9 +4264,10 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const payload_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const non_null_bit = try func.allocStack(Type.initTag(.u1)); try func.emitWValue(non_null_bit); try func.addImm32(1); @@ -4263,12 +4277,11 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const op_ty = func.air.typeOfIndex(inst); - if (op_ty.optionalReprIsPayload()) { + if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type @@ -4314,7 +4327,8 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); // load pointer onto stack _ = try func.load(slice, Type.usize, 0); @@ -4328,7 +4342,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result_ptr.local.value); - const result = if (!isByRef(elem_ty, func.target)) result: { + const result = if (!isByRef(elem_ty, mod)) result: { const elem_val = try func.load(result_ptr, elem_ty, 0); break :result try elem_val.toLocal(func, elem_ty); } else result_ptr; @@ -4341,7 +4355,8 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4389,13 +4404,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Truncates a given operand to a given type, discarding any overflown bits. /// NOTE: Resulting value is left on the stack. 
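// Annotation for the trunc function that follows: truncation is an intcast
// into the wanted type followed by wrapOperand whenever the wanted bit count
// is not a native wasm width (32, 64 or 128), since e.g. a u24 still lives in
// an i32 and its excess high bits must be cleared explicitly:
var result = try func.intcast(operand, given_ty, wanted_ty);
const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
if (toWasmBits(wanted_bits).? != wanted_bits) {
    result = try func.wrapOperand(result, wanted_ty); // clear the excess bits
}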
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { - const given_bits = @intCast(u16, given_ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bits = @intCast(u16, given_ty.bitSize(mod)); if (toWasmBits(given_bits) == null) { return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } var result = try func.intcast(operand, given_ty, wanted_ty); - const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target)); + const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod)); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { result = try func.wrapOperand(result, wanted_ty); @@ -4412,6 +4428,7 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4422,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice_local = try func.allocStack(slice_ty); // store the array ptr in the slice - if (array_ty.hasRuntimeBitsIgnoreComptime()) { + if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.store(slice_local, operand, Type.usize, 0); } @@ -4454,7 +4471,8 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = ptr_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack if (ptr_ty.isSlice()) { @@ -4472,7 +4490,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_result = val: { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if it's not returned like above @@ -4489,7 +4507,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr_ty = func.air.typeOf(bin_op.lhs); const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4513,6 +4532,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4524,13 +4544,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { else => ptr_ty.childType(), }; - const valtype = typeToValtype(Type.usize, func.target); + const valtype = typeToValtype(Type.usize, mod); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); try func.lowerToStack(ptr); try func.emitWValue(offset); - try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target)))); + try func.addImm32(@bitCast(i32, 
@intCast(u32, pointee_ty.abiSize(mod)))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4572,7 +4592,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// this to wasm's memset instruction. When the feature is not present, /// we implement it manually. fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { - const abi_size = @intCast(u32, elem_ty.abiSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const abi_size = @intCast(u32, elem_ty.abiSize(mod)); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. @@ -4666,24 +4687,25 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = array_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); - if (isByRef(array_ty, func.target)) { + if (isByRef(array_ty, mod)) { try func.lowerToStack(array); try func.emitWValue(index); try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); try func.addTag(.i32_mul); try func.addTag(.i32_add); } else { - std.debug.assert(array_ty.zigTypeTag() == .Vector); + std.debug.assert(array_ty.zigTypeTag(mod) == .Vector); switch (index) { inline .imm32, .imm64 => |lane| { - const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) { - 8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u, - 16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u, - 32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane, - 64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane, + const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) { + 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u, + 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u, + 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane, + 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane, else => unreachable, }; @@ -4715,7 +4737,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if no longer needed and not returned like above @@ -4733,17 +4755,18 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const dest_ty = func.air.typeOfIndex(inst); const op_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; - if (op_ty.abiSize(func.target) > 8) { + if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{}); } try func.emitWValue(operand); const op = buildOpcode(.{ .op = .trunc, - .valtype1 = typeToValtype(dest_ty, func.target), - .valtype2 = typeToValtype(op_ty, func.target), - .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(dest_ty, mod), + .valtype2 = typeToValtype(op_ty, mod), + .signedness = if (dest_ty.isSignedInt(mod)) .signed else 
.unsigned, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty); @@ -4757,17 +4780,18 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const dest_ty = func.air.typeOfIndex(inst); const op_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; - if (op_ty.abiSize(func.target) > 8) { + if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{}); } try func.emitWValue(operand); const op = buildOpcode(.{ .op = .convert, - .valtype1 = typeToValtype(dest_ty, func.target), - .valtype2 = typeToValtype(op_ty, func.target), - .signedness = if (op_ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(dest_ty, mod), + .valtype2 = typeToValtype(op_ty, mod), + .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -4777,18 +4801,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.typeOfIndex(inst); const elem_ty = ty.childType(); - if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: { + if (determineSimdStoreStrategy(ty, mod) == .direct) blk: { switch (operand) { // when the operand lives in the linear memory section, we can directly // load and splat the value at once. Meaning we do not first have to load // the scalar value onto the stack. .stack_offset, .memory, .memory_offset => { - const opcode = switch (elem_ty.bitSize(func.target)) { + const opcode = switch (elem_ty.bitSize(mod)) { 8 => std.wasm.simdOpcode(.v128_load8_splat), 16 => std.wasm.simdOpcode(.v128_load16_splat), 32 => std.wasm.simdOpcode(.v128_load32_splat), @@ -4803,18 +4828,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.mir_extra.appendSlice(func.gpa, &[_]u32{ opcode, operand.offset(), - elem_ty.abiAlignment(func.target), + elem_ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); try func.addLabel(.local_set, result.local.value); return func.finishAir(inst, result, &.{ty_op.operand}); }, .local => { - const opcode = switch (elem_ty.bitSize(func.target)) { + const opcode = switch (elem_ty.bitSize(mod)) { 8 => std.wasm.simdOpcode(.i8x16_splat), 16 => std.wasm.simdOpcode(.i16x8_splat), - 32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), - 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), + 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), + 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), else => break :blk, // Cannot make use of simd-instructions }; const result = try func.allocLocal(ty); @@ -4828,14 +4853,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } } - const elem_size = elem_ty.bitSize(func.target); + const elem_size = elem_ty.bitSize(mod); const vector_len = @intCast(usize, ty.vectorLen()); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return 
func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } const result = try func.allocStack(ty); - const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod)); var index: usize = 0; var offset: u32 = 0; while (index < vector_len) : (index += 1) { @@ -4855,6 +4880,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const inst_ty = func.air.typeOfIndex(inst); const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -4865,16 +4891,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mask_len = extra.mask_len; const child_ty = inst_ty.childType(); - const elem_size = child_ty.abiSize(func.target); + const elem_size = child_ty.abiSize(mod); - const module = func.bin_file.base.options.module.?; // TODO: One of them could be by ref; handle in loop - if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) { + if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { var buf: Value.ElemValueBuffer = undefined; - const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target); + const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); try func.emitWValue(result); @@ -4895,7 +4920,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lanes = std.mem.asBytes(operands[1..]); for (0..@intCast(usize, mask_len)) |index| { var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target); + const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); const base_index = if (mask_elem >= 0) @intCast(u8, @intCast(i64, elem_size) * mask_elem) else @@ -4930,13 +4955,14 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ty = func.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); + const mod = func.bin_file.base.options.module.?; const result: WValue = result_value: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Array => { const result = try func.allocStack(result_ty); const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const sentinel = if (result_ty.sentinel()) |sent| blk: { break :blk try func.lowerConstant(sent, elem_ty); } else null; @@ -4944,7 +4970,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store // each value individually, instead of using store offsets. - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { // copy stack pointer into a temporary local, which is // moved for each element to store each value in the right position. 
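// Annotation: in the array branch that follows, a byRef element type forces a
// full copy per element, so the code walks a temporary pointer across the
// result slot instead of using store offsets. A condensed sketch using only
// calls visible in these hunks (the per-element pointer advance is elided):
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const walk_ptr = try func.buildPointerOffset(result, 0, .new); // pointer into result
for (elements) |elem| {
    const value = try func.resolveInst(elem);
    try func.store(walk_ptr, value, elem_ty, 0); // copy one element
    // ...then walk_ptr is advanced by elem_size before the next iteration
}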
                    const offset = try func.buildPointerOffset(result, 0, .new);
@@ -4974,7 +5000,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             },
             .Struct => switch (result_ty.containerLayout()) {
                 .Packed => {
-                    if (isByRef(result_ty, func.target)) {
+                    if (isByRef(result_ty, mod)) {
                         return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
                     const struct_obj = result_ty.castTag(.@"struct").?.data;
@@ -4983,7 +5009,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

                     // ensure the result is zero'd
                     const result = try func.allocLocal(backing_type);
-                    if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+                    if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
                         try func.addImm32(0)
                     else
                         try func.addImm64(0);
@@ -4992,20 +5018,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     var current_bit: u16 = 0;
                     for (elements, 0..) |elem, elem_index| {
                         const field = fields[elem_index];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

-                        const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+                        const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
                             WValue{ .imm32 = current_bit }
                         else
                             WValue{ .imm64 = current_bit };

                         const value = try func.resolveInst(elem);
-                        const value_bit_size = @intCast(u16, field.ty.bitSize(func.target));
-                        var int_ty_payload: Type.Payload.Bits = .{
-                            .base = .{ .tag = .int_unsigned },
-                            .data = value_bit_size,
-                        };
-                        const int_ty = Type.initPayload(&int_ty_payload.base);
+                        const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+                        const int_ty = try mod.intType(.unsigned, value_bit_size);

                         // load our current result on stack so we can perform all transformations
                         // using only stack values. Saving the cost of loads and stores.
@@ -5027,10 +5049,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     const result = try func.allocStack(result_ty);
                     const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
                     for (elements, 0..) |elem, elem_index| {
-                        if (result_ty.structFieldValueComptime(elem_index) != null) continue;
+                        if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue;

                         const elem_ty = result_ty.structFieldType(elem_index);
-                        const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+                        const elem_size = @intCast(u32, elem_ty.abiSize(mod));
                         const value = try func.resolveInst(elem);
                         try func.store(offset, value, elem_ty, 0);
@@ -5058,12 +5080,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;

     const result = result: {
         const union_ty = func.air.typeOfIndex(inst);
-        const layout = union_ty.unionGetLayout(func.target);
+        const layout = union_ty.unionGetLayout(mod);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
         const field = union_obj.fields.values()[extra.field_index];
         const field_name = union_obj.fields.keys()[extra.field_index];
@@ -5082,15 +5105,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             if (layout.tag_size == 0) {
                 break :result WValue{ .none = {} };
             }
-            assert(!isByRef(union_ty, func.target));
+            assert(!isByRef(union_ty, mod));
             break :result tag_int;
         }

-        if (isByRef(union_ty, func.target)) {
+        if (isByRef(union_ty, mod)) {
             const result_ptr = try func.allocStack(union_ty);
             const payload = try func.resolveInst(extra.init);
             if (layout.tag_align >= layout.payload_align) {
-                if (isByRef(field.ty, func.target)) {
+                if (isByRef(field.ty, mod)) {
                     const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
                     try func.store(payload_ptr, payload, field.ty, 0);
                 } else {
@@ -5114,26 +5137,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :result result_ptr;
         } else {
             const operand = try func.resolveInst(extra.init);
-            var payload: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = @intCast(u16, union_ty.bitSize(func.target)),
-            };
-            const union_int_type = Type.initPayload(&payload.base);
-            if (field.ty.zigTypeTag() == .Float) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = @intCast(u16, field.ty.bitSize(func.target)),
-                };
-                const int_type = Type.initPayload(&int_payload.base);
+            const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+            if (field.ty.zigTypeTag(mod) == .Float) {
+                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
                 const bitcasted = try func.bitcast(field.ty, int_type, operand);
                 const casted = try func.trunc(bitcasted, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
-            } else if (field.ty.isPtrAtRuntime()) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = @intCast(u16, field.ty.bitSize(func.target)),
-                };
-                const int_type = Type.initPayload(&int_payload.base);
+            } else if (field.ty.isPtrAtRuntime(mod)) {
+                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
                 const casted = try func.intcast(operand, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
             }
@@ -5171,7 +5182,8 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
 }

 fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.hasRuntimeBitsIgnoreComptime());
+    const mod = func.bin_file.base.options.module.?;
+    assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
     assert(op == .eq or op == .neq);

     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = operand_ty.optionalChild(&buf);
@@ -5189,7 +5201,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
     _ = try func.load(lhs, payload_ty, 0);
     _ = try func.load(rhs, payload_ty, 0);
-    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
     try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
     try func.addLabel(.br_if, 0);
@@ -5207,10 +5219,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
 /// NOTE: Leaves the result of the comparison on top of the stack.
 /// TODO: Lower this to compiler_rt call when bitsize > 128
 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.abiSize(func.target) >= 16);
+    const mod = func.bin_file.base.options.module.?;
+    assert(operand_ty.abiSize(mod) >= 16);
     assert(!(lhs != .stack and rhs == .stack));
-    if (operand_ty.bitSize(func.target) > 128) {
-        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)});
+    if (operand_ty.bitSize(mod) > 128) {
+        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
     }

     var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5233,7 +5246,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
             }
         },
         else => {
-            const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
+            const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
             // leave those value on top of the stack for '.select'
             const lhs_low_bit = try func.load(lhs, Type.u64, 8);
             const rhs_low_bit = try func.load(rhs, Type.u64, 8);
@@ -5248,10 +5261,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 }

 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const un_ty = func.air.typeOf(bin_op.lhs).childType();
     const tag_ty = func.air.typeOf(bin_op.rhs);
-    const layout = un_ty.unionGetLayout(func.target);
+    const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });

     const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5271,11 +5285,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;

     const un_ty = func.air.typeOf(ty_op.operand);
     const tag_ty = func.air.typeOfIndex(inst);
-    const layout = un_ty.unionGetLayout(func.target);
+    const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});

     const operand = try func.resolveInst(ty_op.operand);
@@ -5375,6 +5390,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
 }

 fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const err_set_ty = func.air.typeOf(ty_op.operand).childType();
@@ -5386,26 +5402,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
+        @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
     );

     const result = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }

-        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
+        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
 }

 fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

     const field_ptr = try func.resolveInst(extra.field_ptr);
     const parent_ty = func.air.getRefType(ty_pl.ty).childType();
-    const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
+    const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);

     const result = if (field_offset != 0) result: {
         const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5428,6 +5445,7 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
 }

 fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
     const dst_ty = func.air.typeOf(bin_op.lhs);
@@ -5437,16 +5455,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const len = switch (dst_ty.ptrSize()) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
-            if (ptr_elem_ty.abiSize(func.target) != 1) {
+            if (ptr_elem_ty.abiSize(mod) != 1) {
                 try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) });
+                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
                 try func.addTag(.i32_mul);
                 try func.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)),
+            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
         }),
         .C, .Many => unreachable,
     };
@@ -5472,12 +5490,13 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.air.typeOf(ty_op.operand);
     const result_ty = func.air.typeOfIndex(inst);
+    const mod = func.bin_file.base.options.module.?;

-    if (op_ty.zigTypeTag() == .Vector) {
+    if (op_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement @popCount for vectors", .{});
     }

-    const int_info = op_ty.intInfo(func.target);
+    const int_info = op_ty.intInfo(mod);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
         return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5527,7 +5546,8 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // to make a copy of the ptr+value but can point towards them directly.
     const error_table_symbol = try func.bin_file.getErrorTableSymbol();
     const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
-    const abi_size = name_ty.abiSize(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const abi_size = name_ty.abiSize(mod);

     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
     try func.emitWValue(error_name_value);
@@ -5566,12 +5586,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const lhs_op = try func.resolveInst(extra.lhs);
     const rhs_op = try func.resolveInst(extra.rhs);
     const lhs_ty = func.air.typeOf(extra.lhs);
+    const mod = func.bin_file.base.options.module.?;

-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }

-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5630,15 +5651,16 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);

     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }

 fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     assert(op == .add or op == .sub);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits != 128) {
         return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
@@ -5701,6 +5723,7 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
 }

 fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -5709,11 +5732,11 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs_ty = func.air.typeOf(extra.lhs);
     const rhs_ty = func.air.typeOf(extra.rhs);

-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }

-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5721,7 +5744,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     // Ensure rhs is coerced to lhs as they must have the same WebAssembly types
     // before we can perform any binary operation.
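
// The airAggregateInit and airUnionInit hunks above also retire the
// stack-allocated integer-type payloads. A side-by-side sketch of the two
// patterns (the "before" shape is taken from the removed lines; `bits` is a
// placeholder for the computed bit width, illustration only):
//
//     // before: build a temporary Type on the stack, no Module required
//     var pl: Type.Payload.Bits = .{ .base = .{ .tag = .int_unsigned }, .data = bits };
//     const int_ty = Type.initPayload(&pl.base);
//     // after: intern the integer type through the Module, hence the `try`
//     const int_ty = try mod.intType(.unsigned, bits);
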
-    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?;
+    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
     const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
         const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
         break :blk try rhs_casted.toLocal(func, lhs_ty);
@@ -5750,7 +5773,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);

     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5763,8 +5786,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
     const lhs_ty = func.air.typeOf(extra.lhs);
+    const mod = func.bin_file.base.options.module.?;

-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }

@@ -5773,7 +5797,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
     defer overflow_bit.free(func);

-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
     };
@@ -5924,7 +5948,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
     try func.store(result_ptr, bin_op_local, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);

     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5934,11 +5958,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;

     const ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }

-    if (ty.abiSize(func.target) > 16) {
+    if (ty.abiSize(mod) > 16) {
         return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }

@@ -5954,7 +5979,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
     try func.addTag(.select);

     // store result in local
-    const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+    const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
     const result = try func.allocLocal(result_ty);
     try func.addLabel(.local_set, result.local.value);
     func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
@@ -5965,7 +5990,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;

     const ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@mulAdd` for vectors", .{});
     }

@@ -5998,12 +6024,13 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.air.typeOf(ty_op.operand);
     const result_ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@clz` for vectors", .{});
     }

     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6051,12 +6078,13 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.air.typeOf(ty_op.operand);
     const result_ty = func.air.typeOfIndex(inst);

-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@ctz` for vectors", .{});
     }

     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6174,12 +6202,13 @@ fn lowerTry(
     err_union_ty: Type,
     operand_is_ptr: bool,
 ) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     if (operand_is_ptr) {
         return func.fail("TODO: lowerTry for pointers", .{});
     }

     const pl_ty = err_union_ty.errorUnionPayload();
-    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime();
+    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);

     if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
         // Block we can jump out of when error is not set
@@ -6188,10 +6217,10 @@ fn lowerTry(
         // check if the error tag is set for the error union.
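
// Error unions are addressed through two offsets derived from the payload
// type; the loads and pointer adjustments below use them. A sketch of the
// accessors as this patch calls them (simplified, illustration only):
//
//     const err_offset = errUnionErrorOffset(pl_ty, mod); // where the anyerror tag lives
//     const pl_offset = errUnionPayloadOffset(pl_ty, mod); // where the payload bytes live
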
         try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = Type.anyerror.abiAlignment(func.target),
+                .alignment = Type.anyerror.abiAlignment(mod),
             });
         }
         try func.addTag(.i32_eqz);
@@ -6213,8 +6242,8 @@ fn lowerTry(
         return WValue{ .none = {} };
     }

-    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
-    if (isByRef(pl_ty, func.target)) {
+    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+    if (isByRef(pl_ty, mod)) {
         return buildPointerOffset(func, err_union, pl_offset, .new);
     }

     const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6226,11 +6255,12 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const ty = func.air.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
+    const mod = func.bin_file.base.options.module.?;

-    if (ty.zigTypeTag() == .Vector) {
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: @byteSwap for vectors", .{});
     }
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);

     // bytes are no-op
     if (int_info.bits == 8) {
@@ -6292,13 +6322,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;

     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);

-    const result = if (ty.isSignedInt())
+    const result = if (ty.isSignedInt(mod))
         try func.divSigned(lhs, rhs, ty)
     else
         try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6306,13 +6337,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;

     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);

-    const div_result = if (ty.isSignedInt())
+    const div_result = if (ty.isSignedInt(mod))
         try func.divSigned(lhs, rhs, ty)
     else
         try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6328,15 +6360,16 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const mod = func.bin_file.base.options.module.?;

     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);

-    if (ty.isUnsignedInt()) {
+    if (ty.isUnsignedInt(mod)) {
         const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
         return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
-    } else if (ty.isSignedInt()) {
-        const int_bits = ty.intInfo(func.target).bits;
+    } else if (ty.isSignedInt(mod)) {
+        const int_bits = ty.intInfo(mod).bits;
         const wasm_bits = toWasmBits(int_bits) orelse {
             return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
         };
@@ -6414,7 +6447,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
-    const int_bits = ty.intInfo(func.target).bits;
+    const mod = func.bin_file.base.options.module.?;
+    const int_bits = ty.intInfo(mod).bits;
     const wasm_bits = toWasmBits(int_bits) orelse {
         return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
     };
@@ -6441,7 +6475,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal
 /// Retrieves the absolute value of a signed integer
 /// NOTE: Leaves the result value on the stack.
 fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
-    const int_bits = ty.intInfo(func.target).bits;
+    const mod = func.bin_file.base.options.module.?;
+    const int_bits = ty.intInfo(mod).bits;
     const wasm_bits = toWasmBits(int_bits) orelse {
         return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
     };
@@ -6476,11 +6511,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);

     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const mod = func.bin_file.base.options.module.?;

     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);

-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;

     if (int_info.bits > 64) {
@@ -6523,7 +6559,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 }

 fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
-    const int_info = ty.intInfo(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits).?;
     const is_wasm_bits = wasm_bits == int_info.bits;

@@ -6588,8 +6625,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,

 fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const mod = func.bin_file.base.options.module.?;

     const ty = func.air.typeOfIndex(inst);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 64) {
         return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
@@ -6707,12 +6745,13 @@ fn callIntrinsic(
     };

     // Always pass over C-ABI
-    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+    const mod = func.bin_file.base.options.module.?;
+    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
     defer func_type.deinit(func.gpa);
     const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
     try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);

-    const want_sret_param = firstParamSRet(.C, return_type, func.target);
+    const want_sret_param = firstParamSRet(.C, return_type, mod);
     // if we want return as first param, we allocate a pointer to stack,
     // and emit it as our first argument
     const sret = if (want_sret_param) blk: {
@@ -6724,14 +6763,14 @@ fn callIntrinsic(

     // Lower all arguments to the stack before we call our function
     for (args, 0..) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
-        assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
+        assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod));
         try func.lowerArg(.C, param_types[arg_i], arg);
     }

     // Actually call our intrinsic
     try func.addLabel(.call, symbol_index);

-    if (!return_type.hasRuntimeBitsIgnoreComptime()) {
+    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
         return WValue.none;
     } else if (return_type.isNoReturn()) {
         try func.addTag(.@"unreachable");
@@ -6759,15 +6798,15 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
+    const mod = func.bin_file.base.options.module.?;
     const enum_decl_index = enum_ty.getOwnerDecl();
-    const module = func.bin_file.base.options.module.?;

     var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();

-    const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module);
-    defer module.gpa.free(fqn);
+    const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod);
+    defer mod.gpa.free(fqn);
     const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});

     // check if we already generated code for this.
@@ -6775,10 +6814,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
         return loc.index;
     }

-    var int_tag_type_buffer: Type.Payload.Bits = undefined;
-    const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+    const int_tag_ty = enum_ty.intTagType();

-    if (int_tag_ty.bitSize(func.target) > 64) {
+    if (int_tag_ty.bitSize(mod) > 64) {
         return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
     }

@@ -6806,9 +6844,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
             .data = @intCast(u64, tag_name.len),
         };
         const name_ty = Type.initPayload(&name_ty_payload.base);
-        const string_bytes = &module.string_literal_bytes;
-        try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len);
-        const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{
+        const string_bytes = &mod.string_literal_bytes;
+        try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
+        const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{
             .bytes = string_bytes,
         }, Module.StringLiteralContext{
             .bytes = string_bytes,
@@ -6929,7 +6967,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     try writer.writeByte(std.wasm.opcode(.end));

     const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
-    const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target);
+    const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
     return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
 }

@@ -6944,11 +6982,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
     defer values.deinit();

-    const module = func.bin_file.base.options.module.?;
+    const mod = func.bin_file.base.options.module.?;
     var lowest: ?u32 = null;
     var highest: ?u32 = null;
     for (names) |name| {
-        const err_int = module.global_error_set.get(name).?;
+        const err_int = mod.global_error_set.get(name).?;
         if (lowest) |*l| {
             if (err_int < l.*) {
                 l.* = err_int;
@@ -7019,6 +7057,7 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
 }

 fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
@@ -7037,7 +7076,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr_operand);
         try func.lowerToStack(expected_val);
         try func.lowerToStack(new_val);
-        try func.addAtomicMemArg(switch (ty.abiSize(func.target)) {
+        try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
             1 => .i32_atomic_rmw8_cmpxchg_u,
             2 => .i32_atomic_rmw16_cmpxchg_u,
             4 => .i32_atomic_rmw_cmpxchg,
@@ -7045,14 +7084,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
         }, .{
             .offset = ptr_operand.offset(),
-            .alignment = ty.abiAlignment(func.target),
+            .alignment = ty.abiAlignment(mod),
         });
         try func.addLabel(.local_tee, val_local.local.value);
         _ = try func.cmp(.stack, expected_val, ty, .eq);
         try func.addLabel(.local_set, cmp_result.local.value);
         break :val val_local;
     } else val: {
-        if (ty.abiSize(func.target) > 8) {
+        if (ty.abiSize(mod) > 8) {
             return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
         }
         const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7068,7 +7107,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         break :val ptr_val;
     };

-    const result_ptr = if (isByRef(result_ty, func.target)) val: {
+    const result_ptr = if (isByRef(result_ty, mod)) val: {
         try func.emitWValue(cmp_result);
         try func.addImm32(-1);
         try func.addTag(.i32_xor);
@@ -7076,7 +7115,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addTag(.i32_and);
         const and_result = try WValue.toLocal(.stack, func, Type.bool);
         const result_ptr = try func.allocStack(result_ty);
-        try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target)));
+        try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
         try func.store(result_ptr, ptr_val, ty, 0);
         break :val result_ptr;
     } else val: {
@@ -7091,12 +7130,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const atomic_load = func.air.instructions.items(.data)[inst].atomic_load;
     const ptr = try func.resolveInst(atomic_load.ptr);
     const ty = func.air.typeOfIndex(inst);

     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
             1 => .i32_atomic_load8_u,
             2 => .i32_atomic_load16_u,
             4 => .i32_atomic_load,
@@ -7106,7 +7146,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = ty.abiAlignment(func.target),
+            .alignment = ty.abiAlignment(mod),
         });
     } else {
         _ = try func.load(ptr, ty, 0);
@@ -7117,6 +7157,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
@@ -7140,7 +7181,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.emitWValue(ptr);
                 try func.emitWValue(value);
                 if (op == .Nand) {
-                    const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+                    const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;

                     const and_res = try func.binOp(value, operand, ty, .@"and");
                     if (wasm_bits == 32)
@@ -7157,7 +7198,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     try func.addTag(.select);
                 }
                 try func.addAtomicMemArg(
-                    switch (ty.abiSize(func.target)) {
+                    switch (ty.abiSize(mod)) {
                         1 => .i32_atomic_rmw8_cmpxchg_u,
                         2 => .i32_atomic_rmw16_cmpxchg_u,
                         4 => .i32_atomic_rmw_cmpxchg,
@@ -7166,7 +7207,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     },
                     .{
                         .offset = ptr.offset(),
-                        .alignment = ty.abiAlignment(func.target),
+                        .alignment = ty.abiAlignment(mod),
                     },
                 );
                 const select_res = try func.allocLocal(ty);
@@ -7185,7 +7226,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => {
                 try func.emitWValue(ptr);
                 try func.emitWValue(operand);
-                const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+                const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
                     1 => switch (op) {
                         .Xchg => .i32_atomic_rmw8_xchg_u,
                         .Add => .i32_atomic_rmw8_add_u,
@@ -7226,7 +7267,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 };
                 try func.addAtomicMemArg(tag, .{
                     .offset = ptr.offset(),
-                    .alignment = ty.abiAlignment(func.target),
+                    .alignment = ty.abiAlignment(mod),
                 });
                 const result = try WValue.toLocal(.stack, func, ty);
                 return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7255,7 +7296,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     .Xor => .xor,
                     else => unreachable,
                 });
-                if (ty.isInt() and (op == .Add or op == .Sub)) {
+                if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
                     _ = try func.wrapOperand(.stack, ty);
                 }
                 try func.store(.stack, .stack, ty, ptr.offset());
@@ -7271,7 +7312,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.store(.stack, .stack, ty, ptr.offset());
             },
             .Nand => {
-                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
                 try func.emitWValue(ptr);

                 const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7302,6 +7343,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;

     const ptr = try func.resolveInst(bin_op.lhs);
@@ -7310,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = ptr_ty.childType();

     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
             1 => .i32_atomic_store8,
             2 => .i32_atomic_store16,
             4 => .i32_atomic_store,
@@ -7321,7 +7363,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.lowerToStack(operand);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = ty.abiAlignment(func.target),
+            .alignment = ty.abiAlignment(mod),
         });
     } else {
         try func.store(ptr, operand, ty, 0);
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 4692f65dd1..7dd4425c01 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -5,9 +5,11 @@
 //! Note: Above mentioned document is not an official specification, therefore called a convention.

 const std = @import("std");
-const Type = @import("../../type.zig").Type;
 const Target = std.Target;
+const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
+
 /// Defines how to pass a type as part of a function signature,
 /// both for parameters as well as return values.
 pub const Class = enum { direct, indirect, none };
@@ -19,12 +21,13 @@ const direct: [2]Class = .{ .direct, .none };
 /// Classifies a given Zig type to determine how they must be passed
 /// or returned as value within a wasm function.
 /// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, target: Target) [2]Class {
-    if (!ty.hasRuntimeBitsIgnoreComptime()) return none;
-    switch (ty.zigTypeTag()) {
+pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
+    const target = mod.getTarget();
+    if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (ty.containerLayout() == .Packed) {
-                if (ty.bitSize(target) <= 64) return direct;
+                if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
             // When the struct type is non-scalar
@@ -32,14 +35,14 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
             // When the struct's alignment is non-natural
             const field = ty.structFields().values()[0];
             if (field.abi_align != 0) {
-                if (field.abi_align > field.ty.abiAlignment(target)) {
+                if (field.abi_align > field.ty.abiAlignment(mod)) {
                     return memory;
                 }
             }
-            return classifyType(field.ty, target);
+            return classifyType(field.ty, mod);
         },
         .Int, .Enum, .ErrorSet, .Vector => {
-            const int_bits = ty.intInfo(target).bits;
+            const int_bits = ty.intInfo(mod).bits;
             if (int_bits <= 64) return direct;
             if (int_bits <= 128) return .{ .direct, .direct };
             return memory;
@@ -53,7 +56,7 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
         .Bool => return direct,
         .Array => return memory,
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            std.debug.assert(ty.isPtrLikeOptional(mod));
             return direct;
         },
         .Pointer => {
@@ -62,13 +65,13 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
         },
         .Union => {
             if (ty.containerLayout() == .Packed) {
-                if (ty.bitSize(target) <= 64) return direct;
+                if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
-            const layout = ty.unionGetLayout(target);
+            const layout = ty.unionGetLayout(mod);
             std.debug.assert(layout.tag_size == 0);
             if (ty.unionFields().count() > 1) return memory;
-            return classifyType(ty.unionFields().values()[0].ty, target);
+            return classifyType(ty.unionFields().values()[0].ty, mod);
         },
         .ErrorUnion,
         .Frame,
@@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
 /// Returns the scalar type a given type can represent.
 /// Asserts given type can be represented as scalar, such as
 /// a struct with a single scalar field.
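
// A sketch of how a caller might consume the classification above
// (hypothetical usage; `param_ty` and the branch bodies are illustrative,
// not taken from this patch):
//
//     const classes = classifyType(param_ty, mod);
//     if (classes[0] == .indirect) {
//         // pass a pointer to stack memory instead of the value itself
//     } else if (classes[1] == .direct) {
//         // a 128-bit scalar: passed as two direct values
//     }
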
-pub fn scalarType(ty: Type, target: std.Target) Type {
-    switch (ty.zigTypeTag()) {
+pub fn scalarType(ty: Type, mod: *const Module) Type {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
             switch (ty.containerLayout()) {
                 .Packed => {
                     const struct_obj = ty.castTag(.@"struct").?.data;
-                    return scalarType(struct_obj.backing_int_ty, target);
+                    return scalarType(struct_obj.backing_int_ty, mod);
                 },
                 else => {
                     std.debug.assert(ty.structFieldCount() == 1);
-                    return scalarType(ty.structFieldType(0), target);
+                    return scalarType(ty.structFieldType(0), mod);
                 },
             }
         },
         .Union => {
             if (ty.containerLayout() != .Packed) {
-                const layout = ty.unionGetLayout(target);
+                const layout = ty.unionGetLayout(mod);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
-                    return scalarType(ty.unionTagTypeSafety().?, target);
+                    return scalarType(ty.unionTagTypeSafety().?, mod);
                 }
                 std.debug.assert(ty.unionFields().count() == 1);
             }
-            return scalarType(ty.unionFields().values()[0].ty, target);
+            return scalarType(ty.unionFields().values()[0].ty, mod);
         },
         else => return ty,
     }
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index b614200e41..826bca2266 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -605,14 +605,14 @@ const FrameAlloc = struct {
             .ref_count = 0,
         };
     }
-    fn initType(ty: Type, target: Target) FrameAlloc {
-        return init(.{ .size = ty.abiSize(target), .alignment = ty.abiAlignment(target) });
+    fn initType(ty: Type, mod: *const Module) FrameAlloc {
+        return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
     }
 };

 const StackAllocation = struct {
     inst: ?Air.Inst.Index,
-    /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*)
+    /// TODO do we need size? should be determined by inst.ty.abiSize(mod)
     size: u32,
 };

@@ -714,12 +714,12 @@ pub fn generate(
     function.args = call_info.args;
     function.ret_mcv = call_info.return_value;
     function.frame_allocs.set(@enumToInt(FrameIndex.ret_addr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(function.target.*),
-        .alignment = @min(Type.usize.abiAlignment(function.target.*), call_info.stack_align),
+        .size = Type.usize.abiSize(mod),
+        .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align),
     }));
     function.frame_allocs.set(@enumToInt(FrameIndex.base_ptr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(function.target.*),
-        .alignment = @min(Type.usize.abiAlignment(function.target.*) * 2, call_info.stack_align),
+        .size = Type.usize.abiSize(mod),
+        .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align),
     }));
     function.frame_allocs.set(
         @enumToInt(FrameIndex.args_frame),
@@ -1565,6 +1565,7 @@ fn asmMemoryRegisterImmediate(
 }

 fn gen(self: *Self) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     const cc = self.fn_type.fnCallingConvention();
     if (cc != .Naked) {
         try self.asmRegister(.{ ._, .push }, .rbp);
@@ -1582,7 +1583,7 @@ fn gen(self: *Self) InnerError!void {
         // register which the callee is free to clobber. Therefore, we purposely
         // spill it to stack immediately.
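
// FrameAlloc.initType (changed above to take the Module) bundles a type's
// ABI size and alignment into one frame-allocation request; a sketch of the
// equivalence, using names from this hunk (illustration only):
//
//     const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod));
//     // same as: FrameAlloc.init(.{ .size = Type.usize.abiSize(mod),
//     //                             .alignment = Type.usize.abiAlignment(mod) })
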
const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(Type.usize, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod)); try self.genSetMem( .{ .frame = frame_index }, 0, @@ -1999,7 +2000,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { - switch (lazy_sym.ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lazy_sym.ty.zigTypeTag(mod)) { .Enum => { const enum_ty = lazy_sym.ty; wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); @@ -2127,8 +2129,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } self.finishAirResult(inst, result); @@ -2252,14 +2254,14 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { + const mod = self.bin_file.options.module.?; const ptr_ty = self.air.typeOfIndex(inst); const val_ty = ptr_ty.childType(); return self.allocFrameIndex(FrameAlloc.init(.{ - .size = math.cast(u32, val_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + .size = math.cast(u32, val_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); }, - .alignment = @max(ptr_ty.ptrAlignment(self.target.*), 1), + .alignment = @max(ptr_ty.ptrAlignment(mod), 1), })); } @@ -2272,19 +2274,19 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { } fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; if (reg_ok) need_mem: { - if (abi_size <= @as(u32, switch (ty.zigTypeTag()) { + if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16, 32, 64, 128 => 16, 80 => break :need_mem, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, 80 => break :need_mem, @@ -2294,18 +2296,18 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b }, else => 8, })) { - if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| { + if (self.register_manager.tryAllocReg(inst, regClassForType(ty, mod))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } } - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, mod)); return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(ty: Type) RegisterManager.RegisterBitSet { - return switch 
(ty.zigTypeTag()) { +fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet { + return switch (ty.zigTypeTag(mod)) { .Float, .Vector => sse, else => gp, }; @@ -2449,7 +2451,8 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg = try self.register_manager.allocReg(null, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -2464,7 +2467,8 @@ fn copyToRegisterWithInstTracking( ty: Type, mcv: MCValue, ) !MCValue { - const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return MCValue{ .register = reg }; } @@ -2618,14 +2622,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); - const src_int_info = src_ty.intInfo(self.target.*); + const src_int_info = src_ty.intInfo(mod); const dst_ty = self.air.typeOfIndex(inst); - const dst_int_info = dst_ty.intInfo(self.target.*); - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_int_info = dst_ty.intInfo(mod); + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; const extend = switch (src_int_info.signedness) { @@ -2670,14 +2675,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const high_bits = src_int_info.bits % 64; if (high_bits > 0) { - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (extend) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = high_bits, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(extend, high_bits); try self.truncateRegister(high_ty, high_reg); try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg }); } @@ -2706,12 +2704,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -2724,10 +2723,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - if (dst_ty.zigTypeTag() == .Vector) { - assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); - const dst_info = dst_ty.childType().intInfo(self.target.*); - const src_info = 
src_ty.childType().intInfo(self.target.*); + if (dst_ty.zigTypeTag(mod) == .Vector) { + assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); + const dst_info = dst_ty.childType().intInfo(mod); + const src_info = src_ty.childType().intInfo(mod); const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { 8 => switch (src_info.bits) { 16 => switch (dst_ty.vectorLen()) { @@ -2775,7 +2774,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { }, }; const full_ty = Type.initPayload(&full_pl.base); - const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*)); + const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); const splat_addr_mcv: MCValue = switch (splat_mcv) { @@ -2831,6 +2830,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2840,11 +2840,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len = try self.resolveInst(bin_op.rhs); const len_ty = self.air.typeOf(bin_op.rhs); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), len_ty, len, ); @@ -2873,23 +2873,24 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void } fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { + const mod = self.bin_file.options.module.?; const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); const dst_ty = self.air.typeOf(dst_air); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { .constant => { const src_val = self.air.values[air_data[inst].ty_pl.payload]; var space: Value.BigIntSpace = undefined; - const src_int = src_val.toBigInt(&space, self.target.*); + const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @boolToInt(src_int.positive and dst_info.signedness == .signed); }, .intcast => { const src_ty = self.air.typeOf(air_data[inst].ty_op.operand); - const src_info = src_ty.intInfo(self.target.*); + const src_info = src_ty.intInfo(mod); return @min(switch (src_info.signedness) { .signed => switch (dst_info.signedness) { .signed => src_info.bits, @@ -2908,20 +2909,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { } fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[inst]; const dst_ty = self.air.typeOfIndex(inst); - switch (dst_ty.zigTypeTag()) { + switch (dst_ty.zigTypeTag(mod)) { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, } - const dst_info = dst_ty.intInfo(self.target.*); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } 
}, .data = switch (tag) { + const dst_info = dst_ty.intInfo(mod); + const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, .mul, .mulwrap => math.max3( self.activeIntBits(bin_op.lhs), @@ -2929,8 +2928,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { dst_info.bits / 2, ), .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits, - } }; - const src_ty = Type.initPayload(&src_pl.base); + }); try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); @@ -2942,6 +2940,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -2968,7 +2967,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -2994,7 +2993,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -3005,14 +3004,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3020,6 +3019,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -3046,7 +3046,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3076,14 +3076,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3091,6 +3091,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = 
@@ -3091,6 +3091,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
     const ty = self.air.typeOf(bin_op.lhs);
@@ -3118,7 +3119,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     defer self.register_manager.unlockReg(limit_lock);
 
     const reg_bits = self.regBitSize(ty);
-    const cc: Condition = if (ty.isSignedInt()) cc: {
+    const cc: Condition = if (ty.isSignedInt(mod)) cc: {
         try self.genSetReg(limit_reg, ty, lhs_mcv);
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
         try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
@@ -3134,7 +3135,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     };
 
     const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
-    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
     try self.asmCmovccRegisterRegister(
         registerAlias(dst_mcv.register, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
@@ -3145,12 +3146,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const tag = self.air.instructions.items(.tag)[inst];
         const ty = self.air.typeOf(bin_op.lhs);
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
             .Int => {
                 try self.spillEflagsIfOccupied();
@@ -3160,7 +3162,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     .sub_with_overflow => .sub,
                     else => unreachable,
                 }, bin_op.lhs, bin_op.rhs);
-                const int_info = ty.intInfo(self.target.*);
+                const int_info = ty.intInfo(mod);
 
                 const cc: Condition = switch (int_info.signedness) {
                     .unsigned => .c,
                     .signed => .o,
@@ -3177,16 +3179,16 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     }
 
                     const frame_index =
-                        try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+                        try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)),
+                        @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
                         Type.u1,
                         .{ .eflags = cc },
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)),
+                        @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
                         ty,
                         partial_mcv,
                     );
@@ -3194,7 +3196,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }
 
                 const frame_index =
-                    try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+                    try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
                 try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
                 break :result .{ .load_frame = .{ .index = frame_index } };
             },
@@ -3205,12 +3207,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const lhs_ty = self.air.typeOf(bin_op.lhs);
         const rhs_ty = self.air.typeOf(bin_op.rhs);
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement
shl with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3219,7 +3222,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty); const partial_lock = switch (partial_mcv) { @@ -3249,16 +3252,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0), partial_mcv, ); @@ -3266,7 +3269,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3283,6 +3286,7 @@ fn genSetFrameTruncatedOverflowCompare( src_mcv: MCValue, overflow_cc: ?Condition, ) !void { + const mod = self.bin_file.options.module.?; const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -3290,22 +3294,12 @@ fn genSetFrameTruncatedOverflowCompare( defer if (src_lock) |lock| self.register_manager.unlockReg(lock); const ty = tuple_ty.structFieldType(0); - const int_info = ty.intInfo(self.target.*); + const int_info = ty.intInfo(mod); - var hi_limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = (int_info.bits - 1) % 64 + 1, - }; - const hi_limb_ty = Type.initPayload(&hi_limb_pl.base); + const hi_limb_bits = (int_info.bits - 1) % 64 + 1; + const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits); - var rest_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = int_info.bits - hi_limb_pl.data, - }; - const rest_ty = Type.initPayload(&rest_pl.base); + const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits); const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); @@ -3335,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); + const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3345,23 +3339,24 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ 
.eflags = .ne }, ); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const dst_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = switch (dst_ty.zigTypeTag()) { + const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = dst_ty.intInfo(mod); const cc: Condition = switch (dst_info.signedness) { .unsigned => .c, .signed => .o, @@ -3369,11 +3364,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) }; - const src_ty = Type.initPayload(&src_pl.base); + const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -3391,26 +3383,26 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; } else { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => { // For now, this is the only supported multiply that doesn't fit in a register. - assert(dst_info.bits <= 128 and src_pl.data == 64); + assert(dst_info.bits <= 128 and src_bits == 64); const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), .{ .immediate = 0 }, // cc being set is impossible ); @@ -3433,7 +3425,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3472,8 +3465,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Always returns a register. /// Clobbers .rax and .rdx registers. 
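[Editorial aside, not part of the patch: genInlineIntDivFloor, whose signature follows the doc comment above, lowers floor division by stepping a truncated quotient down by one when the remainder is nonzero and the operands' signs differ. A standalone sketch of that identity, assuming two's-complement integers; the helper name is illustrative:]

const std = @import("std");

// Floor division from truncated division: adjust the quotient by one
// when the remainder is nonzero and the operands have opposite signs.
fn divFloorFromTrunc(a: i64, b: i64) i64 {
    const q = @divTrunc(a, b);
    const r = @rem(a, b);
    return if (r != 0 and (r < 0) != (b < 0)) q - 1 else q;
}

test "floor division matches @divFloor" {
    try std.testing.expectEqual(@divFloor(@as(i64, -7), 2), divFloorFromTrunc(-7, 2));
    try std.testing.expectEqual(@divFloor(@as(i64, 7), -2), divFloorFromTrunc(7, -2));
    try std.testing.expectEqual(@divFloor(@as(i64, 7), 2), divFloorFromTrunc(7, 2));
}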
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - const int_info = ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); + const int_info = ty.intInfo(mod); const dividend: Register = switch (lhs) { .register => |reg| reg, else => try self.copyToTmpRegister(ty, lhs), @@ -3585,6 +3579,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); @@ -3592,7 +3587,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const opt_ty = src_ty.childType(); const src_mcv = try self.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { break :result if (self.liveness.isUnused(inst)) .unreach else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) @@ -3610,7 +3605,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(); - const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); + const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; @@ -3618,6 +3613,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.air.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(); @@ -3629,11 +3625,11 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result operand; } - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, mod); switch (operand) { .register => |reg| { // TODO reuse operand @@ -3678,12 +3674,13 @@ fn genUnwrapErrorUnionPayloadMir( err_union_ty: Type, err_union: MCValue, ) !MCValue { + const mod = self.bin_file.options.module.?; const payload_ty = err_union_ty.errorUnionPayload(); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, @@ -3720,6 +3717,7 @@ fn genUnwrapErrorUnionPayloadMir( // *(E!T) -> E fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3739,8 +3737,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = 
eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); - const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -3755,6 +3753,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { // *(E!T) -> *T fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3777,8 +3776,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = eu_ty.errorUnionPayload(); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3789,6 +3788,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); @@ -3803,8 +3803,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); - const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ @@ -3824,8 +3824,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3853,14 +3853,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const pl_ty = self.air.typeOf(ty_op.operand); - if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 }; + if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 }; const opt_ty = self.air.typeOfIndex(inst); const pl_mcv = try self.resolveInst(ty_op.operand); - const same_repr = opt_ty.optionalReprIsPayload(); + const same_repr = opt_ty.optionalReprIsPayload(mod); if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv; 
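[Editorial aside, not part of the patch: the optionalReprIsPayload checks in the surrounding hunks distinguish optionals that need a separate flag byte from optionals whose payload has a niche (such as a never-null pointer) and is stored as-is. A standalone illustration, assuming a typical 64-bit target:]

const std = @import("std");

test "optional pointers reuse the payload representation" {
    // A non-allowzero pointer can never be zero, so ?*u8 needs no flag byte.
    try std.testing.expectEqual(@sizeOf(*u8), @sizeOf(?*u8));
    // A plain integer has no niche; its optional stores a flag after the payload.
    try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
}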
const pl_lock: ?RegisterLock = switch (pl_mcv) { @@ -3873,7 +3874,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); + const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); switch (opt_mcv) { else => unreachable, @@ -3900,6 +3901,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); @@ -3908,11 +3910,11 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result .{ .immediate = 0 }; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3922,6 +3924,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); @@ -3929,11 +3932,11 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const err_ty = eu_ty.errorUnionSet(); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3974,6 +3977,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3994,7 +3998,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const 
 dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -4041,6 +4045,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
 }
 
 fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const slice_ty = self.air.typeOf(lhs);
     const slice_mcv = try self.resolveInst(lhs);
     const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
@@ -4050,7 +4055,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
     const elem_ty = slice_ty.childType();
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const elem_size = elem_ty.abiSize(mod);
 
     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
     const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -4097,6 +4102,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
     const array_ty = self.air.typeOf(bin_op.lhs);
@@ -4108,7 +4114,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
 
     const elem_ty = array_ty.childType();
-    const elem_abi_size = elem_ty.abiSize(self.target.*);
+    const elem_abi_size = elem_ty.abiSize(mod);
 
     const index_ty = self.air.typeOf(bin_op.rhs);
     const index = try self.resolveInst(bin_op.rhs);
@@ -4125,7 +4131,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const addr_reg = try self.register_manager.allocReg(null, gp);
     switch (array) {
         .register => {
-            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*));
+            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod));
             try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array);
             try self.asmRegisterMemory(
                 .{ ._, .lea },
@@ -4162,14 +4168,15 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr_ty = self.air.typeOf(bin_op.lhs);
 
     // this is identical to the `airPtrElemPtr` codegen except here an
     // additional `mov` is needed at the end to get the actual value
 
-    const elem_ty = ptr_ty.elemType2();
-    const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const elem_ty = ptr_ty.elemType2(mod);
+    const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
     const index_ty = self.air.typeOf(bin_op.rhs);
     const index_mcv = try self.resolveInst(bin_op.rhs);
     const index_lock = switch (index_mcv) {
@@ -4207,6 +4214,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -4218,8 +4226,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = ptr_ty.elemType2();
-    const elem_abi_size = elem_ty.abiSize(self.target.*);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const
elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.air.typeOf(extra.rhs); const index = try self.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index) { @@ -4239,11 +4247,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_union_ty = self.air.typeOf(bin_op.lhs); const union_ty = ptr_union_ty.childType(); const tag_ty = self.air.typeOf(bin_op.rhs); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -4284,11 +4293,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const tag_ty = self.air.typeOfIndex(inst); const union_ty = self.air.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none }); @@ -4302,7 +4312,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const tag_abi_size = tag_ty.abiSize(self.target.*); + const tag_abi_size = tag_ty.abiSize(mod); const dst_mcv: MCValue = blk: { switch (operand) { .load_frame => |frame_addr| { @@ -4337,6 +4347,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airClz(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); @@ -4358,7 +4369,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const src_bits = src_ty.bitSize(self.target.*); + const src_bits = src_ty.bitSize(mod); if (self.hasFeature(.lzcnt)) { if (src_bits <= 8) { const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); @@ -4405,7 +4416,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } if (src_bits > 64) - return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); + return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)}); if (math.isPowerOfTwo(src_bits)) { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), @@ -4422,7 +4433,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(imm_reg, cmov_abi_size), @@ -4449,7 +4460,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { .{ .register = wide_reg }, ); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( 
registerAlias(imm_reg, cmov_abi_size), registerAlias(dst_reg, cmov_abi_size), @@ -4465,11 +4476,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } fn airCtz(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = src_ty.bitSize(self.target.*); + const src_bits = src_ty.bitSize(mod); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -4548,7 +4560,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(width_reg, cmov_abi_size), @@ -4560,10 +4572,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { } fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); if (self.hasFeature(.popcnt)) { @@ -4729,6 +4742,7 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -4738,7 +4752,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, + if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4749,10 +4763,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { } fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false); @@ -4847,7 +4862,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, + if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4858,17 +4873,18 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const tag = self.air.instructions.items(.tag)[inst]; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const abi_size: u32 = switch 
(ty.abiSize(self.target.*)) { + const abi_size: u32 = switch (ty.abiSize(mod)) { 1...16 => 16, 17...32 => 32, else => return self.fail("TODO implement airFloatSign for {}", .{ ty.fmt(self.bin_file.options.module.?), }), }; - const scalar_bits = ty.scalarType().floatBits(self.target.*); + const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); const src_mcv = try self.resolveInst(un_op); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -4905,21 +4921,17 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - var int_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_signed }, - .data = scalar_bits, - }; var vec_pl = Type.Payload.Array{ .base = .{ .tag = .vector }, .data = .{ .len = @divExact(abi_size * 8, scalar_bits), - .elem_type = Type.initPayload(&int_pl.base), + .elem_type = try mod.intType(.signed, scalar_bits), }, }; const vec_ty = Type.initPayload(&vec_pl.base); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(stack.get(), self.target.*), - .fabs => try vec_ty.maxInt(stack.get(), self.target.*), + .neg => try vec_ty.minInt(stack.get(), mod), + .fabs => try vec_ty.maxInt(stack.get(), mod), else => unreachable, }; @@ -5008,17 +5020,18 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void { } fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void { + const mod = self.bin_file.options.module.?; if (!self.hasFeature(.sse4_1)) return self.fail("TODO implement genRound without sse4_1 feature", .{}); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, @@ -5041,7 +5054,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -5078,9 +5091,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 } fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5092,7 +5106,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - const 
mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { const mat_src_reg = if (src_mcv.isRegister()) @@ -5114,7 +5128,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { @@ -5186,7 +5200,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, else => unreachable, })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( @@ -5274,10 +5288,11 @@ fn reuseOperandAdvanced( } fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; const ptr_info = ptr_ty.ptrInfo().data; const val_ty = ptr_info.pointee_type; - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); @@ -5382,20 +5397,21 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; try self.spillRegisters(&.{ .rdi, .rsi, .rcx }); const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); const ptr_ty = self.air.typeOf(ty_op.operand); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); - const elem_rc = regClassForType(elem_ty); - const ptr_rc = regClassForType(ptr_ty); + const elem_rc = regClassForType(elem_ty, mod); + const ptr_rc = regClassForType(ptr_ty, mod); const ptr_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and @@ -5416,13 +5432,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; const ptr_info = ptr_ty.ptrInfo().data; const src_ty = ptr_ty.childType(); const limb_abi_size: u16 = @min(ptr_info.host_size, 8); const limb_abi_bits = limb_abi_size * 8; - const src_bit_size = src_ty.bitSize(self.target.*); + const src_bit_size = src_ty.bitSize(mod); const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); const src_bit_off = ptr_info.bit_offset % limb_abi_bits; @@ -5555,14 +5572,15 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { } fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + const mod = self.bin_file.options.module.?; 
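[Editorial aside, not part of the patch: the fieldPtr hunk in progress here rewrites the field-offset computation to take the Module. For .Auto/.Extern layouts the value it computes is the same ABI byte offset that @offsetOf reports; a standalone check with an illustrative struct:]

const std = @import("std");

test "extern struct field offsets follow ABI alignment" {
    const S = extern struct {
        a: u8,
        b: u32,
        c: u8,
    };
    try std.testing.expectEqual(@as(usize, 0), @offsetOf(S, "a"));
    // b is padded out to its 4-byte alignment.
    try std.testing.expectEqual(@as(usize, 4), @offsetOf(S, "b"));
    try std.testing.expectEqual(@as(usize, 8), @offsetOf(S, "c"));
}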
const ptr_field_ty = self.air.typeOfIndex(inst); const ptr_container_ty = self.air.typeOf(operand); const container_ty = ptr_container_ty.childType(); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { - .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*), - .Packed => if (container_ty.zigTypeTag() == .Struct and + .Auto, .Extern => container_ty.structFieldOffset(index, mod), + .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo().data.host_size == 0) - container_ty.packedStructFieldByteOffset(index, self.target.*) + container_ty.packedStructFieldByteOffset(index, mod) else 0, }); @@ -5577,6 +5595,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const result: MCValue = result: { @@ -5584,17 +5603,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const index = extra.field_index; const container_ty = self.air.typeOf(operand); - const container_rc = regClassForType(container_ty); + const container_rc = regClassForType(container_ty, mod); const field_ty = container_ty.structFieldType(index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; - const field_rc = regClassForType(field_ty); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + const field_rc = regClassForType(field_ty, mod); const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout()) { - .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8), + .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), .Packed => if (container_ty.castTag(.@"struct")) |struct_obj| - struct_obj.data.packedFieldBitOffset(self.target.*, index) + struct_obj.data.packedFieldBitOffset(mod, index) else 0, }; @@ -5611,7 +5630,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*)); + const field_abi_size = @intCast(u32, field_ty.abiSize(mod)); const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); @@ -5733,12 +5752,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const inst_ty = self.air.typeOfIndex(inst); const parent_ty = inst_ty.childType(); - const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*)); + const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); const src_mcv = try self.resolveInst(extra.field_ptr); const dst_mcv = if (src_mcv.isRegisterOffset() and @@ -5751,9 +5771,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { } fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { + const mod = self.bin_file.options.module.?; const src_ty = self.air.typeOf(src_air); const 
src_mcv = try self.resolveInst(src_air); - if (src_ty.zigTypeTag() == .Vector) { + if (src_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); } @@ -5786,28 +5807,22 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { - const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8)); + const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); const int_info = if (src_ty.tag() == .bool) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else - src_ty.intInfo(self.target.*); + src_ty.intInfo(mod); var byte_off: i32 = 0; while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { - var limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)), - }; - const limb_ty = Type.initPayload(&limb_pl.base); + const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)); + const limb_ty = try mod.intType(int_info.signedness, limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, else => dst_mcv.address().offset(byte_off).deref(), }; - if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) { - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data); + if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) { + const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits); try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } @@ -5819,7 +5834,8 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: } fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(self.bin_file.options.module.?), @@ -5866,6 +5882,7 @@ fn genShiftBinOpMir( lhs_mcv: MCValue, shift_mcv: MCValue, ) !void { + const mod = self.bin_file.options.module.?; const rhs_mcv: MCValue = rhs: { switch (shift_mcv) { .immediate => |imm| switch (imm) { @@ -5880,7 +5897,7 @@ fn genShiftBinOpMir( break :rhs .{ .register = .rcx }; }; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size <= 8) { switch (lhs_mcv) { .register => |lhs_reg| switch (rhs_mcv) { @@ -6099,13 +6116,14 @@ fn genShiftBinOp( lhs_ty: Type, rhs_ty: Type, ) !MCValue { - if (lhs_ty.zigTypeTag() == .Vector) { + const mod = self.bin_file.options.module.?; + if (lhs_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } - assert(rhs_ty.abiSize(self.target.*) == 1); + assert(rhs_ty.abiSize(mod) == 1); - const lhs_abi_size = lhs_ty.abiSize(self.target.*); + const lhs_abi_size = lhs_ty.abiSize(mod); if (lhs_abi_size > 16) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } @@ -6136,7 +6154,7 @@ fn genShiftBinOp( break :dst dst_mcv; }; - const signedness = lhs_ty.intInfo(self.target.*).signedness; + const signedness = lhs_ty.intInfo(mod).signedness; try self.genShiftBinOpMir(switch (air_tag) { .shl, 
.shl_exact => switch (signedness) { .signed => .{ ._l, .sa }, @@ -6163,11 +6181,12 @@ fn genMulDivBinOp( lhs: MCValue, rhs: MCValue, ) !MCValue { - if (dst_ty.zigTypeTag() == .Vector or dst_ty.zigTypeTag() == .Float) { + const mod = self.bin_file.options.module.?; + if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) { return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()}); } - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); if (switch (tag) { else => unreachable, .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, @@ -6184,7 +6203,7 @@ fn genMulDivBinOp( const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); - const signedness = ty.intInfo(self.target.*).signedness; + const signedness = ty.intInfo(mod).signedness; switch (tag) { .mul, .mulwrap, @@ -6338,13 +6357,14 @@ fn genBinOp( lhs_air: Air.Inst.Ref, rhs_air: Air.Inst.Ref, ) !MCValue { + const mod = self.bin_file.options.module.?; const lhs_ty = self.air.typeOf(lhs_air); const rhs_ty = self.air.typeOf(rhs_air); - const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); const maybe_mask_reg = switch (air_tag) { else => null, - .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias( + .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias( if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: { try self.register_manager.getReg(.xmm0, null); break :mask .xmm0; @@ -6384,7 +6404,7 @@ fn genBinOp( else => false, }; - const vec_op = switch (lhs_ty.zigTypeTag()) { + const vec_op = switch (lhs_ty.zigTypeTag(mod)) { else => false, .Float, .Vector => true, }; @@ -6456,7 +6476,7 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const elem_size = lhs_ty.elemType2().abiSize(self.target.*); + const elem_size = lhs_ty.elemType2(mod).abiSize(mod); try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); try self.genBinOpMir( switch (air_tag) { @@ -6506,7 +6526,7 @@ fn genBinOp( try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const cc: Condition = switch (int_info.signedness) { .unsigned => switch (air_tag) { .min => .a, @@ -6520,7 +6540,7 @@ fn genBinOp( }, }; - const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2); const tmp_reg = switch (dst_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(lhs_ty, dst_mcv), @@ -6581,7 +6601,7 @@ fn genBinOp( } const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { else => unreachable, .Float => switch (lhs_ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { @@ -6657,9 +6677,9 @@ fn genBinOp( 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch 
(lhs_ty.childType().zigTypeTag(mod)) { else => null, - .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) { + .Int => switch (lhs_ty.childType().intInfo(mod).bits) { 8 => switch (lhs_ty.vectorLen()) { 1...16 => switch (air_tag) { .add, @@ -6671,7 +6691,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .mins } else if (self.hasFeature(.sse4_1)) @@ -6685,7 +6705,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6711,11 +6731,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, }, @@ -6737,7 +6757,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .mins } else @@ -6747,7 +6767,7 @@ fn genBinOp( else .{ .p_w, .minu }, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else @@ -6772,11 +6792,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, }, @@ -6803,7 +6823,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if 
(self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .mins } else if (self.hasFeature(.sse4_1)) @@ -6817,7 +6837,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6846,11 +6866,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, }, @@ -7206,14 +7226,14 @@ fn genBinOp( const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size); try self.asmRegisterRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .cmp }, 64 => .{ .v_sd, .cmp }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1 => .{ .v_ss, .cmp }, @@ -7240,14 +7260,14 @@ fn genBinOp( Immediate.u(3), // unord ); try self.asmRegisterRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ps, .blendv }, 64 => .{ .v_pd, .blendv }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...8 => .{ .v_ps, .blendv }, @@ -7274,14 +7294,14 @@ fn genBinOp( } else { const has_blend = self.hasFeature(.sse4_1); try self.asmRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ss, .cmp }, 64 => .{ ._sd, .cmp }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1 => .{ ._ss, .cmp }, @@ -7307,14 +7327,14 @@ fn genBinOp( Immediate.u(if (has_blend) 3 else 7), // unord, ord ); if (has_blend) try self.asmRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch 
(lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .blendv }, 64 => .{ ._pd, .blendv }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .blendv }, @@ -7338,14 +7358,14 @@ fn genBinOp( mask_reg, ) else { try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"and" }, 64 => .{ ._pd, .@"and" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .@"and" }, @@ -7368,14 +7388,14 @@ fn genBinOp( mask_reg, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .andn }, 64 => .{ ._pd, .andn }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .andn }, @@ -7398,14 +7418,14 @@ fn genBinOp( lhs_copy_reg.?, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"or" }, 64 => .{ ._pd, .@"or" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .@"or" }, @@ -7442,7 +7462,8 @@ fn genBinOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -7640,7 +7661,7 @@ fn genBinOpMir( defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); const ty_signedness = - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned; + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned; const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) { .signed => Type.usize, .unsigned => Type.isize, @@ -7796,7 +7817,8 @@ fn genBinOpMir( /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. /// Does not support byte-size operands. 
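[Editorial aside, not part of the patch: genIntMulComplexOpMir, whose signature follows the doc comment above, performs a truncating integer multiply. On register-sized integers the truncated product and the lost-carry condition correspond roughly to what @mulWithOverflow returns; a standalone check with illustrative values:]

const std = @import("std");

test "truncating multiply and its overflow bit" {
    const r = @mulWithOverflow(@as(u32, 0x1234_5678), 0x10);
    // Low 32 bits of the full product 0x1_2345_6780.
    try std.testing.expectEqual(@as(u32, 0x2345_6780), r[0]);
    // The overflow bit is set because high bits were lost.
    try std.testing.expectEqual(@as(u1, 1), r[1]);
}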
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -8022,6 +8044,7 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; @@ -8029,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -8077,7 +8100,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .none, .unreach => null, .indirect => |reg_off| lock: { const ret_ty = fn_ty.fnReturnType(); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }); @@ -8100,8 +8123,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - const mod = self.bin_file.options.module.?; - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -8178,7 +8200,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(.rax, Type.usize, mcv); try self.asmRegister(.{ ._, .call }, .rax); @@ -8234,6 +8256,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -8255,9 +8278,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const result = MCValue{ - .eflags = switch (ty.zigTypeTag()) { + .eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @intCast(u16, ty.abiSize(self.target.*)); + const abi_size = @intCast(u16, ty.abiSize(mod)); const may_flip: enum { may_flip, must_flip, @@ -8290,7 +8313,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (src_lock) |lock| self.register_manager.unlockReg(lock); break :result Condition.fromCompareOperator( - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned, 
result_op: { const flipped_op = if (flipped) op.reverse() else op; if (abi_size > 8) switch (flipped_op) { @@ -8404,7 +8427,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg); try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), 32 => try self.genBinOpMir( .{ ._ss, .ucomi }, @@ -8419,7 +8442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { src_mcv, ), else => return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), } @@ -8454,7 +8477,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { self.eflags_inst = inst; const op_ty = self.air.typeOf(un_op); - const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*)); + const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8573,7 +8596,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { } fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .eflags => |cc| { // Here we map the opposites since the jump is to the false branch. @@ -8646,6 +8670,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; switch (opt_mcv) { .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() }, else => {}, @@ -8658,10 +8683,10 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const pl_ty = opt_ty.optionalChild(&pl_buf); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8681,14 +8706,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.tag() == .bool); - const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*)); + const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), @@ -8707,7 +8732,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, 
some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8720,7 +8745,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8742,6 +8767,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC } fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -8750,10 +8776,10 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const pl_ty = opt_ty.optionalChild(&pl_buf); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8762,7 +8788,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8775,6 +8801,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) } fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; const err_type = ty.errorUnionSet(); if (err_type.errorSetIsEmpty()) { @@ -8786,7 +8813,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
self.eflags_inst = inst; } - const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(), mod); switch (operand) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); @@ -9088,12 +9115,13 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { } fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); const block_ty = self.air.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst); + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; const block_data = self.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; @@ -9402,7 +9430,8 @@ const MoveStrategy = union(enum) { }; }; fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { - switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (ty.zigTypeTag(mod)) { else => return .{ .move = .{ ._, .mov } }, .Float => switch (ty.floatBits(self.target.*)) { 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ @@ -9419,8 +9448,8 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Int => switch (ty.childType().intInfo(self.target.*).bits) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { + .Int => switch (ty.childType().intInfo(mod).bits) { 8 => switch (ty.vectorLen()) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, @@ -9647,7 +9676,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError } fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9730,7 +9760,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .{ .register = try self.copyToTmpRegister(ty, src_mcv) }, ), .sse => try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) { else => switch (abi_size) { 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9738,7 +9768,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, else => null, }, - .Float => switch (ty.scalarType().floatBits(self.target.*)) { + .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) { 16, 128 => switch (abi_size) { 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9789,7 +9819,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .indirect => try self.moveStrategy(ty, false), 
.load_frame => |frame_addr| try self.moveStrategy( ty, - self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), + self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod), ), .lea_frame => .{ .move = .{ ._, .lea } }, else => unreachable, @@ -9821,7 +9851,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr switch (try self.moveStrategy(ty, mem.isAlignedGeneric( u32, @bitCast(u32, small_addr), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ))) { .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), .insert_extract => |ie| try self.asmRegisterMemoryImmediate( @@ -9839,7 +9869,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ), } }, - .load_direct => |sym_index| switch (ty.zigTypeTag()) { + .load_direct => |sym_index| switch (ty.zigTypeTag(mod)) { else => { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ @@ -9933,7 +9963,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_ptr_mcv: MCValue = switch (base) { .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, @@ -9945,7 +9976,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.genInlineMemset(dst_ptr_mcv, .{ .immediate = 0xaa }, .{ .immediate = abi_size }), .immediate => |imm| switch (abi_size) { 1, 2, 4 => { - const immediate = if (ty.isSignedInt()) + const immediate = if (ty.isSignedInt(mod)) Immediate.s(@truncate(i32, @bitCast(i64, imm))) else Immediate.u(@intCast(u32, imm)); @@ -9967,7 +9998,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), - if (ty.isSignedInt()) + if (ty.isSignedInt(mod)) Immediate.s(@truncate( i32, @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63), @@ -9991,19 +10022,19 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .none => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), .reg => |reg| switch (reg) { .es, .cs, .ss, .ds => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), else => false, }, .frame => |frame_index| self.getFrameAddrAlignment( .{ .index = frame_index, .off = disp }, - ) >= ty.abiAlignment(self.target.*), + ) >= ty.abiAlignment(mod), })) { .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias), .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate( @@ -10017,13 +10048,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .register_overflow => |ro| { try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(0, self.target.*)), + disp + @intCast(i32, ty.structFieldOffset(0, mod)), ty.structFieldType(0), .{ .register = ro.reg }, ); try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(1, self.target.*)), + disp + @intCast(i32, ty.structFieldOffset(1, mod)), ty.structFieldType(1), .{ .eflags = 
ro.eflags }, ); @@ -10146,13 +10177,14 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); const src_ty = self.air.typeOf(ty_op.operand); const result = result: { - const dst_rc = regClassForType(dst_ty); - const src_rc = regClassForType(src_ty); + const dst_rc = regClassForType(dst_ty, mod); + const src_rc = regClassForType(src_ty, mod); const src_mcv = try self.resolveInst(ty_op.operand); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -10172,13 +10204,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const src_signedness = - if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); - const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); + const abi_size = @intCast(u16, dst_ty.abiSize(mod)); + const bit_size = @intCast(u16, dst_ty.bitSize(mod)); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10192,14 +10224,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const high_lock = self.register_manager.lockReg(high_reg); defer if (high_lock) |lock| self.register_manager.unlockReg(lock); - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (dst_signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = bit_size % 64, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(dst_signedness, bit_size % 64); try self.truncateRegister(high_ty, high_reg); if (!dst_mcv.isRegister()) try self.genCopy( @@ -10213,6 +10238,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ty = self.air.typeOfIndex(inst); @@ -10221,11 +10247,11 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), Type.usize, .{ .immediate = array_len }, ); @@ -10235,12 +10261,13 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = @intCast(u32, src_ty.bitSize(self.target.*)); + const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = - if (src_ty.isAbiInt()) 
src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const dst_ty = self.air.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { @@ -10248,7 +10275,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .unsigned => src_bits + 1, }, 32), 8) catch unreachable; if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -10261,12 +10288,12 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) { .Float => switch (dst_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 }, @@ -10275,7 +10302,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { }, else => null, })) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const dst_alias = dst_reg.to128(); const src_alias = registerAlias(src_reg, src_size); @@ -10288,13 +10315,14 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); const dst_ty = self.air.typeOfIndex(inst); - const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*)); + const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const dst_size = math.divCeil(u32, @max(switch (dst_signedness) { .signed => dst_bits, @@ -10312,13 +10340,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) { .Float => switch (src_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si }, 64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, 
.cvttsd2si }, @@ -10339,12 +10367,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = self.air.typeOf(extra.ptr); const val_ty = self.air.typeOf(extra.expected_value); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10433,6 +10462,7 @@ fn atomicOp( rmw_op: ?std.builtin.AtomicRmwOp, order: std.builtin.AtomicOrder, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -10445,7 +10475,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10539,8 +10569,8 @@ fn atomicOp( .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv), .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv), .Min, .Max => { - const cc: Condition = switch (if (val_ty.isAbiInt()) - val_ty.intInfo(self.target.*).signedness + const cc: Condition = switch (if (val_ty.isAbiInt(mod)) + val_ty.intInfo(mod).signedness else .unsigned) { .unsigned => switch (op) { @@ -10728,6 +10758,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -10752,7 +10783,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { @@ -10897,8 +10928,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { // We need a properly aligned and sized call frame to be able to call this function. 
{ const needed_call_frame = FrameAlloc.init(.{ - .size = inst_ty.abiSize(self.target.*), - .alignment = inst_ty.abiAlignment(self.target.*), + .size = inst_ty.abiSize(mod), + .alignment = inst_ty.abiAlignment(mod), }); const frame_allocs_slice = self.frame_allocs.slice(); const stack_frame_size = @@ -11013,14 +11044,15 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const vector_ty = self.air.typeOfIndex(inst); - const dst_rc = regClassForType(vector_ty); - const scalar_ty = vector_ty.scalarType(); + const dst_rc = regClassForType(vector_ty, mod); + const scalar_ty = vector_ty.scalarType(mod); const src_mcv = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - switch (scalar_ty.zigTypeTag()) { + switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { 32 => switch (vector_ty.vectorLen()) { @@ -11233,36 +11265,37 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result_ty = self.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Struct => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); if (result_ty.containerLayout() == .Packed) { const struct_obj = result_ty.castTag(.@"struct").?.data; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(self.target.*) }, + .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*)); + const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i); + const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); @@ -11322,10 +11355,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*)); + const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11337,9 +11370,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { }, .Array => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11374,11 +11407,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { const union_ty = self.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); const src_ty = self.air.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); @@ -11400,7 +11434,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const tag_val = Value.initPayload(&tag_pl.base); var tag_int_pl: Value.Payload.U64 = undefined; const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); - const tag_int = tag_int_val.toUnsignedInt(self.target.*); + const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else @@ -11424,6 +11458,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const ty = self.air.typeOfIndex(inst); @@ -11466,14 +11501,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mir_tag = if (@as( ?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd132 }, 64 => .{ .v_sd, .fmadd132 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd132 }, @@ -11493,14 +11528,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd213 }, 64 => .{ .v_sd, .fmadd213 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch 
(ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd213 }, @@ -11520,14 +11555,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd231 }, 64 => .{ .v_sd, .fmadd231 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd231 }, @@ -11555,7 +11590,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11573,10 +11608,11 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ty = self.air.typeOf(ref); // If the type has no codegen bits, no need to store it. - if (!ty.hasRuntimeBitsIgnoreComptime()) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { @@ -11584,7 +11620,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref).?, + .val = self.air.value(ref, mod).?, })); break :tracking gop.value_ptr; }, @@ -11597,7 +11633,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? }); + return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { @@ -11670,6 +11706,7 @@ fn resolveCallingConventionValues( var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { + const mod = self.bin_file.options.module.?; const cc = fn_ty.fnCallingConvention(); const param_len = fn_ty.fnParamLen(); const param_types = try self.gpa.alloc(Type, param_len + var_args.len); @@ -11702,21 +11739,21 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). - result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(self.target.*)); + result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); }, else => {}, } // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { // TODO: is this even possible for C calling convention? 
result.return_value = InstTracking.init(.none); } else { const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11725,7 +11762,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(self.target.*)), + @intCast(u32, ret_ty.abiSize(mod)), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11744,11 +11781,11 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime()); + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11783,8 +11820,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11798,13 +11835,13 @@ fn resolveCallingConventionValues( result.stack_align = 16; // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11819,12 +11856,12 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11908,9 +11945,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { /// Truncates the value in the register in place. /// Clobbers any remaining bits. 
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { - const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{ + const mod = self.bin_file.options.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(self.target.*)), + .bits = @intCast(u16, ty.bitSize(mod)), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { @@ -11953,8 +11991,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { - const abi_size = ty.abiSize(self.target.*); - return switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); + return switch (ty.zigTypeTag(mod)) { else => switch (abi_size) { 1 => 8, 2 => 16, @@ -11971,7 +12010,8 @@ fn regBitSize(self: *Self, ty: Type) u64 { } fn regExtraBits(self: *Self, ty: Type) u64 { - return self.regBitSize(ty) - ty.bitSize(self.target.*); + const mod = self.bin_file.options.module.?; + return self.regBitSize(ty) - ty.bitSize(mod); } fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index e79424d6d8..c8d20c73fa 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -1,10 +1,3 @@ -const std = @import("std"); -const Type = @import("../../type.zig").Type; -const Target = std.Target; -const assert = std.debug.assert; -const Register = @import("bits.zig").Register; -const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; - pub const Class = enum { integer, sse, @@ -19,7 +12,7 @@ pub const Class = enum { float_combine, }; -pub fn classifyWindows(ty: Type, target: Target) Class { +pub fn classifyWindows(ty: Type, mod: *const Module) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. Any argument that doesn't fit in 8 @@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class { // "All floating point operations are done using the 16 XMM registers." // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // as if they were integers of the same size." - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer, .Int, .Bool, @@ -43,10 +36,10 @@ pub fn classifyWindows(ty: Type, target: Target) Class { .ErrorUnion, .AnyFrame, .Frame, - => switch (ty.abiSize(target)) { + => switch (ty.abiSize(mod)) { 0 => unreachable, 1, 2, 4, 8 => return .integer, - else => switch (ty.zigTypeTag()) { + else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, .Struct, .Union => if (ty.containerLayout() == .Packed) { return .win_i128; @@ -75,13 +68,14 @@ pub const Context = enum { ret, arg, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
-pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { + const target = mod.getTarget(); const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, }; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer => switch (ty.ptrSize()) { .Slice => { result[0] = .integer; @@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, }, .Int, .Enum, .ErrorSet => { - const bits = ty.intInfo(target).bits; + const bits = ty.intInfo(mod).bits; if (bits <= 64) { result[0] = .integer; return result; @@ -165,7 +159,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, .Vector => { const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(target) * ty.arrayLen(); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, @@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return memory_class; }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { result[0] = .integer; return result; } @@ -215,7 +209,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty.containerLayout() == .Packed) { assert(ty_size <= 128); result[0] = .integer; @@ -230,12 +224,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { const fields = ty.structFields(); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } - const field_size = field.ty.abiSize(target); - const field_class_array = classifySystemV(field.ty, target, .other); + const field_size = field.ty.abiSize(mod); + const field_class_array = classifySystemV(field.ty, mod, .other); const field_class = std.mem.sliceTo(&field_class_array, .none); if (byte_i + field_size <= 8) { // Combine this field with the previous one. @@ -334,7 +328,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty.containerLayout() == .Packed) { assert(ty_size <= 128); result[0] = .integer; @@ -347,12 +341,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { const fields = ty.unionFields(); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } // Combine this field with the previous one. - const field_class = classifySystemV(field.ty, target, .other); + const field_class = classifySystemV(field.ty, mod, .other); for (&result, 0..) |*result_item, i| { const field_item = field_class[i]; // "If both classes are equal, this is the resulting class." 
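A note on the classification hunks above: the field loops fold each field's classes into `result` following the x86-64 System V psABI merge rule quoted in the comments. A minimal sketch of that rule, using the `Class` enum declared earlier in this file; `mergeClass` is a hypothetical helper name (the patch inlines the equivalent comparisons), and the sketch is simplified relative to the full psABI text, which also sends X87 pairs to MEMORY:

    fn mergeClass(a: Class, b: Class) Class {
        if (a == b) return a; // "If both classes are equal, this is the resulting class."
        if (a == .none) return b; // NO_CLASS loses to any other class
        if (b == .none) return a;
        if (a == .memory or b == .memory) return .memory; // MEMORY always wins
        if (a == .integer or b == .integer) return .integer; // INTEGER beats SSE
        return .sse; // remaining combinations merge to SSE
    }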
@@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return result; }, .Array => { - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty_size <= 64) { result[0] = .integer; return result; @@ -527,10 +521,17 @@ pub const RegisterClass = struct { }; }; +const builtin = @import("builtin"); +const std = @import("std"); +const Target = std.Target; +const assert = std.debug.assert; const testing = std.testing; + const Module = @import("../../Module.zig"); +const Register = @import("bits.zig").Register; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; +const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; -const builtin = @import("builtin"); fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { return .{ @@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { .is_comptime = false, }; } - -test "C_C_D" { - var fields = Module.Struct.Fields{}; - // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 }; - try fields.ensureTotalCapacity(testing.allocator, 3); - defer fields.deinit(testing.allocator); - fields.putAssumeCapacity("v1", _field(.i8, 0)); - fields.putAssumeCapacity("v2", _field(.i8, 1)); - fields.putAssumeCapacity("v3", _field(.f64, 4)); - - var C_C_D_struct = Module.Struct{ - .fields = fields, - .namespace = undefined, - .owner_decl = undefined, - .zir_index = undefined, - .layout = .Extern, - .status = .fully_resolved, - .known_non_opv = true, - .is_tuple = false, - }; - var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct }; - - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret), - ); - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg), - ); -} diff --git a/src/codegen.zig b/src/codegen.zig index adce183833..6846bebe6b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -154,7 +154,7 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag() == .Enum) { + } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; for (lazy_sym.ty.enumFields().keys()) |tag_name| { try code.ensureUnusedCapacity(tag_name.len + 1); @@ -186,22 +186,22 @@ pub fn generateSymbol( typed_value.val = rt.data; } - const target = bin_file.options.target; + const mod = bin_file.options.module.?; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const mod = bin_file.options.module.?; log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), }); if (typed_value.val.isUndefDeep()) { - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); return Result.ok; } - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Fn => { return Result{ .fail = try ErrorMsg.create( @@ -219,7 +219,7 @@ pub fn generateSymbol( 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), 80 => { writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try 
code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0, abi_size - 10); }, 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), @@ -242,7 +242,7 @@ pub fn generateSymbol( try code.ensureUnusedCapacity(bytes.len + 1); code.appendSliceAssumeCapacity(bytes); if (typed_value.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); code.appendAssumeCapacity(byte); } return Result.ok; @@ -330,11 +330,11 @@ pub fn generateSymbol( .zero, .one, .int_u64, .int_big_positive => { switch (target.ptrBitWidth()) { 32 => { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); }, 64 => { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); }, else => unreachable, @@ -399,19 +399,19 @@ pub fn generateSymbol( }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))), + .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)), + .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))), }; try code.append(x); return Result.ok; } if (info.bits > 64) { var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, target); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const bigint = typed_value.val.toBigInt(&bigint_buffer, mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const start = code.items.len; try code.resize(start + abi_size); bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); @@ -420,25 +420,25 @@ pub fn generateSymbol( switch (info.signedness) { .unsigned => { if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(target)); + const x = @intCast(u16, typed_value.val.toUnsignedInt(mod)); mem.writeInt(u16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(target)); + const x = @intCast(u32, typed_value.val.toUnsignedInt(mod)); mem.writeInt(u32, try code.addManyAsArray(4), x, endian); } else { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); } }, .signed => { if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(target)); + const x = @intCast(i16, typed_value.val.toSignedInt(mod)); mem.writeInt(i16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(target)); + const x = @intCast(i32, typed_value.val.toSignedInt(mod)); mem.writeInt(i32, try code.addManyAsArray(4), x, endian); } else { - const x = typed_value.val.toSignedInt(target); + const x = typed_value.val.toSignedInt(mod); mem.writeInt(i64, try code.addManyAsArray(8), x, 
endian); } }, @@ -449,9 +449,9 @@ pub fn generateSymbol( var int_buffer: Value.Payload.U64 = undefined; const int_val = typed_value.enumToInt(&int_buffer); - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(target)); + const x = @intCast(u8, int_val.toUnsignedInt(mod)); try code.append(x); return Result.ok; } @@ -468,25 +468,25 @@ pub fn generateSymbol( switch (info.signedness) { .unsigned => { if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(target)); + const x = @intCast(u16, int_val.toUnsignedInt(mod)); mem.writeInt(u16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(target)); + const x = @intCast(u32, int_val.toUnsignedInt(mod)); mem.writeInt(u32, try code.addManyAsArray(4), x, endian); } else { - const x = int_val.toUnsignedInt(target); + const x = int_val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); } }, .signed => { if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(target)); + const x = @intCast(i16, int_val.toSignedInt(mod)); mem.writeInt(i16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(target)); + const x = @intCast(i32, int_val.toSignedInt(mod)); mem.writeInt(i32, try code.addManyAsArray(4), x, endian); } else { - const x = int_val.toSignedInt(target); + const x = int_val.toSignedInt(mod); mem.writeInt(i64, try code.addManyAsArray(8), x, endian); } }, @@ -503,7 +503,7 @@ pub fn generateSymbol( const struct_obj = typed_value.ty.castTag(.@"struct").?.data; const fields = struct_obj.fields.values(); const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const current_pos = code.items.len; try code.resize(current_pos + abi_size); var bits: u16 = 0; @@ -512,8 +512,8 @@ pub fn generateSymbol( const field_ty = fields[index].ty; // pointer may point to a decl which must be marked used // but can also result in a relocation. Therefore we handle those seperately. - if (field_ty.zigTypeTag() == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(target)) orelse return error.Overflow; + if (field_ty.zigTypeTag(mod) == .Pointer) { + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); defer tmp_list.deinit(); switch (try generateSymbol(bin_file, src_loc, .{ @@ -526,7 +526,7 @@ pub fn generateSymbol( } else { field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; } - bits += @intCast(u16, field_ty.bitSize(target)); + bits += @intCast(u16, field_ty.bitSize(mod)); } return Result.ok; @@ -536,7 +536,7 @@ pub fn generateSymbol( const field_vals = typed_value.val.castTag(.aggregate).?.data; for (field_vals, 0..) 
|field_val, index| { const field_ty = typed_value.ty.structFieldType(index); - if (!field_ty.hasRuntimeBits()) continue; + if (!field_ty.hasRuntimeBits(mod)) continue; switch (try generateSymbol(bin_file, src_loc, .{ .ty = field_ty, @@ -548,7 +548,7 @@ pub fn generateSymbol( const unpadded_field_end = code.items.len - struct_begin; // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target); + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; if (padding > 0) { @@ -560,7 +560,7 @@ pub fn generateSymbol( }, .Union => { const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(target); + const layout = typed_value.ty.unionGetLayout(mod); if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ @@ -584,7 +584,7 @@ pub fn generateSymbol( const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; assert(union_ty.haveFieldTypes()); const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits()) { + if (!field_ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); } else { switch (try generateSymbol(bin_file, src_loc, .{ @@ -595,7 +595,7 @@ pub fn generateSymbol( .fail => |em| return Result{ .fail = em }, } - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } @@ -620,15 +620,15 @@ pub fn generateSymbol( .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_type = typed_value.ty.optionalChild(&opt_buf); - const is_pl = !typed_value.val.isNull(); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const is_pl = !typed_value.val.isNull(mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - if (!payload_type.hasRuntimeBits()) { + if (!payload_type.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); return Result.ok; } - if (typed_value.ty.optionalReprIsPayload()) { + if (typed_value.ty.optionalReprIsPayload(mod)) { if (typed_value.val.castTag(.opt_payload)) |payload| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -637,7 +637,7 @@ pub fn generateSymbol( .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else if (!typed_value.val.isNull()) { + } else if (!typed_value.val.isNull(mod)) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, .val = typed_value.val, @@ -652,7 +652,7 @@ pub fn generateSymbol( return Result.ok; } - const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1; + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -671,7 +671,7 @@ pub fn generateSymbol( const payload_ty = typed_value.ty.errorUnionPayload(); const is_payload = typed_value.val.errorUnionIsPayload(); - if 
(!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, @@ -679,9 +679,9 @@ pub fn generateSymbol( }, code, debug_output, reloc_info); } - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const abi_align = typed_value.ty.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); // error value first when its type is larger than the error union's payload if (error_align > payload_align) { @@ -743,7 +743,7 @@ pub fn generateSymbol( try code.writer().writeInt(u32, kv.value, endian); }, else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target))); + try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod))); }, } return Result.ok; @@ -752,7 +752,7 @@ pub fn generateSymbol( .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - len) orelse + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse return error.Overflow; try code.ensureUnusedCapacity(len + padding); code.appendSliceAssumeCapacity(bytes[0..len]); @@ -763,8 +763,8 @@ pub fn generateSymbol( const elem_vals = typed_value.val.castTag(.aggregate).?.data; const elem_ty = typed_value.ty.elemType(); const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; @@ -784,8 +784,8 @@ pub fn generateSymbol( const array = typed_value.val.castTag(.repeated).?.data; const elem_ty = typed_value.ty.childType(); const len = typed_value.ty.arrayLen(); - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; @@ -805,7 +805,7 @@ pub fn generateSymbol( .str_lit => { const str_lit = typed_value.val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - str_lit.len) orelse + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse return error.Overflow; try code.ensureUnusedCapacity(str_lit.len + padding); code.appendSliceAssumeCapacity(bytes); @@ -832,7 +832,7 @@ fn lowerParentPtr( debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { - const target = bin_file.options.target; + const mod = bin_file.options.module.?; switch (parent_ptr.tag()) { .field_ptr => { const field_ptr = parent_ptr.castTag(.field_ptr).?.data; @@ -843,19 +843,19 @@ fn lowerParentPtr( 
field_ptr.container_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) { + reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { assert(field_ptr.container_ty.isSlice()); var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target), + 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(mod), else => unreachable, }; }, .Struct, .Union => field_ptr.container_ty.structFieldOffset( field_ptr.field_index, - target, + mod, ), else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -875,7 +875,7 @@ fn lowerParentPtr( elem_ptr.array_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), + reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))), ); }, .opt_payload_ptr => { @@ -900,7 +900,7 @@ fn lowerParentPtr( eu_payload_ptr.container_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))), + reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, mod))), ); }, .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( @@ -945,7 +945,7 @@ fn lowerDeclRef( reloc_info: RelocInfo, ) CodeGenError!Result { const target = bin_file.options.target; - const module = bin_file.options.module.?; + const mod = bin_file.options.module.?; if (typed_value.ty.isSlice()) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -961,7 +961,7 @@ fn lowerDeclRef( // generate length var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(module), + .data = typed_value.val.sliceLen(mod), }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, @@ -975,14 +975,14 @@ fn lowerDeclRef( } const ptr_width = target.ptrBitWidth(); - const decl = module.declPtr(decl_index); - const is_fn_body = decl.ty.zigTypeTag() == .Fn; - if (!is_fn_body and !decl.ty.hasRuntimeBits()) { + const decl = mod.declPtr(decl_index); + const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; + if (!is_fn_body and !decl.ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8)); return Result.ok; } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, @@ -1059,16 +1059,16 @@ fn genDeclRef( tv: TypedValue, decl_index: Module.Decl.Index, ) CodeGenError!GenResult { - const module = bin_file.options.module.?; - log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) }); + const mod = bin_file.options.module.?; + log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(mod), tv.val.fmtValue(tv.ty, mod) }); const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); - if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { const imm: u64 = switch (ptr_bytes) { 1 => 0xaa, 2 => 0xaaaa, @@ -1080,20 +1080,20 @@ fn genDeclRef( } // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
- if (tv.ty.castPtrToFn()) |fn_ty| { + if (tv.ty.castPtrToFn(mod)) |fn_ty| { if (fn_ty.fnInfo().is_generic) { - return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(target) }); + return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } - } else if (tv.ty.zigTypeTag() == .Pointer) { - const elem_ty = tv.ty.elemType2(); - if (!elem_ty.hasRuntimeBits()) { - return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(target) }); + } else if (tv.ty.zigTypeTag(mod) == .Pointer) { + const elem_ty = tv.ty.elemType2(mod); + if (!elem_ty.hasRuntimeBits(mod)) { + return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) }); } } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); - const is_threadlocal = tv.val.isPtrToThreadLocal(module) and !bin_file.options.single_threaded; + const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded; if (bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index); @@ -1186,7 +1186,7 @@ pub fn genTypedValue( } } - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => {}, @@ -1196,18 +1196,18 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = 0 }); }, .int_u64 => { - return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) }); + return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, } }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= ptr_bits) { const unsigned = switch (info.signedness) { - .signed => @bitCast(u64, typed_value.val.toSignedInt(target)), - .unsigned => typed_value.val.toUnsignedInt(target), + .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)), + .unsigned => typed_value.val.toUnsignedInt(mod), }; return GenResult.mcv(.{ .immediate = unsigned }); } @@ -1216,7 +1216,7 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) }); }, .Optional => { - if (typed_value.ty.isPtrLikeOptional()) { + if (typed_value.ty.isPtrLikeOptional(mod)) { if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); var buf: Type.Payload.ElemType = undefined; @@ -1224,8 +1224,8 @@ pub fn genTypedValue( .ty = typed_value.ty.optionalChild(&buf), .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, }, owner_decl_index); - } else if (typed_value.ty.abiSize(target) == 1) { - return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) }); + } else if (typed_value.ty.abiSize(mod) == 1) { + return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); } }, .Enum => { @@ -1241,9 +1241,8 @@ pub fn genTypedValue( typed_value.ty.cast(Type.Payload.EnumFull).?.data.values; if (enum_values.count() != 0) { const tag_val = enum_values.keys()[field_index.data]; - var buf: Type.Payload.Bits = undefined; return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.intTagType(&buf), + .ty = typed_value.ty.intTagType(), .val = tag_val, }, owner_decl_index); } else { @@ -1253,8 +1252,7 @@ pub fn genTypedValue( else => unreachable, } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer); + const int_tag_ty = typed_value.ty.intTagType(); return genTypedValue(bin_file, src_loc, .{ .ty = int_tag_ty, .val = typed_value.val, @@ 
-1281,7 +1279,7 @@ pub fn genTypedValue( const payload_type = typed_value.ty.errorUnionPayload(); const is_pl = typed_value.val.errorUnionIsPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); return genTypedValue(bin_file, src_loc, .{ @@ -1306,23 +1304,23 @@ pub fn genTypedValue( return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index); } -pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) { +pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align); + return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); } } -pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align); +pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 86b74b1429..da040a6fbb 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -285,10 +286,11 @@ pub const Function = struct { const gop = try f.value_map.getOrPut(inst); if (gop.found_existing) return gop.value_ptr.*; - const val = f.air.value(ref).?; + const mod = f.object.dg.module; + const val = f.air.value(ref, mod).?; const ty = f.air.typeOf(ref); - const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: { + const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); @@ -318,11 +320,11 @@ pub const Function = struct { /// those which go into `allocs`. This function does not add the resulting local into `allocs`; /// that responsibility lies with the caller. 
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue { + const mod = f.object.dg.module; const gpa = f.object.dg.gpa; - const target = f.object.dg.module.getTarget(); try f.locals.append(gpa, .{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), }); return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } @@ -336,10 +338,10 @@ pub const Function = struct { /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should /// not be used for persistent locals (i.e. those in `allocs`). fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; if (f.free_locals_map.getPtr(.{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), })) |locals_list| { if (locals_list.popOrNull()) |local_entry| { return .{ .new_local = local_entry.key }; @@ -352,8 +354,9 @@ pub const Function = struct { fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -364,8 +367,9 @@ pub const Function = struct { fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -377,8 +381,9 @@ pub const Function = struct { fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -390,8 +395,9 @@ pub const Function = struct { fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -522,11 +528,12 @@ pub const DeclGen = struct { decl_index: Decl.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); assert(decl.has_tv); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. 
- if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) { + if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) { return dg.writeCValue(writer, .{ .undef = ty }); } @@ -553,7 +560,7 @@ pub const DeclGen = struct { var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(dg.module), + .data = val.sliceLen(mod), }; const len_val = Value.initPayload(&len_pl.base); @@ -568,7 +575,7 @@ pub const DeclGen = struct { // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. - const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module); + const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.eql(decl.ty, mod); if (need_typecast) { try writer.writeAll("(("); try dg.renderType(writer, ty); @@ -584,6 +591,8 @@ pub const DeclGen = struct { // // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; + if (!ptr_ty.isSlice()) { try writer.writeByte('('); try dg.renderType(writer, ptr_ty); @@ -601,7 +610,6 @@ pub const DeclGen = struct { try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); }, .field_ptr => { - const target = dg.module.getTarget(); const field_ptr = ptr_val.castTag(.field_ptr).?.data; // Ensure complete type definition is visible before accessing fields. @@ -615,7 +623,7 @@ pub const DeclGen = struct { field_ptr.container_ty, ptr_ty, @intCast(u32, field_ptr.field_index), - target, + mod, )) { .begin => try dg.renderParentPtr( writer, @@ -714,19 +722,20 @@ pub const DeclGen = struct { if (val.castTag(.runtime_value)) |rt| { val = rt.data; } - const target = dg.module.getTarget(); + const mod = dg.module; + const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, else => .Initializer, }; - const safety_on = switch (dg.module.optimizeMode()) { + const safety_on = switch (mod.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; if (val.isUndefDeep()) { - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool => { if (safety_on) { return writer.writeAll("0xaa"); @@ -737,8 +746,8 @@ pub const DeclGen = struct { .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}), .Float => { const bits = ty.floatBits(target); - var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_pl.base); + // All unsigned ints matching float types are pre-allocated. 
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -778,11 +787,11 @@ pub const DeclGen = struct { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, Type.bool, val, location); } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return dg.renderValue(writer, payload_ty, val, location); } @@ -811,7 +820,7 @@ pub const DeclGen = struct { for (0..ty.structFieldCount()) |field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBits()) continue; + if (!field_ty.hasRuntimeBits(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, val, initializer_type); @@ -832,17 +841,17 @@ pub const DeclGen = struct { try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, val, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, val, initializer_type); break; } @@ -853,7 +862,7 @@ pub const DeclGen = struct { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, val, location); } @@ -916,7 +925,7 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => switch (val.tag()) { .field_ptr, .elem_ptr, @@ -931,8 +940,8 @@ pub const DeclGen = struct { const bits = ty.floatBits(target); const f128_val = val.toFloat(f128); - var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_ty_pl.base); + // All unsigned ints matching float types are pre-allocated. 
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; assert(bits <= 128); var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; @@ -1109,7 +1118,7 @@ pub const DeclGen = struct { }, else => unreachable, }; - const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null; + const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; try writer.print("{s}", .{ fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), }); @@ -1131,11 +1140,11 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(target)); + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1145,7 +1154,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1183,10 +1192,10 @@ pub const DeclGen = struct { const payload_ty = ty.optionalChild(&opt_buf); const is_null_val = Value.makeBool(val.tag() == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; return dg.renderValue(writer, payload_ty, payload_val, location); } @@ -1218,7 +1227,7 @@ pub const DeclGen = struct { const error_ty = ty.errorUnionSet(); const error_val = if (val.errorUnionIsPayload()) Value.zero else val; - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); } @@ -1263,8 +1272,7 @@ pub const DeclGen = struct { } }, else => { - var int_tag_ty_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_ty_buffer); + const int_tag_ty = ty.intTagType(); return dg.renderValue(writer, int_tag_ty, val, location); }, } @@ -1295,7 +1303,7 @@ pub const DeclGen = struct { for (field_vals, 0..) 
|field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, field_val, initializer_type); @@ -1306,13 +1314,10 @@ pub const DeclGen = struct { }, .Packed => { const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); @@ -1321,7 +1326,7 @@ pub const DeclGen = struct { for (0..field_vals.len) |field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; eff_num_fields += 1; } @@ -1330,7 +1335,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderValue(writer, ty, Value.undef, initializer_type); try writer.writeByte(')'); - } else if (ty.bitSize(target) > 64) { + } else if (ty.bitSize(mod) > 64) { // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) var num_or = eff_num_fields - 1; while (num_or > 0) : (num_or -= 1) { @@ -1344,7 +1349,7 @@ pub const DeclGen = struct { for (field_vals, 0..) |field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; if (bit_offset_val_pl.data != 0) { @@ -1362,7 +1367,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); needs_closing_paren = true; eff_index += 1; } @@ -1373,7 +1378,7 @@ pub const DeclGen = struct { for (field_vals, 0..) 
|field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); @@ -1388,7 +1393,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, field_ty, field_val, .Other); } - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); empty = false; } try writer.writeByte(')'); @@ -1408,12 +1413,12 @@ pub const DeclGen = struct { const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; if (ty.containerLayout() == .Packed) { - if (field_ty.hasRuntimeBits()) { - if (field_ty.isPtrAtRuntime()) { + if (field_ty.hasRuntimeBits(mod)) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); - } else if (field_ty.zigTypeTag() == .Float) { + } else if (field_ty.zigTypeTag(mod) == .Float) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); @@ -1427,21 +1432,21 @@ pub const DeclGen = struct { try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - if (field_ty.hasRuntimeBits()) { + if (field_ty.hasRuntimeBits(mod)) { try writer.print(" .{ } = ", .{fmtIdent(field_name)}); try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); try writer.writeByte(' '); } else for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } @@ -1478,9 +1483,9 @@ pub const DeclGen = struct { }, ) !void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; - const fn_decl = module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); const fn_info = fn_decl.ty.fnInfo(); @@ -1498,7 +1503,7 @@ pub const DeclGen = struct { const trailing = try renderTypePrefix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1525,7 +1530,7 @@ pub const DeclGen = struct { try renderTypeSuffix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1577,9 +1582,9 @@ pub const DeclGen = struct { fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; - _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + const mod = dg.module; + _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1619,18 +1624,18 @@ pub const DeclGen = struct { /// | > 64 bit integer | < 64 bit integer | zig_make_(0, src) /// | > 64 bit 
integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src)) fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { - const target = dg.module.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const dest_int_info = dest_ty.intInfo(target); + const mod = dg.module; + const dest_bits = dest_ty.bitSize(mod); + const dest_int_info = dest_ty.intInfo(mod); - const src_is_ptr = src_ty.isPtrAtRuntime(); + const src_is_ptr = src_ty.isPtrAtRuntime(mod); const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(target); - const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null; + const src_bits = src_eff_ty.bitSize(mod); + const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or @@ -1703,8 +1708,8 @@ pub const DeclGen = struct { alignment: u32, kind: CType.Kind, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)); + const mod = dg.module; + const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)); try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas); } @@ -1717,7 +1722,7 @@ pub const DeclGen = struct { alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; switch (std.math.order(alignas.@"align", alignas.abi)) { .lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}), @@ -1726,22 +1731,23 @@ pub const DeclGen = struct { } const trailing = - try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers); + try renderTypePrefix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, qualifiers); try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { + const mod = dg.module; switch (tv.val.tag()) { .extern_fn => return true, .function => { const func = tv.val.castTag(.function).?.data; - return dg.module.decl_exports.contains(func.owner_decl); + return mod.decl_exports.contains(func.owner_decl); }, .variable => { const variable = tv.val.castTag(.variable).?.data; - return dg.module.decl_exports.contains(variable.owner_decl); + return mod.decl_exports.contains(variable.owner_decl); }, else => unreachable, } @@ -1838,10 +1844,11 @@ pub const DeclGen = struct { } fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); - if (dg.module.decl_exports.get(decl_index)) |exports| { + if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); } else if (decl.isExtern()) { try writer.writeAll(mem.span(decl.name)); @@ -1850,7 +1857,7 @@ pub const DeclGen = struct { // expand to 3x the length of its input, but let's cut it off at a much shorter 
limit. var name: [100]u8 = undefined; var name_stream = std.io.fixedBufferStream(&name); - decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) { + decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => {}, }; try writer.print("{}__{d}", .{ @@ -1894,10 +1901,10 @@ pub const DeclGen = struct { .bits => {}, } - const target = dg.module.getTarget(); - const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{ + const mod = dg.module; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(target)), + .bits = @intCast(u16, ty.bitSize(mod)), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); @@ -1916,6 +1923,7 @@ pub const DeclGen = struct { val: Value, loc: ValueRenderLocation, ) !std.fmt.Formatter(formatIntLiteral) { + const mod = dg.module; const kind: CType.Kind = switch (loc) { .FunctionArgument => .parameter, .Initializer, .Other => .complete, @@ -1923,7 +1931,7 @@ pub const DeclGen = struct { }; return std.fmt.Formatter(formatIntLiteral){ .data = .{ .dg = dg, - .int_info = ty.intInfo(dg.module.getTarget()), + .int_info = ty.intInfo(mod), .kind = kind, .cty = try dg.typeToCType(ty, kind), .val = val, @@ -2646,11 +2654,12 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; const decl = o.dg.decl.?; const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? }; const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; - if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; + if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; if (tv.val.tag() == .extern_fn) { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); @@ -2704,8 +2713,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { .val = dg.decl.?.val, }; const writer = dg.fwd_decl.writer(); + const mod = dg.module; - switch (tv.ty.zigTypeTag()) { + switch (tv.ty.zigTypeTag(mod)) { .Fn => { const is_global = dg.declIsGlobal(tv); if (is_global) { @@ -2791,6 +2801,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { + const mod = f.object.dg.module; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { @@ -2826,10 +2837,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(); + const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. 
- break :blk if (lhs_scalar_ty.isInt()) + break :blk if (lhs_scalar_ty.isInt(mod)) try airBinOp(f, inst, "%", "rem", .none) else try airBinFloatOp(f, inst, "fmod"); @@ -3095,9 +3106,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3120,13 +3132,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3155,9 +3168,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3180,13 +3194,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); const slice_ty = f.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.elemType2(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const elem_ty = slice_ty.elemType2(mod); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3209,9 +3224,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3234,14 +3250,14 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const elem_type = inst_ty.elemType(); - if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_type, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local 
}); const gpa = f.object.dg.module.gpa; @@ -3250,14 +3266,14 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const elem_ty = inst_ty.elemType(); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_ty, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; @@ -3290,14 +3306,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const ptr_ty = f.air.typeOf(ty_op.operand); - const ptr_scalar_ty = ptr_ty.scalarType(); + const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const src_ty = ptr_info.pointee_type; - if (!src_ty.hasRuntimeBitsIgnoreComptime()) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3306,9 +3323,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); - const target = f.object.dg.module.getTarget(); - const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target); - const is_array = lowersToArray(src_ty, target); + const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod); + const is_array = lowersToArray(src_ty, mod); const need_memcpy = !is_aligned or is_array; const writer = f.object.writer(); @@ -3327,17 +3343,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, src_ty); try writer.writeAll("))"); } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { - var host_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); + const host_bits: u16 = ptr_info.host_size * 8; + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_pl.data - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -3345,11 +3354,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - var field_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, src_ty.bitSize(target)), - }; - const field_ty = Type.initPayload(&field_pl.base); + const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3360,9 +3365,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if 
(field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); @@ -3390,23 +3395,23 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - const target = f.object.dg.module.getTarget(); const op_inst = Air.refToIndex(un_op); const op_ty = f.air.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType() else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); + const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) { try reap(f, inst, &.{un_op}); _ = try airCall(f, op_inst.?, .always_tail); - } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); var deref = is_ptr; - const is_array = lowersToArray(ret_ty, target); + const is_array = lowersToArray(ret_ty, mod); const ret_val = if (is_array) ret_val: { const array_local = try f.allocLocal(inst, lowered_ret_ty); try writer.writeAll("memcpy("); @@ -3442,15 +3447,16 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3467,20 +3473,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const target = f.object.dg.module.getTarget(); - const dest_int_info = inst_scalar_ty.intInfo(target); + const inst_scalar_ty = inst_ty.scalarType(mod); + const dest_int_info = inst_scalar_ty.intInfo(mod); const dest_bits = dest_int_info.bits; const dest_c_bits = toCIntBits(dest_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); - const scalar_int_info = scalar_ty.intInfo(target); + const scalar_ty = operand_ty.scalarType(mod); + const scalar_int_info = scalar_ty.intInfo(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3515,7 +3521,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { var stack 
align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - const mask_val = try inst_scalar_ty.maxInt(stack.get(), target); + const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod); try writer.writeAll("zig_and_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); try writer.writeByte('('); @@ -3577,17 +3583,18 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { + const mod = f.object.dg.module; // *a = b; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.air.typeOf(bin_op.lhs); - const ptr_scalar_ty = ptr_ty.scalarType(); + const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.air.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false; + const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3602,10 +3609,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { return .none; } - const target = f.object.dg.module.getTarget(); const is_aligned = ptr_info.@"align" == 0 or - ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target); - const is_array = lowersToArray(ptr_info.pointee_type, target); + ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod); + const is_array = lowersToArray(ptr_info.pointee_type, mod); const need_memcpy = !is_aligned or is_array; const src_val = try f.resolveInst(bin_op.rhs); @@ -3647,14 +3653,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { const host_bits = ptr_info.host_size * 8; - var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits }; - const host_ty = Type.initPayload(&host_pl.base); + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -3662,7 +3663,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - const src_bits = src_ty.bitSize(target); + const src_bits = src_ty.bitSize(mod); const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; var stack align(@alignOf(ExpectedContents)) = @@ -3693,9 +3694,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_make_"); try 
f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); @@ -3705,7 +3706,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeByte(')'); } - if (src_ty.isPtrAtRuntime()) { + if (src_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); @@ -3728,6 +3729,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3737,7 +3739,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const inst_ty = f.air.typeOfIndex(inst); const operand_ty = f.air.typeOf(bin_op.lhs); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const w = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3765,9 +3767,10 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: } fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); @@ -3797,11 +3800,11 @@ fn airBinOp( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const operand_ty = f.air.typeOf(bin_op.lhs); - const scalar_ty = operand_ty.scalarType(); - const target = f.object.dg.module.getTarget(); - if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat()) + const scalar_ty = operand_ty.scalarType(mod); + if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); const lhs = try f.resolveInst(bin_op.lhs); @@ -3835,12 +3838,12 @@ fn airCmpOp( data: anytype, operator: std.math.CompareOperator, ) !CValue { + const mod = f.object.dg.module; const lhs_ty = f.air.typeOf(data.lhs); - const scalar_ty = lhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); - const target = f.object.dg.module.getTarget(); - const scalar_bits = scalar_ty.bitSize(target); - if (scalar_ty.isInt() and scalar_bits > 64) + const scalar_bits = scalar_ty.bitSize(mod); + if (scalar_ty.isInt(mod) and scalar_bits > 64) return airCmpBuiltinCall( f, inst, @@ -3885,12 +3888,12 @@ fn airEquality( inst: Air.Inst.Index, operator: std.math.CompareOperator, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const operand_ty = f.air.typeOf(bin_op.lhs); - const target = f.object.dg.module.getTarget(); - const operand_bits = operand_ty.bitSize(target); - if (operand_ty.isInt() and operand_bits > 64) + const operand_bits = operand_ty.bitSize(mod); + if (operand_ty.isInt(mod) and operand_bits > 64) return airCmpBuiltinCall( f, inst, @@ -3912,7 +3915,7 @@ fn airEquality( try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) { + if (operand_ty.zigTypeTag(mod) == 
.Optional and !operand_ty.optionalReprIsPayload(mod)) { // (A && B) || (C && (A == B)) // A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload @@ -3965,6 +3968,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3973,8 +3977,8 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const elem_ty = inst_scalar_ty.elemType2(); + const inst_scalar_ty = inst_ty.scalarType(mod); + const elem_ty = inst_scalar_ty.elemType2(mod); const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); @@ -3983,7 +3987,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try v.elem(f, writer); try writer.writeAll(" = "); - if (elem_ty.hasRuntimeBitsIgnoreComptime()) { + if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We must convert to and from integer types to prevent UB if the operation // results in a NULL pointer, or if LHS is NULL. The operation is only UB // if the result is NULL and then dereferenced. @@ -4012,13 +4016,13 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { } fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); - const target = f.object.dg.module.getTarget(); - if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64) + if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64) return try airBinBuiltinCall(f, inst, operation[1..], .none); if (inst_scalar_ty.isRuntimeFloat()) return try airBinFloatOp(f, inst, operation); @@ -4092,12 +4096,11 @@ fn airCall( inst: Air.Inst.Index, modifier: std.builtin.CallModifier, ) !CValue { + const mod = f.object.dg.module; // Not even allowed to call panic in a naked function. 
if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; const gpa = f.object.dg.gpa; - const module = f.object.dg.module; - const target = module.getTarget(); const writer = f.object.writer(); const pl_op = f.air.instructions.items(.data)[inst].pl_op; @@ -4116,7 +4119,7 @@ fn airCall( resolved_arg.* = try f.resolveInst(arg); if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { var lowered_arg_buf: LowerFnRetTyBuffer = undefined; - const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target); + const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod); const array_local = try f.allocLocal(inst, lowered_arg_ty); try writer.writeAll("memcpy("); @@ -4139,7 +4142,7 @@ fn airCall( } const callee_ty = f.air.typeOf(pl_op.operand); - const fn_ty = switch (callee_ty.zigTypeTag()) { + const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => callee_ty.childType(), else => unreachable, @@ -4147,13 +4150,13 @@ fn airCall( const ret_ty = fn_ty.fnReturnType(); var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); + const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); const result_local = result: { if (modifier == .always_tail) { try writer.writeAll("zig_always_tail return "); break :result .none; - } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result .none; } else if (f.liveness.isUnused(inst)) { try writer.writeByte('('); @@ -4171,7 +4174,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand) orelse break :known; + const callee_val = f.air.value(pl_op.operand, mod) orelse break :known; break :fn_decl switch (callee_val.tag()) { .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, @@ -4181,9 +4184,9 @@ fn airCall( }; switch (modifier) { .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0), - inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName( - @unionInit(LazyFnKey, @tagName(mod), fn_decl), - @unionInit(LazyFnValue.Data, @tagName(mod), {}), + inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName( + @unionInit(LazyFnKey, @tagName(m), fn_decl), + @unionInit(LazyFnValue.Data, @tagName(m), {}), )), else => unreachable, } @@ -4211,7 +4214,7 @@ fn airCall( try writer.writeAll(");\n"); const result = result: { - if (result_local == .none or !lowersToArray(ret_ty, target)) + if (result_local == .none or !lowersToArray(ret_ty, mod)) break :result result_local; const array_local = try f.allocLocal(inst, ret_ty); @@ -4254,9 +4257,10 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4330,12 +4334,13 @@ fn lowerTry( err_union_ty: Type, is_ptr: bool, ) !CValue { + const mod = f.object.dg.module; const err_union = try f.resolveInst(operand); const inst_ty = 
f.air.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { try writer.writeAll("if ("); @@ -4431,6 +4436,8 @@ const LocalResult = struct { need_free: bool, fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue { + const mod = f.object.dg.module; + if (lr.need_free) { // Move the freshly allocated local to be owned by this instruction, // by returning it here instead of freeing it. @@ -4441,7 +4448,7 @@ const LocalResult = struct { try lr.free(f); const writer = f.object.writer(); try f.writeCValue(writer, local, .Other); - if (dest_ty.isAbiInt()) { + if (dest_ty.isAbiInt(mod)) { try writer.writeAll(" = "); } else { try writer.writeAll(" = ("); @@ -4461,12 +4468,13 @@ const LocalResult = struct { }; fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; + const target = mod.getTarget(); const writer = f.object.writer(); - if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) { - const src_info = dest_ty.intInfo(target); - const dest_info = operand_ty.intInfo(target); + if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) { + const src_info = dest_ty.intInfo(mod); + const dest_info = operand_ty.intInfo(mod); if (src_info.signedness == dest_info.signedness and src_info.bits == dest_info.bits) { @@ -4477,7 +4485,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca } } - if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) { + if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) { const local = try f.allocLocal(0, dest_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); @@ -4494,7 +4502,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca const operand_lval = if (operand == .constant) blk: { const operand_local = try f.allocLocal(0, operand_ty); try f.writeCValue(writer, operand_local, .Other); - if (operand_ty.isAbiInt()) { + if (operand_ty.isAbiInt(mod)) { try writer.writeAll(" = "); } else { try writer.writeAll(" = ("); @@ -4516,13 +4524,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca try writer.writeAll("));\n"); // Ensure padding bits have the expected value. 
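// Sketch of the guarantee (illustrative, not from the patch): bitcasting to
// e.g. a `u7` that is stored in a wider C integer emits `zig_wrap_u7(...)`
// so the unused high bits are truncated back to the value a `u7` must hold
// (sign-extended for signed types). When the C representation is an array
// of limbs, `bits` below is folded into the range (0, limb_bits] so only
// the most significant limb gets wrapped.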
- if (dest_ty.isAbiInt()) { + if (dest_ty.isAbiInt(mod)) { const dest_cty = try f.typeToCType(dest_ty, .complete); - const dest_info = dest_ty.intInfo(target); - var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) { - .unsigned => .int_unsigned, - .signed => .int_signed, - } }, .data = dest_info.bits }; + const dest_info = dest_ty.intInfo(mod); + var bits: u16 = dest_info.bits; var wrap_cty: ?CType = null; var need_bitcasts = false; @@ -4535,9 +4540,9 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca const elem_cty = f.indexToCType(pl.data.elem_type); wrap_cty = elem_cty.toSignedness(dest_info.signedness); need_bitcasts = wrap_cty.?.tag() == .zig_i128; - info_ty_pl.data -= 1; - info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8); - info_ty_pl.data += 1; + bits -= 1; + bits %= @intCast(u16, f.byteSize(elem_cty) * 8); + bits += 1; } try writer.writeAll(" = "); if (need_bitcasts) { @@ -4546,7 +4551,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca try writer.writeByte('('); } try writer.writeAll("zig_wrap_"); - const info_ty = Type.initPayload(&info_ty_pl.base); + const info_ty = try mod.intType(dest_info.signedness, bits); if (wrap_cty) |cty| try f.object.dg.renderCTypeForBuiltinFnName(writer, cty) else @@ -4675,6 +4680,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4683,11 +4689,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); try writer.writeAll("switch ("); - if (condition_ty.zigTypeTag() == .Bool) { + if (condition_ty.zigTypeTag(mod) == .Bool) { try writer.writeByte('('); try f.renderType(writer, Type.u1); try writer.writeByte(')'); - } else if (condition_ty.isPtrAtRuntime()) { + } else if (condition_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); @@ -4714,12 +4720,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { for (items) |item| { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); - if (condition_ty.isPtrAtRuntime()) { + if (condition_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -4764,6 +4770,7 @@ fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool { } fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; @@ -4778,7 +4785,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); const inst_ty = f.air.typeOfIndex(inst); - const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: { + const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: { const local = try f.allocLocal(inst, inst_ty); if (f.wantSafety()) { try f.writeCValue(writer, local, .Other); @@ 
-5025,6 +5032,7 @@ fn airIsNull( operator: []const u8, is_ptr: bool, ) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); @@ -5046,14 +5054,14 @@ fn airIsNull( const payload_ty = optional_ty.optionalChild(&payload_buf); var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) TypedValue{ .ty = Type.bool, .val = Value.true } - else if (optional_ty.isPtrLikeOptional()) + else if (optional_ty.isPtrLikeOptional(mod)) // operand is a regular pointer, test `operand !=/== NULL` TypedValue{ .ty = optional_ty, .val = Value.null } - else if (payload_ty.zigTypeTag() == .ErrorSet) + else if (payload_ty.zigTypeTag(mod) == .ErrorSet) TypedValue{ .ty = payload_ty, .val = Value.zero } - else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: { + else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; @@ -5070,6 +5078,7 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -5079,7 +5088,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return .none; } @@ -5087,7 +5096,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); try f.writeCValue(writer, operand, .Other); @@ -5104,6 +5113,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { } fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); @@ -5113,14 +5123,14 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const opt_ty = ptr_ty.childType(); const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; } const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { // the operand is just a regular pointer, no need to do anything special. 
// *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C try writer.writeAll(" = "); @@ -5134,6 +5144,7 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); @@ -5144,7 +5155,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { if (f.liveness.isUnused(inst)) { return .none; } @@ -5179,36 +5190,36 @@ fn fieldLocation( container_ty: Type, field_ptr_ty: Type, field_index: u32, - target: std.Target, + mod: *const Module, ) union(enum) { begin: void, field: CValue, byte_offset: u32, end: void, } { - return switch (container_ty.zigTypeTag()) { + return switch (container_ty.zigTypeTag(mod)) { .Struct => switch (container_ty.containerLayout()) { .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { if (container_ty.structFieldIsComptime(next_field_index)) continue; const field_ty = container_ty.structFieldType(next_field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; break .{ .field = if (container_ty.isSimpleTuple()) .{ .field = next_field_index } else .{ .identifier = container_ty.structFieldName(next_field_index) } }; - } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin, + } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0) - .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) } + .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } else .begin, }, .Union => switch (container_ty.containerLayout()) { .Auto, .Extern => { const field_ty = container_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return if (container_ty.unionTagTypeSafety() != null and - !container_ty.unionHasAllZeroBitFieldTypes()) + !container_ty.unionHasAllZeroBitFieldTypes(mod)) .{ .field = .{ .identifier = "payload" } } else .begin; @@ -5252,10 +5263,10 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const target = f.object.dg.module.getTarget(); const container_ptr_ty = f.air.typeOfIndex(inst); const container_ty = container_ptr_ty.childType(); @@ -5270,7 +5281,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, container_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) { + switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { var u8_ptr_pl = field_ptr_ty.ptrInfo(); @@ -5321,7 +5332,7 @@ fn fieldPtr( container_ptr_val: CValue, field_index: u32, ) !CValue { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; const container_ty = 
container_ptr_ty.elemType(); const field_ptr_ty = f.air.typeOfIndex(inst); @@ -5335,7 +5346,7 @@ fn fieldPtr( try f.renderType(writer, field_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) { + switch (fieldLocation(container_ty, field_ptr_ty, field_index, mod)) { .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), .field => |field| { try writer.writeByte('&'); @@ -5370,16 +5381,16 @@ fn fieldPtr( } fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{extra.struct_operand}); return .none; } - const target = f.object.dg.module.getTarget(); const struct_byval = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); const struct_ty = f.air.typeOf(extra.struct_operand); @@ -5396,32 +5407,21 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .{ .identifier = struct_ty.structFieldName(extra.field_index) }, .Packed => { const struct_obj = struct_ty.castTag(.@"struct").?.data; - const int_info = struct_ty.intInfo(target); + const int_info = struct_ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = struct_obj.packedFieldBitOffset(target, extra.field_index), + .data = struct_obj.packedFieldBitOffset(mod, extra.field_index), }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - const field_int_signedness = if (inst_ty.isAbiInt()) - inst_ty.intInfo(target).signedness + const field_int_signedness = if (inst_ty.isAbiInt(mod)) + inst_ty.intInfo(mod).signedness else .unsigned; - var field_int_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (field_int_signedness) { - .unsigned => .int_unsigned, - .signed => .int_signed, - } }, - .data = @intCast(u16, inst_ty.bitSize(target)), - }; - const field_int_ty = Type.initPayload(&field_int_pl.base); + const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -5432,7 +5432,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { - if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); @@ -5511,6 +5511,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { /// *(E!T) -> E /// Note that the result is never a pointer. 
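/// (Roughly: when the payload has runtime bits, this reads the `error`
/// member of the error-union representation; otherwise the representation
/// is the error value itself and is copied through unchanged.)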
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); @@ -5518,13 +5519,13 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.air.typeOf(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer; + const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits() and operand == .local and operand.local == local.new_local) { + if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) { // The store will be 'x = x'; elide it. return local; } @@ -5533,7 +5534,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBits(mod)) { try f.writeCValue(writer, operand, .Other); } else { if (!error_ty.errorSetIsEmpty()) @@ -5549,6 +5550,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); @@ -5558,7 +5560,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { + if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5584,10 +5586,11 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu } fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); - const repr_is_payload = inst_ty.optionalReprIsPayload(); + const repr_is_payload = inst_ty.optionalReprIsPayload(mod); const payload_ty = f.air.typeOf(ty_op.operand); const payload = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5615,11 +5618,12 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_ty = inst_ty.errorUnionSet(); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5653,6 +5657,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try 
f.resolveInst(ty_op.operand); @@ -5662,7 +5667,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = error_union_ty.errorUnionPayload(); // First, set the non-error value. - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); @@ -5703,12 +5708,13 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const payload = try f.resolveInst(ty_op.operand); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_ty = inst_ty.errorUnionSet(); try reap(f, inst, &.{ty_op.operand}); @@ -5735,6 +5741,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { } fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); @@ -5750,7 +5757,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try writer.writeAll(" = "); if (!error_ty.errorSetIsEmpty()) - if (payload_ty.hasRuntimeBits()) + if (payload_ty.hasRuntimeBits(mod)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -5768,6 +5775,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const } fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -5784,7 +5792,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (operand == .undef) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); - } else if (array_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); @@ -5801,6 +5809,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); @@ -5810,10 +5819,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { const target = f.object.dg.module.getTarget(); const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat()) if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend" - else if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) - if (inst_ty.isSignedInt()) "fix" else "fixuns" - else if (inst_ty.isRuntimeFloat() and operand_ty.isInt()) - if (operand_ty.isSignedInt()) "float" else "floatun" + else if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) + if (inst_ty.isSignedInt(mod)) "fix" else "fixuns" + else if (inst_ty.isRuntimeFloat() and operand_ty.isInt(mod)) + if (operand_ty.isSignedInt(mod)) 
"float" else "floatun" else unreachable; @@ -5822,19 +5831,19 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) { + if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) { try writer.writeAll("zig_wrap_"); try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty); try writer.writeByte('('); } try writer.writeAll("zig_"); try writer.writeAll(operation); - try writer.writeAll(compilerRtAbbrev(operand_ty, target)); - try writer.writeAll(compilerRtAbbrev(inst_ty, target)); + try writer.writeAll(compilerRtAbbrev(operand_ty, mod)); + try writer.writeAll(compilerRtAbbrev(inst_ty, mod)); try writer.writeByte('('); try f.writeCValue(writer, operand, .FunctionArgument); try writer.writeByte(')'); - if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) { + if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) { try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits); try writer.writeByte(')'); } @@ -5871,14 +5880,15 @@ fn airUnBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); const ref_ret = inst_scalar_cty.tag() == .array; @@ -5914,6 +5924,7 @@ fn airBinBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const operand_ty = f.air.typeOf(bin_op.lhs); @@ -5925,8 +5936,8 @@ fn airBinBuiltinCall( if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const scalar_ty = operand_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); + const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); const ref_ret = inst_scalar_cty.tag() == .array; @@ -5968,14 +5979,15 @@ fn airCmpBuiltinCall( operation: enum { cmp, operator }, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const operand_ty = f.air.typeOf(data.lhs); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); const ref_ret = inst_scalar_cty.tag() == .array; @@ -6017,6 +6029,7 @@ fn airCmpBuiltinCall( } fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); @@ -6030,15 +6043,13 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const new_value_mat = try 
Materialize.start(f, inst, writer, ty, new_value); try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.abiSize(target) * 8), - }; - const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const repr_ty = if (ty.isRuntimeFloat()) + mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + else + ty; const local = try f.allocLocal(inst, inst_ty); - if (inst_ty.isPtrLikeOptional()) { + if (inst_ty.isPtrLikeOptional(mod)) { { const a = try Assignment.start(f, writer, ty); try f.writeCValue(writer, local, .Other); @@ -6123,6 +6134,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.air.typeOfIndex(inst); @@ -6135,14 +6147,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, writer, ty, operand); try reap(f, inst, &.{ pl_op.operand, extra.operand }); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.abiSize(target) * 8), - }; + const repr_bits = @intCast(u16, ty.abiSize(mod) * 8); const is_float = ty.isRuntimeFloat(); - const is_128 = repr_pl.data == 128; - const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty; + const is_128 = repr_bits == 128; + const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty; const local = try f.allocLocal(inst, inst_ty); try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())}); @@ -6181,18 +6189,17 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const atomic_load = f.air.instructions.items(.data)[inst].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); const ptr_ty = f.air.typeOf(atomic_load.ptr); const ty = ptr_ty.childType(); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.abiSize(target) * 8), - }; - const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const repr_ty = if (ty.isRuntimeFloat()) + mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + else + ty; const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); @@ -6218,6 +6225,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.air.typeOf(bin_op.lhs); const ty = ptr_ty.childType(); @@ -6228,12 +6236,10 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa const element_mat = try Materialize.start(f, inst, writer, ty, element); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, 
ty.abiSize(target) * 8), - }; - const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const repr_ty = if (ty.isRuntimeFloat()) + mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + else + ty; try writer.writeAll("zig_atomic_store((zig_atomic("); try f.renderType(writer, ty); @@ -6262,14 +6268,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo } fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const dest_ty = f.air.typeOf(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.air.typeOf(bin_op.rhs); - const target = f.object.dg.module.getTarget(); - const elem_abi_size = elem_ty.abiSize(target); - const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const elem_abi_size = elem_ty.abiSize(mod); + const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6383,12 +6389,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); const dest_ty = f.air.typeOf(bin_op.lhs); const src_ty = f.air.typeOf(bin_op.rhs); - const target = f.object.dg.module.getTarget(); const writer = f.object.writer(); try writer.writeAll("memcpy("); @@ -6399,7 +6405,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { switch (dest_ty.ptrSize()) { .Slice => { const elem_ty = dest_ty.childType(); - const elem_abi_size = elem_ty.abiSize(target); + const elem_abi_size = elem_ty.abiSize(mod); try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }); if (elem_abi_size > 1) { try writer.print(" * {d});\n", .{elem_abi_size}); @@ -6410,7 +6416,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { .One => { const array_ty = dest_ty.childType(); const elem_ty = array_ty.childType(); - const elem_abi_size = elem_ty.abiSize(target); + const elem_abi_size = elem_ty.abiSize(mod); const len = array_ty.arrayLen() * elem_abi_size; try writer.print("{d});\n", .{len}); }, @@ -6422,14 +6428,14 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const union_ptr = try f.resolveInst(bin_op.lhs); const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); const union_ty = f.air.typeOf(bin_op.lhs).childType(); - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety().?; @@ -6443,14 +6449,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const union_ty = f.air.typeOf(ty_op.operand); - const target = 
f.object.dg.module.getTarget(); - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const inst_ty = f.air.typeOfIndex(inst); @@ -6501,13 +6507,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6555,6 +6562,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { } fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -6562,8 +6570,6 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); - const module = f.object.dg.module; - const target = module.getTarget(); const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); @@ -6581,7 +6587,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("] = "); var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target); + const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); var src_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = @intCast(u64, mask_elem ^ mask_elem >> 63), @@ -6597,16 +6603,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { } fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const reduce = f.air.instructions.items(.data)[inst].reduce; - const target = f.object.dg.module.getTarget(); + const target = mod.getTarget(); const scalar_ty = f.air.typeOfIndex(inst); const operand = try f.resolveInst(reduce.operand); try reap(f, inst, &.{reduce.operand}); const operand_ty = f.air.typeOf(reduce.operand); const writer = f.object.writer(); - const use_operator = scalar_ty.bitSize(target) <= 64; + const use_operator = scalar_ty.bitSize(mod) <= 64; const op: union(enum) { const Func = struct { operation: []const u8, info: BuiltinInfo = .none }; float_op: Func, @@ -6617,28 +6624,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } }, .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } }, .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } }, - .Min => switch (scalar_ty.zigTypeTag()) { + .Min => switch (scalar_ty.zigTypeTag(mod)) { .Int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" }, }, .Float => .{ .float_op = .{ .operation = "fmin" } }, else => unreachable, }, - .Max => switch (scalar_ty.zigTypeTag()) { + .Max => switch (scalar_ty.zigTypeTag(mod)) { .Int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" }, }, .Float => .{ .float_op = .{ .operation = "fmax" } }, else => unreachable, }, - .Add => switch (scalar_ty.zigTypeTag()) { + .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => if 
(use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits }, }, .Float => .{ .builtin = .{ .operation = "add" } }, else => unreachable, }, - .Mul => switch (scalar_ty.zigTypeTag()) { + .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits }, }, @@ -6680,22 +6687,22 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { .Or, .Xor, .Add => Value.zero, - .And => switch (scalar_ty.zigTypeTag()) { + .And => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - else => switch (scalar_ty.intInfo(target).signedness) { - .unsigned => try scalar_ty.maxInt(stack.get(), target), + else => switch (scalar_ty.intInfo(mod).signedness) { + .unsigned => try scalar_ty.maxInt(stack.get(), mod), .signed => Value.negative_one, }, }, - .Min => switch (scalar_ty.zigTypeTag()) { + .Min => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - .Int => try scalar_ty.maxInt(stack.get(), target), + .Int => try scalar_ty.maxInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, - .Max => switch (scalar_ty.zigTypeTag()) { + .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.zero, - .Int => try scalar_ty.minInt(stack.get(), target), + .Int => try scalar_ty.minInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, @@ -6753,6 +6760,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.air.typeOfIndex(inst); const len = @intCast(usize, inst_ty.arrayLen()); @@ -6770,11 +6778,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } } - const target = f.object.dg.module.getTarget(); - const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - switch (inst_ty.zigTypeTag()) { + switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => { const elem_ty = inst_ty.childType(); const a = try Assignment.init(f, elem_ty); @@ -6799,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .Auto, .Extern => for (resolved_elements, 0..) 
|element, field_i| { if (inst_ty.structFieldIsComptime(field_i)) continue; const field_ty = inst_ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const a = try Assignment.start(f, writer, field_ty); try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) @@ -6813,13 +6819,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .Packed => { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - const int_info = inst_ty.intInfo(target); + const int_info = inst_ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); @@ -6828,7 +6830,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (0..elements.len) |field_i| { if (inst_ty.structFieldIsComptime(field_i)) continue; const field_ty = inst_ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) { try writer.writeAll("zig_or_"); @@ -6841,7 +6843,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (resolved_elements, 0..) |element, field_i| { if (inst_ty.structFieldIsComptime(field_i)) continue; const field_ty = inst_ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(", "); // TODO: Skip this entire shift if val is 0? 
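The packed-struct path in this and the following hunk ORs each field into the
backing integer at its accumulated bit offset (`bit_offset_val_pl.data +=
field_ty.bitSize(mod)`). A minimal sketch of the resulting layout, using a
hypothetical two-field struct that does not appear in the patch:

    // packed struct { a: u3, b: u5 } packs into a u8 backing integer,
    // first field in the least significant bits:
    const a: u3 = 0b101;
    const b: u5 = 0b11010;
    const backing: u8 = @as(u8, a) | (@as(u8, b) << 3); // `b` sits at bit offset 3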
@@ -6849,13 +6851,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty); try writer.writeByte('('); - if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) { + if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) { try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument); } else { try writer.writeByte('('); try f.renderType(writer, inst_ty); try writer.writeByte(')'); - if (field_ty.isPtrAtRuntime()) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, switch (int_info.signedness) { .unsigned => Type.usize, @@ -6872,7 +6874,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); empty = false; } @@ -6886,11 +6888,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = f.air.typeOfIndex(inst); - const target = f.object.dg.module.getTarget(); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_name = union_obj.fields.keys()[extra.field_index]; const payload_ty = f.air.typeOf(extra.init); @@ -6908,7 +6910,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { } const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: { - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name).?; @@ -6991,13 +6993,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const operand_ty = f.air.typeOf(un_op); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, operand_ty); @@ -7016,13 +7019,14 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -7043,6 +7047,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal } fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const lhs = try f.resolveInst(bin_op.lhs); @@ -7050,7 +7055,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = 
inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -7074,6 +7079,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa } fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data; @@ -7083,7 +7089,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -7279,8 +7285,9 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 { }; } -fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 { - return if (ty.isInt()) switch (ty.intInfo(target).bits) { +fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 { + const target = mod.getTarget(); + return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) { 1...32 => "si", 33...64 => "di", 65...128 => "ti", @@ -7407,7 +7414,7 @@ fn undefPattern(comptime IntType: type) IntType { const FormatIntLiteralContext = struct { dg: *DeclGen, - int_info: std.builtin.Type.Int, + int_info: InternPool.Key.IntType, kind: CType.Kind, cty: CType, val: Value, @@ -7418,7 +7425,8 @@ fn formatIntLiteral( options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - const target = data.dg.module.getTarget(); + const mod = data.dg.module; + const target = mod.getTarget(); const ExpectedContents = struct { const base = 10; @@ -7449,7 +7457,7 @@ fn formatIntLiteral( }; undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); break :blk undef_int.toConst(); - } else data.val.toBigInt(&int_buf, target); + } else data.val.toBigInt(&int_buf, mod); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8); @@ -7684,7 +7692,8 @@ const Vectorize = struct { index: CValue = .none, pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { - return if (ty.zigTypeTag() == .Vector) index: { + const mod = f.object.dg.module; + return if (ty.zigTypeTag(mod) == .Vector) index: { var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() }; const local = try f.allocLocal(inst, Type.usize); @@ -7727,10 +7736,10 @@ const LowerFnRetTyBuffer = struct { values: [1]Value, payload: Type.Payload.AnonStruct, }; -fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type { - if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn); +fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type { + if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.initTag(.noreturn); - if (lowersToArray(ret_ty, target)) { + if (lowersToArray(ret_ty, mod)) { buffer.names = [1][]const u8{"array"}; buffer.types = [1]Type{ret_ty}; buffer.values = [1]Value{Value.initTag(.unreachable_value)}; @@ -7742,13 +7751,13 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) T return Type.initPayload(&buffer.payload.base); } - return if (ret_ty.hasRuntimeBitsIgnoreComptime()) ret_ty else Type.void; + return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else 
Type.void; } -fn lowersToArray(ty: Type, target: std.Target) bool { - return switch (ty.zigTypeTag()) { +fn lowersToArray(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => return true, - else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null, + else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null, }; } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 6116d070e6..5064b84b1d 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -292,19 +292,19 @@ pub const CType = extern union { .abi = std.math.log2_int(u32, abi_alignment), }; } - pub fn abiAlign(ty: Type, target: Target) AlignAs { - const abi_align = ty.abiAlignment(target); + pub fn abiAlign(ty: Type, mod: *const Module) AlignAs { + const abi_align = ty.abiAlignment(mod); return init(abi_align, abi_align); } - pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs { + pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs { return init( - struct_ty.structFieldAlign(field_i, target), - struct_ty.structFieldType(field_i).abiAlignment(target), + struct_ty.structFieldAlign(field_i, mod), + struct_ty.structFieldType(field_i).abiAlignment(mod), ); } - pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs { + pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs { const union_obj = union_ty.cast(Type.Payload.Union).?.data; - const union_payload_align = union_obj.abiAlignment(target, false); + const union_payload_align = union_obj.abiAlignment(mod, false); return init(union_payload_align, union_payload_align); } @@ -344,8 +344,8 @@ pub const CType = extern union { return self.map.entries.items(.hash)[index - Tag.no_payload_count]; } - pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index { - const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } }; + pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index { + const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } }; var convert: Convert = undefined; convert.initType(ty, kind, lookup) catch unreachable; @@ -405,7 +405,7 @@ pub const CType = extern union { ); if (!gop.found_existing) { errdefer _ = self.set.map.pop(); - gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert); + gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert); } if (std.debug.runtime_safety) { const adapter = TypeAdapter64{ @@ -1236,10 +1236,10 @@ pub const CType = extern union { } pub const Lookup = union(enum) { - fail: Target, + fail: *Module, imm: struct { set: *const Store.Set, - target: Target, + mod: *Module, }, mut: struct { promoted: *Store.Promoted, @@ -1254,10 +1254,14 @@ pub const CType = extern union { } pub fn getTarget(self: @This()) Target { + return self.getModule().getTarget(); + } + + pub fn getModule(self: @This()) *Module { return switch (self) { - .fail => |target| target, - .imm => |imm| imm.target, - .mut => |mut| mut.mod.getTarget(), + .fail => |mod| mod, + .imm => |imm| imm.mod, + .mut => |mut| mut.mod, }; } @@ -1272,7 +1276,7 @@ pub const CType = extern union { pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index { return switch (self) { .fail => null, - .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind), + .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind), .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind), }; } @@ -1284,7 +1288,7 @@ pub 
const CType = extern union { pub fn freeze(self: @This()) @This() { return switch (self) { .fail, .imm => self, - .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } }, + .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } }, }; } }; @@ -1338,7 +1342,7 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "array", .type = array_idx, - .alignas = AlignAs.abiAlign(ty, lookup.getTarget()), + .alignas = AlignAs.abiAlign(ty, lookup.getModule()), }; self.initAnon(kind, fwd_idx, 1); } else self.init(switch (kind) { @@ -1350,12 +1354,12 @@ pub const CType = extern union { } pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { - const target = lookup.getTarget(); + const mod = lookup.getModule(); self.* = undefined; - if (!ty.isFnOrHasRuntimeBitsIgnoreComptime()) + if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) self.init(.void) - else if (ty.isAbiInt()) switch (ty.tag()) { + else if (ty.isAbiInt(mod)) switch (ty.tag()) { .usize => self.init(.uintptr_t), .isize => self.init(.intptr_t), .c_char => self.init(.char), @@ -1367,13 +1371,13 @@ pub const CType = extern union { .c_ulong => self.init(.@"unsigned long"), .c_longlong => self.init(.@"long long"), .c_ulonglong => self.init(.@"unsigned long long"), - else => switch (tagFromIntInfo(ty.intInfo(target))) { + else => switch (tagFromIntInfo(ty.intInfo(mod))) { .void => unreachable, else => |t| self.init(t), .array => switch (kind) { .forward, .complete, .global => { - const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); + const abi_size = ty.abiSize(mod); + const abi_align = ty.abiAlignment(mod); self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ .len = @divExact(abi_size, abi_align), .elem_type = tagFromIntInfo(.{ @@ -1389,7 +1393,7 @@ pub const CType = extern union { .payload => unreachable, }, }, - } else switch (ty.zigTypeTag()) { + } else switch (ty.zigTypeTag(mod)) { .Frame => unreachable, .AnyFrame => unreachable, @@ -1434,12 +1438,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "ptr", .type = ptr_idx, - .alignas = AlignAs.abiAlign(ptr_ty, target), + .alignas = AlignAs.abiAlign(ptr_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "len", .type = Tag.uintptr_t.toIndex(), - .alignas = AlignAs.abiAlign(Type.usize, target), + .alignas = AlignAs.abiAlign(Type.usize, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1462,12 +1466,8 @@ pub const CType = extern union { }, }; - var host_int_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = info.host_size * 8, - }; const pointee_ty = if (info.host_size > 0 and info.vector_index == .none) - Type.initPayload(&host_int_pl.base) + try mod.intType(.unsigned, info.host_size * 8) else info.pointee_type; @@ -1490,11 +1490,9 @@ pub const CType = extern union { if (ty.castTag(.@"struct")) |struct_obj| { try self.initType(struct_obj.data.backing_int_ty, kind, lookup); } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - try self.initType(Type.initPayload(&buf.base), kind, lookup); + const bits = @intCast(u16, ty.bitSize(mod)); + const int_ty = try mod.intType(.unsigned, bits); + try self.initType(int_ty, kind, lookup); } } else if (ty.isTupleOrAnonStruct()) { if (lookup.isMutable()) { @@ -1505,7 +1503,7 @@ pub const CType = extern union { }) |field_i| { const field_ty = ty.structFieldType(field_i); if 
((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter => .complete, @@ -1555,7 +1553,7 @@ pub const CType = extern union { self.storage.anon.fields[field_count] = .{ .name = "payload", .type = payload_idx.?, - .alignas = AlignAs.unionPayloadAlign(ty, target), + .alignas = AlignAs.unionPayloadAlign(ty, mod), }; field_count += 1; } @@ -1563,7 +1561,7 @@ pub const CType = extern union { self.storage.anon.fields[field_count] = .{ .name = "tag", .type = tag_idx.?, - .alignas = AlignAs.abiAlign(tag_ty.?, target), + .alignas = AlignAs.abiAlign(tag_ty.?, mod), }; field_count += 1; } @@ -1576,7 +1574,7 @@ pub const CType = extern union { } }; self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; } else self.init(.@"struct"); - } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) { + } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) { self.init(.void); } else { var is_packed = false; @@ -1586,9 +1584,9 @@ pub const CType = extern union { else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_align = AlignAs.fieldAlign(ty, field_i, target); + const field_align = AlignAs.fieldAlign(ty, field_i, mod); if (field_align.@"align" < field_align.abi) { is_packed = true; if (!lookup.isMutable()) break; @@ -1643,8 +1641,8 @@ pub const CType = extern union { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - if (ty.optionalReprIsPayload()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (ty.optionalReprIsPayload(mod)) { try self.initType(payload_ty, kind, lookup); } else if (switch (kind) { .forward, .forward_parameter => @as(Index, undefined), @@ -1661,12 +1659,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, target), + .alignas = AlignAs.abiAlign(payload_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "is_null", .type = Tag.bool.toIndex(), - .alignas = AlignAs.abiAlign(Type.bool, target), + .alignas = AlignAs.abiAlign(Type.bool, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1699,12 +1697,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, target), + .alignas = AlignAs.abiAlign(payload_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "error", .type = error_idx, - .alignas = AlignAs.abiAlign(error_ty, target), + .alignas = AlignAs.abiAlign(error_ty, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1733,7 +1731,7 @@ pub const CType = extern union { }; _ = try lookup.typeToIndex(info.return_type, param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(param_type, param_kind); } } @@ -1900,16 +1898,16 @@ pub const CType = extern union { } } - fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType { + fn 
createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType { var convert: Convert = undefined; - try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } }); - return createFromConvert(store, ty, target, kind, &convert); + try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } }); + return createFromConvert(store, ty, mod, kind, &convert); } fn createFromConvert( store: *Store.Promoted, ty: Type, - target: Target, + mod: *Module, kind: Kind, convert: Convert, ) !CType { @@ -1930,7 +1928,7 @@ pub const CType = extern union { .packed_struct, .packed_union, => { - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); const fields_len = switch (zig_ty_tag) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), @@ -1941,7 +1939,7 @@ pub const CType = extern union { for (0..fields_len) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; c_fields_len += 1; } @@ -1950,7 +1948,7 @@ pub const CType = extern union { for (0..fields_len) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; fields_pl[c_field_i] = .{ @@ -1962,12 +1960,12 @@ pub const CType = extern union { .Union => ty.unionFields().keys()[field_i], else => unreachable, }), - .type = store.set.typeToIndex(field_ty, target, switch (kind) { + .type = store.set.typeToIndex(field_ty, mod, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter, .payload => .complete, .global => .global, }).?, - .alignas = AlignAs.fieldAlign(ty, field_i, target), + .alignas = AlignAs.fieldAlign(ty, field_i, mod), }; } @@ -2004,7 +2002,7 @@ pub const CType = extern union { const struct_pl = try arena.create(Payload.Aggregate); struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?, } }; return initPayload(struct_pl); }, @@ -2026,21 +2024,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2067,12 +2065,12 @@ pub const CType = extern union { } pub fn eql(self: @This(), ty: Type, cty: CType) bool { + const mod = self.lookup.getModule(); switch (self.convert.value) { .cty => |c| return 
c.eql(cty), .tag => |t| { if (t != cty.tag()) return false; - const target = self.lookup.getTarget(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2084,7 +2082,7 @@ pub const CType = extern union { ]u8 = undefined; const c_fields = cty.cast(Payload.Fields).?.data; - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(), @@ -2093,7 +2091,7 @@ pub const CType = extern union { }) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; const c_field = &c_fields[c_field_i]; @@ -2113,7 +2111,7 @@ pub const CType = extern union { else => unreachable, }, mem.span(c_field.name), - ) or AlignAs.fieldAlign(ty, field_i, target).@"align" != + ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" != c_field.alignas.@"align") return false; } return true; @@ -2146,7 +2144,7 @@ pub const CType = extern union { .function, .varargs_function, => { - if (ty.zigTypeTag() != .Fn) return false; + if (ty.zigTypeTag(mod) != .Fn) return false; const info = ty.fnInfo(); assert(!info.is_generic); @@ -2162,7 +2160,7 @@ pub const CType = extern union { var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; @@ -2202,7 +2200,7 @@ pub const CType = extern union { .tag => |t| { autoHash(hasher, t); - const target = self.lookup.getTarget(); + const mod = self.lookup.getModule(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2211,15 +2209,15 @@ pub const CType = extern union { std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; - const zig_ty_tag = ty.zigTypeTag(); - for (0..switch (ty.zigTypeTag()) { + const zig_ty_tag = ty.zigTypeTag(mod); + for (0..switch (ty.zigTypeTag(mod)) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { .forward, .forward_parameter => .forward, @@ -2234,7 +2232,7 @@ pub const CType = extern union { .Union => ty.unionFields().keys()[field_i], else => unreachable, }); - autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align"); + autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); } }, @@ -2271,7 +2269,7 @@ pub const CType = extern union { self.updateHasherRecurse(hasher, info.return_type, param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, param_type, param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index beb2309455..c3d3da0d32 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -598,7 +598,7 @@ pub const Object = struct { }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const 
slice_alignment = slice_ty.abiAlignment(target); + const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); @@ -880,28 +880,28 @@ pub const Object = struct { pub fn updateFunc( o: *Object, - module: *Module, + mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness, ) !void { const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); - const target = module.getTarget(); + const decl = mod.declPtr(decl_index); + const target = mod.getTarget(); var dg: DeclGen = .{ .context = o.context, .object = o, - .module = module, + .module = mod, .decl_index = decl_index, .decl = decl, .err_msg = null, - .gpa = module.gpa, + .gpa = mod.gpa, }; const llvm_func = try dg.resolveLlvmFunction(decl_index); - if (module.align_stack_fns.get(func)) |align_info| { + if (mod.align_stack_fns.get(func)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); dg.addFnAttr(llvm_func, "noinline"); } else { @@ -922,7 +922,7 @@ pub const Object = struct { } // TODO: disable this if safety is off for the function scope - const ssp_buf_size = module.comp.bin_file.options.stack_protector; + const ssp_buf_size = mod.comp.bin_file.options.stack_protector; if (ssp_buf_size != 0) { var buf: [12]u8 = undefined; const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable; @@ -931,7 +931,7 @@ pub const Object = struct { } // TODO: disable this if safety is off for the function scope - if (module.comp.bin_file.options.stack_check) { + if (mod.comp.bin_file.options.stack_check) { dg.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack"); } else if (target.os.tag == .uefi) { dg.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); @@ -954,17 +954,17 @@ pub const Object = struct { // This gets the LLVM values from the function and stores them in `dg.args`. 
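// --- illustrative sketch, not part of the patch ---
// The mechanical change running through these hunks swaps a bare
// `std.Target` parameter for `mod: *Module`, because Type/Value queries now
// go through the Module (and its InternPool); the target stays reachable
// through the module. A minimal stand-in, with `Module` reduced to a stub:
const std = @import("std");
const Module = struct {
    target: std.Target,
    pub fn getTarget(mod: *const Module) std.Target {
        return mod.target;
    }
};
// before: fn abiAlignment(ty: Type, target: std.Target) u32
// after:  fn abiAlignment(ty: Type, mod: *const Module) u32
fn ptrBytes(mod: *const Module) u64 {
    // the target is still derivable wherever only target info is needed
    return mod.getTarget().ptrBitWidth() / 8;
}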
const fn_info = decl.ty.fnInfo(); - const sret = firstParamSRet(fn_info, target); + const sret = firstParamSRet(fn_info, mod); const ret_ptr = if (sret) llvm_func.getParam(0) else null; const gpa = dg.gpa; - if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) { + if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) { .signed => dg.addAttr(llvm_func, 0, "signext"), .unsigned => dg.addAttr(llvm_func, 0, "zeroext"), }; - const err_return_tracing = fn_info.return_type.isError() and - module.comp.bin_file.options.error_return_tracing; + const err_return_tracing = fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; const err_ret_trace = if (err_return_tracing) llvm_func.getParam(@boolToInt(ret_ptr != null)) @@ -989,8 +989,8 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { - const alignment = param_ty.abiAlignment(target); + if (isByRef(param_ty, mod)) { + const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = param.typeOf(); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); @@ -1007,14 +1007,14 @@ pub const Object = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); llvm_arg_i += 1; try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { args.appendAssumeCapacity(param); } else { const load_inst = builder.buildLoad(param_llvm_ty, param, ""); @@ -1026,14 +1026,14 @@ pub const Object = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addArgAttr(llvm_func, llvm_arg_i, "noundef"); llvm_arg_i += 1; try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { args.appendAssumeCapacity(param); } else { const load_inst = builder.buildLoad(param_llvm_ty, param, ""); @@ -1048,10 +1048,10 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try dg.lowerType(param_ty); - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); const int_llvm_ty = dg.context.intType(abi_size * 8); const alignment = @max( - param_ty.abiAlignment(target), + param_ty.abiAlignment(mod), dg.object.target_data.abiAlignmentOfType(int_llvm_ty), ); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); @@ -1060,7 +1060,7 @@ pub const Object = struct { try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { args.appendAssumeCapacity(arg_ptr); } else { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); @@ -1078,7 +1078,7 @@ pub const Object = struct { dg.addArgAttr(llvm_func, llvm_arg_i, "noalias"); } } - if (param_ty.zigTypeTag() != .Optional) { + if (param_ty.zigTypeTag(mod) != .Optional) { dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); } if (!ptr_info.mutable) { @@ -1087,7 +1087,7 @@ pub const 
Object = struct { if (ptr_info.@"align" != 0) { dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align"); } else { - const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1); + const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1); dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align); } const ptr_param = llvm_func.getParam(llvm_arg_i); @@ -1105,7 +1105,7 @@ pub const Object = struct { const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(target); + const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); for (field_types, 0..) |_, field_i_usize| { @@ -1117,7 +1117,7 @@ pub const Object = struct { store_inst.setAlignment(target.ptrBitWidth() / 8); } - const is_by_ref = isByRef(param_ty); + const is_by_ref = isByRef(param_ty, mod); const loaded = if (is_by_ref) arg_ptr else l: { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); load_inst.setAlignment(param_alignment); @@ -1139,11 +1139,11 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { try args.append(arg_ptr); } else { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); @@ -1157,11 +1157,11 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { try args.append(arg_ptr); } else { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); @@ -1180,7 +1180,7 @@ pub const Object = struct { const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and - !module.decl_exports.contains(decl_index); + !mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type.isNoReturn()) llvm.DIFlags.NoReturn else @@ -1196,7 +1196,7 @@ pub const Object = struct { true, // is definition line_number + func.lbrace_line, // scope line llvm.DIFlags.StaticMember | noret_bit, - module.comp.bin_file.options.optimize_mode != .Debug, + mod.comp.bin_file.options.optimize_mode != .Debug, null, // decl_subprogram ); try dg.object.di_map.put(gpa, decl, subprogram.toNode()); @@ -1219,7 +1219,7 @@ pub const Object = struct { .func_inst_table = .{}, .llvm_func = llvm_func, .blocks = .{}, - .single_threaded = module.comp.bin_file.options.single_threaded, + .single_threaded = mod.comp.bin_file.options.single_threaded, .di_scope = di_scope, .di_file = di_file, .base_line = dg.decl.src_line, @@ -1232,14 +1232,14 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try 
module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?); + try mod.failed_decls.put(mod.gpa, decl_index, dg.err_msg.?); dg.err_msg = null; return; }, else => |e| return e, }; - try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void { @@ -1275,37 +1275,40 @@ pub const Object = struct { pub fn updateDeclExports( self: *Object, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { + const gpa = mod.gpa; // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. const llvm_global = self.decl_map.get(decl_index) orelse return; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.isExtern()) { - const is_wasm_fn = module.getTarget().isWasm() and try decl.isFunction(); + const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod); const mangle_name = is_wasm_fn and decl.getExternFn().?.lib_name != null and !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c"); const decl_name = if (mangle_name) name: { - const tmp = try std.fmt.allocPrintZ(module.gpa, "{s}|{s}", .{ decl.name, decl.getExternFn().?.lib_name.? }); + const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ + decl.name, decl.getExternFn().?.lib_name.?, + }); break :name tmp.ptr; } else decl.name; - defer if (mangle_name) module.gpa.free(std.mem.sliceTo(decl_name, 0)); + defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0)); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { if (other_global != llvm_global) { log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name}); - try self.extern_collisions.put(module.gpa, decl_index, {}); + try self.extern_collisions.put(gpa, decl_index, {}); } } llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); if (self.di_map.get(decl)) |di_node| { - if (try decl.isFunction()) { + if (try decl.isFunction(mod)) { const di_func = @ptrCast(*llvm.DISubprogram, di_node); const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name)); di_func.replaceLinkageName(linkage_name); @@ -1329,9 +1332,9 @@ pub const Object = struct { const exp_name = exports[0].options.name; llvm_global.setValueName2(exp_name.ptr, exp_name.len); llvm_global.setUnnamedAddr(.False); - if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); + if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); if (self.di_map.get(decl)) |di_node| { - if (try decl.isFunction()) { + if (try decl.isFunction(mod)) { const di_func = @ptrCast(*llvm.DISubprogram, di_node); const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); di_func.replaceLinkageName(linkage_name); @@ -1353,8 +1356,8 @@ pub const Object = struct { .protected => llvm_global.setVisibility(.Protected), } if (exports[0].options.section) |section| { - const section_z = try module.gpa.dupeZ(u8, section); - defer module.gpa.free(section_z); + const section_z = try gpa.dupeZ(u8, section); + defer gpa.free(section_z); llvm_global.setSection(section_z); } if 
(decl.val.castTag(.variable)) |variable| { @@ -1370,8 +1373,8 @@ pub const Object = struct { // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. for (exports[1..]) |exp| { - const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name); - defer module.gpa.free(exp_name_z); + const exp_name_z = try gpa.dupeZ(u8, exp.options.name); + defer gpa.free(exp_name_z); if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| { alias.setAliasee(llvm_global); @@ -1385,14 +1388,14 @@ pub const Object = struct { } } } else { - const fqn = try decl.getFullyQualifiedName(module); - defer module.gpa.free(fqn); + const fqn = try decl.getFullyQualifiedName(mod); + defer gpa.free(fqn); llvm_global.setValueName2(fqn.ptr, fqn.len); llvm_global.setLinkage(.Internal); - if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); if (decl.val.castTag(.variable)) |variable| { - const single_threaded = module.comp.bin_file.options.single_threaded; + const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.data.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { @@ -1479,14 +1482,15 @@ pub const Object = struct { const gpa = o.gpa; const target = o.target; const dib = o.di_builder.?; - switch (ty.zigTypeTag()) { + const mod = o.module; + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => { const di_type = dib.createBasicType("void", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); return di_type; }, .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); assert(info.bits != 0); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -1494,7 +1498,7 @@ pub const Object = struct { .signed => DW.ATE.signed, .unsigned => DW.ATE.unsigned, }; - const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types + const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types const di_type = dib.createBasicType(name, di_bits, dwarf_encoding); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); return di_type; @@ -1503,7 +1507,7 @@ pub const Object = struct { const owner_decl_index = ty.getOwnerDecl(); const owner_decl = o.module.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. @@ -1522,9 +1526,8 @@ pub const Object = struct { }; const field_index_val = Value.initPayload(&buf_field_index.base); - var buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&buffer); - const int_info = ty.intInfo(target); + const int_ty = ty.intTagType(); + const int_info = ty.intInfo(mod); assert(int_info.bits != 0); for (field_names, 0..) 
|field_name, i| { @@ -1536,7 +1539,7 @@ pub const Object = struct { const field_int_val = field_index_val.enumToInt(ty, &buf_u64); var bigint_space: Value.BigIntSpace = undefined; - const bigint = field_int_val.toBigInt(&bigint_space, target); + const bigint = field_int_val.toBigInt(&bigint_space, mod); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -1566,8 +1569,8 @@ pub const Object = struct { name, di_file, owner_decl.src_node + 1, - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, + ty.abiSize(mod) * 8, + ty.abiAlignment(mod) * 8, enumerators.ptr, @intCast(c_int, enumerators.len), try o.lowerDebugType(int_ty, .full), @@ -1604,7 +1607,7 @@ pub const Object = struct { !ptr_info.mutable or ptr_info.@"volatile" or ptr_info.size == .Many or ptr_info.size == .C or - !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) + !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) { var payload: Type.Payload.Pointer = .{ .data = .{ @@ -1623,7 +1626,7 @@ pub const Object = struct { }, }, }; - if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) { + if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) { payload.data.pointee_type = Type.anyopaque; } const bland_ptr_ty = Type.initPayload(&payload.base); @@ -1657,10 +1660,10 @@ pub const Object = struct { break :blk fwd_decl; }; - const ptr_size = ptr_ty.abiSize(target); - const ptr_align = ptr_ty.abiAlignment(target); - const len_size = len_ty.abiSize(target); - const len_align = len_ty.abiAlignment(target); + const ptr_size = ptr_ty.abiSize(mod); + const ptr_align = ptr_ty.abiAlignment(mod); + const len_size = len_ty.abiSize(mod); + const len_align = len_ty.abiAlignment(mod); var offset: u64 = 0; offset += ptr_size; @@ -1697,8 +1700,8 @@ pub const Object = struct { name.ptr, di_file, line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &fields, @@ -1719,7 +1722,7 @@ pub const Object = struct { const ptr_di_ty = dib.createPointerType( elem_di_ty, target.ptrBitWidth(), - ty.ptrAlignment(target) * 8, + ty.ptrAlignment(mod) * 8, name, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -1750,8 +1753,8 @@ pub const Object = struct { }, .Array => { const array_di_ty = dib.createArrayType( - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, + ty.abiSize(mod) * 8, + ty.abiAlignment(mod) * 8, try o.lowerDebugType(ty.childType(), .full), @intCast(c_int, ty.arrayLen()), ); @@ -1760,14 +1763,14 @@ pub const Object = struct { return array_di_ty; }, .Vector => { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); // Vector elements cannot be padded since that would make // @bitSizOf(elem) * len > @bitSizOf(vec). // Neither gdb nor lldb seem to be able to display non-byte sized // vectors properly. 
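// --- illustrative sketch, not part of the patch ---
// The debug-info paths above deliberately report `abiSize(mod) * 8` bits
// rather than the raw bit size, because (per the comments) lldb cannot
// handle non-byte-sized types. The rounding that policy implies:
fn diBits(bit_size: u64) u64 {
    // round up to whole bytes, then express the result in bits again
    const bytes = @import("std").math.divCeil(u64, bit_size, 8) catch unreachable;
    return bytes * 8;
}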
- const elem_di_type = switch (elem_ty.zigTypeTag()) { + const elem_di_type = switch (elem_ty.zigTypeTag(mod)) { .Int => blk: { - const info = elem_ty.intInfo(target); + const info = elem_ty.intInfo(mod); assert(info.bits != 0); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -1782,8 +1785,8 @@ pub const Object = struct { }; const vector_di_ty = dib.createVectorType( - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, + ty.abiSize(mod) * 8, + ty.abiAlignment(mod) * 8, elem_di_type, ty.vectorLen(), ); @@ -1796,13 +1799,13 @@ pub const Object = struct { defer gpa.free(name); var buf: Type.Payload.ElemType = undefined; const child_ty = ty.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { const di_bits = 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { const ptr_di_ty = try o.lowerDebugType(child_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module }); @@ -1826,10 +1829,10 @@ pub const Object = struct { }; const non_null_ty = Type.u8; - const payload_size = child_ty.abiSize(target); - const payload_align = child_ty.abiAlignment(target); - const non_null_size = non_null_ty.abiSize(target); - const non_null_align = non_null_ty.abiAlignment(target); + const payload_size = child_ty.abiSize(mod); + const payload_align = child_ty.abiAlignment(mod); + const non_null_size = non_null_ty.abiSize(mod); + const non_null_align = non_null_ty.abiAlignment(mod); var offset: u64 = 0; offset += payload_size; @@ -1866,8 +1869,8 @@ pub const Object = struct { name.ptr, di_file, line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &fields, @@ -1883,7 +1886,7 @@ pub const Object = struct { }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
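// --- illustrative sketch, not part of the patch ---
// Layout built above for a non-pointer-like optional: the payload comes
// first, and the one-byte `is_null` flag lands at the next offset aligned
// for `non_null_align`, exactly as the `offset` bookkeeping computes:
fn isNullOffset(payload_size: u64, non_null_align: u64) u64 {
    return @import("std").mem.alignForwardGeneric(u64, payload_size, non_null_align);
}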
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module }); @@ -1907,10 +1910,10 @@ pub const Object = struct { break :blk fwd_decl; }; - const error_size = Type.anyerror.abiSize(target); - const error_align = Type.anyerror.abiAlignment(target); - const payload_size = payload_ty.abiSize(target); - const payload_align = payload_ty.abiAlignment(target); + const error_size = Type.anyerror.abiSize(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const payload_size = payload_ty.abiSize(mod); + const payload_align = payload_ty.abiAlignment(mod); var error_index: u32 = undefined; var payload_index: u32 = undefined; @@ -1957,8 +1960,8 @@ pub const Object = struct { name.ptr, di_file, line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &fields, @@ -1988,12 +1991,12 @@ pub const Object = struct { const struct_obj = payload.data; if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { assert(struct_obj.haveLayout()); - const info = struct_obj.backing_int_ty.intInfo(target); + const info = struct_obj.backing_int_ty.intInfo(mod); const dwarf_encoding: c_uint = switch (info.signedness) { .signed => DW.ATE.signed, .unsigned => DW.ATE.unsigned, }; - const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types + const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -2026,10 +2029,10 @@ pub const Object = struct { for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_size = field_ty.abiSize(target); - const field_align = field_ty.abiAlignment(target); + const field_size = field_ty.abiSize(mod); + const field_align = field_ty.abiAlignment(mod); const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; @@ -2057,8 +2060,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from di_fields.items.ptr, @@ -2093,7 +2096,7 @@ pub const Object = struct { } } - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { const owner_decl_index = ty.getOwnerDecl(); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); @@ -2114,11 +2117,11 @@ pub const Object = struct { comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(); + var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_size = field.ty.abiSize(target); - const field_align = field.alignment(target, layout); + const field_size = field.ty.abiSize(mod); + const field_align = field.alignment(mod, layout); const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; @@ 
-2143,8 +2146,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from di_fields.items.ptr, @@ -2179,7 +2182,7 @@ pub const Object = struct { }; const union_obj = ty.cast(Type.Payload.Union).?.data; - if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime()) { + if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) { const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` @@ -2188,7 +2191,7 @@ pub const Object = struct { return union_di_ty; } - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { const tag_di_ty = try o.lowerDebugType(union_obj.tag_ty, .full); @@ -2198,8 +2201,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &di_fields, @@ -2225,10 +2228,10 @@ pub const Object = struct { const field_name = kv.key_ptr.*; const field = kv.value_ptr.*; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_size = field.ty.abiSize(target); - const field_align = field.normalAlignment(target); + const field_size = field.ty.abiSize(mod); + const field_align = field.normalAlignment(mod); const field_name_copy = try gpa.dupeZ(u8, field_name); defer gpa.free(field_name_copy); @@ -2258,8 +2261,8 @@ pub const Object = struct { union_name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, @intCast(c_int, di_fields.items.len), @@ -2319,8 +2322,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &full_di_fields, @@ -2341,8 +2344,8 @@ pub const Object = struct { defer param_di_types.deinit(); // Return type goes first. 
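// --- illustrative sketch, not part of the patch ---
// Parameter layout implied by the `firstParamSRet` checks above: a hidden
// sret pointer (when the return value is passed indirectly, making the
// visible return type void) precedes the optional error-return-trace
// pointer and the declared runtime parameters.
fn totalLlvmParams(sret: bool, err_ret_trace: bool, runtime_params: usize) usize {
    return @as(usize, @boolToInt(sret)) + @boolToInt(err_ret_trace) + runtime_params;
}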
- if (fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { - const sret = firstParamSRet(fn_info, target); + if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { + const sret = firstParamSRet(fn_info, mod); const di_ret_ty = if (sret) Type.void else fn_info.return_type; try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); @@ -2358,7 +2361,7 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(Type.void, .full)); } - if (fn_info.return_type.isError() and + if (fn_info.return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -2370,9 +2373,9 @@ pub const Object = struct { } for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = param_ty, @@ -2450,7 +2453,7 @@ pub const Object = struct { const stack_trace_str: []const u8 = "StackTrace"; // buffer is only used for int_type, `builtin` is a struct. - const builtin_ty = mod.declPtr(builtin_decl).val.toType(undefined); + const builtin_ty = mod.declPtr(builtin_decl).val.toType(); const builtin_namespace = builtin_ty.getNamespace().?; const stack_trace_decl_index = builtin_namespace.decls .getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?; @@ -2458,7 +2461,7 @@ pub const Object = struct { // Sema should have ensured that StackTrace was analyzed. assert(stack_trace_decl.has_tv); - return stack_trace_decl.val.toType(undefined); + return stack_trace_decl.val.toType(); } }; @@ -2495,9 +2498,10 @@ pub const DeclGen = struct { if (decl.val.castTag(.extern_fn)) |extern_fn| { _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); } else { - const target = dg.module.getTarget(); + const mod = dg.module; + const target = mod.getTarget(); var global = try dg.resolveGlobalDecl(decl_index); - global.setAlignment(decl.getAlignment(target)); + global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { @@ -2569,19 +2573,20 @@ pub const DeclGen = struct { /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); const fn_info = zig_fn_type.fnInfo(); - const target = dg.module.getTarget(); - const sret = firstParamSRet(fn_info, target); + const target = mod.getTarget(); + const sret = firstParamSRet(fn_info, mod); const fn_type = try dg.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(dg.module); + const fqn = try decl.getFullyQualifiedName(mod); defer dg.gpa.free(fqn); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); @@ -2593,7 +2598,7 @@ pub const DeclGen = struct { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { - if (dg.module.getTarget().isWasm()) { + if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); if (decl.getExternFn().?.lib_name) |lib_name| { const module_name = std.mem.sliceTo(lib_name, 0); @@ -2612,8 +2617,8 @@ pub const DeclGen = struct { llvm_fn.addSretAttr(raw_llvm_ret_ty); } - const err_return_tracing = fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing; + const err_return_tracing = fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { dg.addArgAttr(llvm_fn, @boolToInt(sret), "nonnull"); @@ -2656,14 +2661,14 @@ pub const DeclGen = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types[param_index]; - if (!isByRef(param_ty)) { + if (!isByRef(param_ty, mod)) { dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -2784,12 +2789,13 @@ pub const DeclGen = struct { fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const llvm_ty = try lowerTypeInner(dg, t); + const mod = dg.module; if (std.debug.runtime_safety and false) check: { - if (t.zigTypeTag() == .Opaque) break :check; - if (!t.hasRuntimeBits()) break :check; + if (t.zigTypeTag(mod) == .Opaque) break :check; + if (!t.hasRuntimeBits(mod)) break :check; if (!llvm_ty.isSized().toBool()) break :check; - const zig_size = t.abiSize(dg.module.getTarget()); + const zig_size = t.abiSize(mod); const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty); if (llvm_size != zig_size) { log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{ @@ -2802,18 +2808,18 @@ pub const DeclGen = struct { fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const gpa = dg.gpa; - const target = dg.module.getTarget(); - switch (t.zigTypeTag()) { + const mod = dg.module; + const target = mod.getTarget(); + switch (t.zigTypeTag(mod)) { .Void, .NoReturn => return dg.context.voidType(), .Int => { - const info = t.intInfo(target); + const info = t.intInfo(mod); assert(info.bits != 0); return dg.context.intType(info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const int_ty = t.intTagType(&buffer); - const bit_count = 
int_ty.intInfo(target).bits; + const int_ty = t.intTagType(); + const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); }, @@ -2863,7 +2869,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(); - assert(elem_ty.onePossibleValue() == null); + assert(elem_ty.onePossibleValue(mod) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -2875,11 +2881,11 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const child_ty = t.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } const payload_llvm_ty = try dg.lowerType(child_ty); - if (t.optionalReprIsPayload()) { + if (t.optionalReprIsPayload(mod)) { return payload_llvm_ty; } @@ -2887,8 +2893,8 @@ pub const DeclGen = struct { var fields_buf: [3]*llvm.Type = .{ payload_llvm_ty, dg.context.intType(8), undefined, }; - const offset = child_ty.abiSize(target) + 1; - const abi_size = t.abiSize(target); + const offset = child_ty.abiSize(mod) + 1; + const abi_size = t.abiSize(mod); const padding = @intCast(c_uint, abi_size - offset); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2898,17 +2904,17 @@ pub const DeclGen = struct { }, .ErrorUnion => { const payload_ty = t.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try dg.lowerType(Type.anyerror); } const llvm_error_type = try dg.lowerType(Type.anyerror); const llvm_payload_type = try dg.lowerType(payload_ty); - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiSize(mod); var fields_buf: [3]*llvm.Type = undefined; if (error_align > payload_align) { @@ -2964,9 +2970,9 @@ pub const DeclGen = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -2979,7 +2985,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field_ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } { const prev_offset = offset; @@ -3027,11 +3033,11 @@ pub const DeclGen = struct { var big_align: u32 = 1; var any_underaligned_fields = false; - var it = struct_obj.runtimeFieldIterator(); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); - const field_ty_align = field.ty.abiAlignment(target); + const field_align = field.alignment(mod, struct_obj.layout); + const field_ty_align = field.ty.abiAlignment(mod); any_underaligned_fields = any_underaligned_fields or field_align < field_ty_align; big_align = @max(big_align, field_align); @@ -3046,7 +3052,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3074,11 +3080,11 @@ pub const DeclGen = struct { // reference, we need to copy it here. gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const layout = t.unionGetLayout(target); + const layout = t.unionGetLayout(mod); const union_obj = t.cast(Type.Payload.Union).?.data; if (union_obj.layout == .Packed) { - const bitsize = @intCast(c_uint, t.bitSize(target)); + const bitsize = @intCast(c_uint, t.bitSize(mod)); const int_llvm_ty = dg.context.intType(bitsize); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; @@ -3155,19 +3161,19 @@ pub const DeclGen = struct { } fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type { - const target = dg.module.getTarget(); + const mod = dg.module; const fn_info = fn_ty.fnInfo(); const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa); defer llvm_params.deinit(); - if (firstParamSRet(fn_info, target)) { + if (firstParamSRet(fn_info, mod)) { try llvm_params.append(dg.context.pointerType(0)); } - if (fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing) + if (fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing) { var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -3189,14 +3195,14 @@ pub const DeclGen = struct { }, .abi_sized_int => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); try llvm_params.append(dg.context.intType(abi_size * 8)); }, .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; var opt_buf: Type.Payload.ElemType = undefined; - const ptr_ty = if (param_ty.zigTypeTag() == .Optional) + const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) 
param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) else param_ty.slicePtrFieldType(&buf); @@ -3215,7 +3221,7 @@ pub const DeclGen = struct { }, .float_array => |count| { const param_ty = fn_info.param_types[it.zig_index - 1]; - const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?); + const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @intCast(c_uint, count); const arr_ty = float_ty.arrayType(field_count); try llvm_params.append(arr_ty); @@ -3239,11 +3245,12 @@ pub const DeclGen = struct { /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type { - const lower_elem_ty = switch (elem_ty.zigTypeTag()) { + const mod = dg.module; + const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !elem_ty.fnInfo().is_generic, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(), - else => elem_ty.hasRuntimeBitsIgnoreComptime(), + .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod), + else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) try dg.lowerType(elem_ty) @@ -3262,9 +3269,9 @@ pub const DeclGen = struct { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - const target = dg.module.getTarget(); - - switch (tv.ty.zigTypeTag()) { + const mod = dg.module; + const target = mod.getTarget(); + switch (tv.ty.zigTypeTag(mod)) { .Bool => { const llvm_type = try dg.lowerType(tv.ty); return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); @@ -3276,8 +3283,8 @@ pub const DeclGen = struct { .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), else => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, target); - const int_info = tv.ty.intInfo(target); + const bigint = tv.val.toBigInt(&bigint_space, mod); + const int_info = tv.ty.intInfo(mod); assert(int_info.bits != 0); const llvm_type = dg.context.intType(int_info.bits); @@ -3304,9 +3311,9 @@ pub const DeclGen = struct { const int_val = tv.enumToInt(&int_buffer); var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, target); + const bigint = int_val.toBigInt(&bigint_space, mod); - const int_info = tv.ty.intInfo(target); + const int_info = tv.ty.intInfo(mod); const llvm_type = dg.context.intType(int_info.bits); const unsigned_val = v: { @@ -3408,7 +3415,7 @@ pub const DeclGen = struct { }, .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False); + const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { @@ -3439,7 +3446,7 @@ pub const DeclGen = struct { const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; if (tv.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); if (byte == 0 and bytes.len > 0) { return dg.context.constString( bytes.ptr, @@ -3549,13 +3556,13 @@ pub const DeclGen = struct { const payload_ty = tv.ty.optionalChild(&buf); 
const llvm_i8 = dg.context.intType(8); - const is_pl = !tv.val.isNull(); + const is_pl = !tv.val.isNull(mod); const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload()) { + if (tv.ty.optionalReprIsPayload(mod)) { if (tv.val.castTag(.opt_payload)) |payload| { return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); } else if (is_pl) { @@ -3564,7 +3571,7 @@ pub const DeclGen = struct { return llvm_ty.constNull(); } } - assert(payload_ty.zigTypeTag() != .Fn); + assert(payload_ty.zigTypeTag(mod) != .Fn); const llvm_field_count = llvm_ty.countStructElementTypes(); var fields_buf: [3]*llvm.Value = undefined; @@ -3607,14 +3614,14 @@ pub const DeclGen = struct { const payload_type = tv.ty.errorUnionPayload(); const is_pl = tv.val.errorUnionIsPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const err_val = if (!is_pl) tv.val else Value.initTag(.zero); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } - const payload_align = payload_type.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); + const payload_align = payload_type.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, .val = if (is_pl) Value.initTag(.zero) else tv.val, @@ -3661,9 +3668,9 @@ pub const DeclGen = struct { for (tuple.types, 0..) |field_ty, i| { if (tuple.values[i].tag() != .unreachable_value) continue; - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -3685,7 +3692,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(field_llvm_val); - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } { const prev_offset = offset; @@ -3715,7 +3722,7 @@ pub const DeclGen = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(target); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); @@ -3723,15 +3730,15 @@ pub const DeclGen = struct { var running_bits: u16 = 0; for (field_vals, 0..) 
|field_val, i| { const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try dg.lowerValue(.{ .ty = field.ty, .val = field_val, }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else non_int_val.constBitCast(small_int_ty); @@ -3756,10 +3763,10 @@ pub const DeclGen = struct { var big_align: u32 = 0; var need_unnamed = false; - var it = struct_obj.runtimeFieldIterator(); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); + const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -3781,7 +3788,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(field_llvm_val); - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3810,7 +3817,7 @@ pub const DeclGen = struct { const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val = tv.val.castTag(.@"union").?.data; - const layout = tv.ty.unionGetLayout(target); + const layout = tv.ty.unionGetLayout(mod); if (layout.payload_size == 0) { return lowerValue(dg, .{ @@ -3824,12 +3831,12 @@ pub const DeclGen = struct { const field_ty = union_obj.fields.values()[field_index].ty; if (union_obj.layout == .Packed) { - if (!field_ty.hasRuntimeBits()) + if (!field_ty.hasRuntimeBits(mod)) return llvm_union_ty.constNull(); const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @intCast(u16, field_ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field_ty.bitSize(mod)); const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field_ty.isPtrAtRuntime()) + const small_int_val = if (field_ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else non_int_val.constBitCast(small_int_ty); @@ -3842,13 +3849,13 @@ pub const DeclGen = struct { // must pointer cast to the expected type before accessing the union. 
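// --- illustrative sketch, not part of the patch ---
// Packed-struct constants above are assembled by casting each field to an
// integer of its exact bit width and OR-ing it into one backing integer at
// a running bit offset; schematically (assumes `field_val` has already been
// truncated to its field's bit width):
fn packInto(acc: u64, field_val: u64, running_bits: u6) u64 {
    return acc | (field_val << running_bits);
}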
             var need_unnamed: bool = layout.most_aligned_field != field_index;
             const payload = p: {
-                if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const padding_len = @intCast(c_uint, layout.payload_size);
                     break :p dg.context.intType(8).arrayType(padding_len).getUndef();
                 }
                 const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
                 need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field);
-                const field_size = field_ty.abiSize(target);
+                const field_size = field_ty.abiSize(mod);
                 if (field_size == layout.payload_size) {
                     break :p field;
                 }
@@ -4012,7 +4019,8 @@ pub const DeclGen = struct {
    }

    fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
-        const target = dg.module.getTarget();
+        const mod = dg.module;
+        const target = mod.getTarget();
        switch (ptr_val.tag()) {
            .decl_ref_mut => {
                const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -4045,13 +4053,13 @@ pub const DeclGen = struct {
                const field_index = @intCast(u32, field_ptr.field_index);
                const llvm_u32 = dg.context.intType(32);

-                switch (parent_ty.zigTypeTag()) {
+                switch (parent_ty.zigTypeTag(mod)) {
                    .Union => {
                        if (parent_ty.containerLayout() == .Packed) {
                            return parent_llvm_ptr;
                        }

-                        const layout = parent_ty.unionGetLayout(target);
+                        const layout = parent_ty.unionGetLayout(mod);
                        if (layout.payload_size == 0) {
                            // In this case a pointer to the union and a pointer to any
                            // (void) payload is the same.
@@ -4077,8 +4085,8 @@ pub const DeclGen = struct {
                            const prev_bits = b: {
                                var b: usize = 0;
                                for (parent_ty.structFields().values()[0..field_index]) |field| {
-                                    if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
-                                    b += @intCast(usize, field.ty.bitSize(target));
+                                    if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                                    b += @intCast(usize, field.ty.bitSize(mod));
                                }
                                break :b b;
                            };
@@ -4091,14 +4099,14 @@ pub const DeclGen = struct {
                            var ty_buf: Type.Payload.Pointer = undefined;

                            const parent_llvm_ty = try dg.lowerType(parent_ty);
-                            if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+                            if (llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
                                const indices: [2]*llvm.Value = .{
                                    llvm_u32.constInt(0, .False),
                                    llvm_u32.constInt(llvm_field_index, .False),
                                };
                                return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                            } else {
-                                const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False);
+                                const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
                                const indices: [1]*llvm.Value = .{llvm_index};
                                return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                            }
@@ -4132,8 +4140,8 @@ pub const DeclGen = struct {
                var buf: Type.Payload.ElemType = undefined;
                const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);

-                if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
-                    payload_ty.optionalReprIsPayload())
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+                    payload_ty.optionalReprIsPayload(mod))
                {
                    // In this case, we represent pointer to optional the same as pointer
                    // to the payload.
@@ -4153,13 +4161,13 @@ pub const DeclGen = struct {
                const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true);

                const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                    // In this case, we represent pointer to error union the same as pointer
                    // to the payload.
                    return parent_llvm_ptr;
                }

-                const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
+                const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
                const llvm_u32 = dg.context.intType(32);
                const indices: [2]*llvm.Value = .{
                    llvm_u32.constInt(0, .False),
@@ -4177,12 +4185,13 @@ pub const DeclGen = struct {
        tv: TypedValue,
        decl_index: Module.Decl.Index,
    ) Error!*llvm.Value {
+        const mod = self.module;
        if (tv.ty.isSlice()) {
            var buf: Type.SlicePtrFieldTypeBuffer = undefined;
            const ptr_ty = tv.ty.slicePtrFieldType(&buf);
            var slice_len: Value.Payload.U64 = .{
                .base = .{ .tag = .int_u64 },
-                .data = tv.val.sliceLen(self.module),
+                .data = tv.val.sliceLen(mod),
            };
            const fields: [2]*llvm.Value = .{
                try self.lowerValue(.{
@@ -4202,7 +4211,7 @@ pub const DeclGen = struct {
        //   const bar = foo;
        //   ... &bar;
        // `bar` is just an alias and we actually want to lower a reference to `foo`.
-        const decl = self.module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
        if (decl.val.castTag(.function)) |func| {
            if (func.data.owner_decl != decl_index) {
                return self.lowerDeclRefValue(tv, func.data.owner_decl);
@@ -4213,21 +4222,21 @@ pub const DeclGen = struct {
            }
        }

-        const is_fn_body = decl.ty.zigTypeTag() == .Fn;
-        if ((!is_fn_body and !decl.ty.hasRuntimeBits()) or
+        const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+        if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
            (is_fn_body and decl.ty.fnInfo().is_generic))
        {
            return self.lowerPtrToVoid(tv.ty);
        }

-        self.module.markDeclAlive(decl);
+        mod.markDeclAlive(decl);

        const llvm_decl_val = if (is_fn_body)
            try self.resolveLlvmFunction(decl_index)
        else
            try self.resolveGlobalDecl(decl_index);

-        const target = self.module.getTarget();
+        const target = mod.getTarget();
        const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
        const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
        const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
@@ -4236,7 +4245,7 @@ pub const DeclGen = struct {
        } else llvm_decl_val;

        const llvm_type = try self.lowerType(tv.ty);
-        if (tv.ty.zigTypeTag() == .Int) {
+        if (tv.ty.zigTypeTag(mod) == .Int) {
            return llvm_val.constPtrToInt(llvm_type);
        } else {
            return llvm_val.constBitCast(llvm_type);
@@ -4338,21 +4347,20 @@ pub const DeclGen = struct {
    /// RMW exchange of floating-point values is bitcasted to same-sized integer
    /// types to work around a LLVM deficiency when targeting ARM/AArch64.
    fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
-        const target = dg.module.getTarget();
-        var buffer: Type.Payload.Bits = undefined;
-        const int_ty = switch (ty.zigTypeTag()) {
+        const mod = dg.module;
+        const int_ty = switch (ty.zigTypeTag(mod)) {
            .Int => ty,
-            .Enum => ty.intTagType(&buffer),
+            .Enum => ty.intTagType(),
            .Float => {
                if (!is_rmw_xchg) return null;
-                return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
+                return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
            },
            .Bool => return dg.context.intType(8),
            else => return null,
        };
-        const bit_count = int_ty.intInfo(target).bits;
+        const bit_count = int_ty.intInfo(mod).bits;
        if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
-            return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
+            return dg.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
        } else {
            return null;
        }
@@ -4366,15 +4374,15 @@ pub const DeclGen = struct {
        fn_info: Type.Payload.Function.Data,
        llvm_arg_i: u32,
    ) void {
-        const target = dg.module.getTarget();
-        if (param_ty.isPtrAtRuntime()) {
+        const mod = dg.module;
+        if (param_ty.isPtrAtRuntime(mod)) {
            const ptr_info = param_ty.ptrInfo().data;

            if (math.cast(u5, param_index)) |i| {
                if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
                    dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
                }
            }
-            if (!param_ty.isPtrLikeOptional() and !ptr_info.@"allowzero") {
+            if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") {
                dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
            }
            if (!ptr_info.mutable) {
@@ -4383,13 +4391,10 @@ pub const DeclGen = struct {
            if (ptr_info.@"align" != 0) {
                dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
            } else {
-                const elem_align = @max(
-                    ptr_info.pointee_type.abiAlignment(target),
-                    1,
-                );
+                const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
            }
-        } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) {
+        } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
            .signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"),
            .unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"),
        };
@@ -4490,9 +4495,10 @@ pub const FuncGen = struct {
        const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
        if (gop.found_existing) return gop.value_ptr.*;

+        const mod = self.dg.module;
        const llvm_val = try self.resolveValue(.{
            .ty = self.air.typeOf(inst),
-            .val = self.air.value(inst).?,
+            .val = self.air.value(inst, mod).?,
        });
        gop.value_ptr.* = llvm_val;
        return llvm_val;
@@ -4500,11 +4506,12 @@ pub const FuncGen = struct {

    fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
        const llvm_val = try self.dg.lowerValue(tv);
-        if (!isByRef(tv.ty)) return llvm_val;
+        const mod = self.dg.module;
+        if (!isByRef(tv.ty, mod)) return llvm_val;

        // We have an LLVM value but we need to create a global constant and
        // set the value as its initializer, and then return a pointer to the global.
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
        const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
        const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
        const global = self.dg.object.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
@@ -4512,7 +4519,7 @@ pub const FuncGen = struct {
        global.setLinkage(.Private);
        global.setGlobalConstant(.True);
        global.setUnnamedAddr(.True);
-        global.setAlignment(tv.ty.abiAlignment(target));
+        global.setAlignment(tv.ty.abiAlignment(mod));
        const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
            global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
        else
@@ -4775,7 +4782,8 @@ pub const FuncGen = struct {
        const extra = self.air.extraData(Air.Call, pl_op.payload);
        const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
        const callee_ty = self.air.typeOf(pl_op.operand);
-        const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+        const mod = self.dg.module;
+        const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
            .Fn => callee_ty,
            .Pointer => callee_ty.childType(),
            else => unreachable,
@@ -4783,20 +4791,20 @@ pub const FuncGen = struct {
        const fn_info = zig_fn_ty.fnInfo();
        const return_type = fn_info.return_type;
        const llvm_fn = try self.resolveInst(pl_op.operand);
-        const target = self.dg.module.getTarget();
-        const sret = firstParamSRet(fn_info, target);
+        const target = mod.getTarget();
+        const sret = firstParamSRet(fn_info, mod);

        var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa);
        defer llvm_args.deinit();

        const ret_ptr = if (!sret) null else blk: {
            const llvm_ret_ty = try self.dg.lowerType(return_type);
-            const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(target));
+            const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
            try llvm_args.append(ret_ptr);
            break :blk ret_ptr;
        };

-        const err_return_tracing = fn_info.return_type.isError() and
+        const err_return_tracing = fn_info.return_type.isError(mod) and
            self.dg.module.comp.bin_file.options.error_return_tracing;
        if (err_return_tracing) {
            try llvm_args.append(self.err_ret_trace.?);
@@ -4810,8 +4818,8 @@ pub const FuncGen = struct {
                const param_ty = self.air.typeOf(arg);
                const llvm_arg = try self.resolveInst(arg);
                const llvm_param_ty = try self.dg.lowerType(param_ty);
-                if (isByRef(param_ty)) {
-                    const alignment = param_ty.abiAlignment(target);
+                if (isByRef(param_ty, mod)) {
+                    const alignment = param_ty.abiAlignment(mod);
                    const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, "");
                    load_inst.setAlignment(alignment);
                    try llvm_args.append(load_inst);
@@ -4823,10 +4831,10 @@ pub const FuncGen = struct {
                const arg = args[it.zig_index - 1];
                const param_ty = self.air.typeOf(arg);
                const llvm_arg = try self.resolveInst(arg);
-                if (isByRef(param_ty)) {
+                if (isByRef(param_ty, mod)) {
                    try llvm_args.append(llvm_arg);
                } else {
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                    const param_llvm_ty = llvm_arg.typeOf();
                    const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
                    const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
@@ -4839,10 +4847,10 @@ pub const FuncGen = struct {
                const param_ty = self.air.typeOf(arg);
                const llvm_arg = try self.resolveInst(arg);

-                const alignment = param_ty.abiAlignment(target);
+                const alignment = param_ty.abiAlignment(mod);
                const param_llvm_ty = try self.dg.lowerType(param_ty);
                const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
-                if (isByRef(param_ty)) {
+                if (isByRef(param_ty, mod)) {
                    const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
                    load_inst.setAlignment(alignment);
@@ -4859,11 +4867,11 @@ pub const FuncGen = struct {
                const arg = args[it.zig_index - 1];
                const param_ty = self.air.typeOf(arg);
                const llvm_arg = try self.resolveInst(arg);
-                const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+                const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                const int_llvm_ty = self.context.intType(abi_size * 8);

-                if (isByRef(param_ty)) {
-                    const alignment = param_ty.abiAlignment(target);
+                if (isByRef(param_ty, mod)) {
+                    const alignment = param_ty.abiAlignment(mod);
                    const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
                    load_inst.setAlignment(alignment);
                    try llvm_args.append(load_inst);
@@ -4871,7 +4879,7 @@ pub const FuncGen = struct {
                    // LLVM does not allow bitcasting structs so we must allocate
                    // a local, store as one type, and then load as another type.
                    const alignment = @max(
-                        param_ty.abiAlignment(target),
+                        param_ty.abiAlignment(mod),
                        self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
                    );
                    const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
@@ -4896,11 +4904,11 @@ pub const FuncGen = struct {
                const param_ty = self.air.typeOf(arg);
                const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
                const llvm_arg = try self.resolveInst(arg);
-                const is_by_ref = isByRef(param_ty);
+                const is_by_ref = isByRef(param_ty, mod);
                const arg_ptr = if (is_by_ref) llvm_arg else p: {
                    const p = self.buildAlloca(llvm_arg.typeOf(), null);
                    const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(param_ty.abiAlignment(target));
+                    store_inst.setAlignment(param_ty.abiAlignment(mod));
                    break :p p;
                };
@@ -4924,17 +4932,17 @@ pub const FuncGen = struct {
                const arg = args[it.zig_index - 1];
                const arg_ty = self.air.typeOf(arg);
                var llvm_arg = try self.resolveInst(arg);
-                if (!isByRef(arg_ty)) {
+                if (!isByRef(arg_ty, mod)) {
                    const p = self.buildAlloca(llvm_arg.typeOf(), null);
                    const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(arg_ty.abiAlignment(target));
+                    store_inst.setAlignment(arg_ty.abiAlignment(mod));
                    llvm_arg = store_inst;
                }

-                const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
+                const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
                const array_llvm_ty = float_ty.arrayType(count);

-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
                load_inst.setAlignment(alignment);
                try llvm_args.append(load_inst);
@@ -4944,15 +4952,15 @@ pub const FuncGen = struct {
                const arg = args[it.zig_index - 1];
                const arg_ty = self.air.typeOf(arg);
                var llvm_arg = try self.resolveInst(arg);
-                if (!isByRef(arg_ty)) {
+                if (!isByRef(arg_ty, mod)) {
                    const p = self.buildAlloca(llvm_arg.typeOf(), null);
                    const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(arg_ty.abiAlignment(target));
+                    store_inst.setAlignment(arg_ty.abiAlignment(mod));
                    llvm_arg = store_inst;
                }

                const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);

-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
                load_inst.setAlignment(alignment);
                try llvm_args.append(load_inst);
@@ -4969,7 +4977,7 @@ pub const FuncGen = struct {
            "",
        );

-        if (callee_ty.zigTypeTag() == .Pointer) {
+        if (callee_ty.zigTypeTag(mod) == .Pointer) {
            // Add argument attributes for function pointer calls.
            it = iterateParamTypes(self.dg, fn_info);
            it.llvm_index += @boolToInt(sret);
@@ -4978,7 +4986,7 @@ pub const FuncGen = struct {
                .byval => {
                    const param_index = it.zig_index - 1;
                    const param_ty = fn_info.param_types[param_index];
-                    if (!isByRef(param_ty)) {
+                    if (!isByRef(param_ty, mod)) {
                        self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
                    }
                },
@@ -4986,7 +4994,7 @@ pub const FuncGen = struct {
                    const param_index = it.zig_index - 1;
                    const param_ty = fn_info.param_types[param_index];
                    const param_llvm_ty = try self.dg.lowerType(param_ty);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                    self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
                },
                .byref_mut => {
@@ -5013,7 +5021,7 @@ pub const FuncGen = struct {
                            self.dg.addArgAttr(call, llvm_arg_i, "noalias");
                        }
                    }
-                    if (param_ty.zigTypeTag() != .Optional) {
+                    if (param_ty.zigTypeTag(mod) != .Optional) {
                        self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
                    }
                    if (!ptr_info.mutable) {
@@ -5022,7 +5030,7 @@ pub const FuncGen = struct {
                    if (ptr_info.@"align" != 0) {
                        self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
                    } else {
-                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                        self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
                    }
                },
@@ -5033,7 +5041,7 @@ pub const FuncGen = struct {
            return null;
        }

-        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
            return null;
        }

@@ -5041,12 +5049,12 @@ pub const FuncGen = struct {
        if (ret_ptr) |rp| {
            call.setCallSret(llvm_ret_ty);
-            if (isByRef(return_type)) {
+            if (isByRef(return_type, mod)) {
                return rp;
            } else {
                // our by-ref status disagrees with sret so we must load.
                const loaded = self.builder.buildLoad(llvm_ret_ty, rp, "");
-                loaded.setAlignment(return_type.abiAlignment(target));
+                loaded.setAlignment(return_type.abiAlignment(mod));
                return loaded;
            }
        }
@@ -5061,7 +5069,7 @@ pub const FuncGen = struct {
            const rp = self.buildAlloca(llvm_ret_ty, alignment);
            const store_inst = self.builder.buildStore(call, rp);
            store_inst.setAlignment(alignment);
-            if (isByRef(return_type)) {
+            if (isByRef(return_type, mod)) {
                return rp;
            } else {
                const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, "");
@@ -5070,10 +5078,10 @@ pub const FuncGen = struct {
            }
        }

-        if (isByRef(return_type)) {
+        if (isByRef(return_type, mod)) {
            // our by-ref status disagrees with sret so we must allocate, store,
            // and return the allocation pointer.
-            const alignment = return_type.abiAlignment(target);
+            const alignment = return_type.abiAlignment(mod);
            const rp = self.buildAlloca(llvm_ret_ty, alignment);
            const store_inst = self.builder.buildStore(call, rp);
            store_inst.setAlignment(alignment);
@@ -5084,6 +5092,7 @@ pub const FuncGen = struct {
    }

    fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const un_op = self.air.instructions.items(.data)[inst].un_op;
        const ret_ty = self.air.typeOf(un_op);
        if (self.ret_ptr) |ret_ptr| {
@@ -5098,8 +5107,8 @@ pub const FuncGen = struct {
            return null;
        }
        const fn_info = self.dg.decl.ty.fnInfo();
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
-            if (fn_info.return_type.isError()) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            if (fn_info.return_type.isError(mod)) {
                // Functions with an empty error set are emitted with an error code
                // return type and return zero so they can be function pointers coerced
                // to functions that return anyerror.
@@ -5113,10 +5122,9 @@ pub const FuncGen = struct {
        const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
        const operand = try self.resolveInst(un_op);
-        const target = self.dg.module.getTarget();
-        const alignment = ret_ty.abiAlignment(target);
+        const alignment = ret_ty.abiAlignment(mod);

-        if (isByRef(ret_ty)) {
+        if (isByRef(ret_ty, mod)) {
            // operand is a pointer however self.ret_ptr is null so that means
            // we need to return a value.
            const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
@@ -5145,8 +5153,9 @@ pub const FuncGen = struct {
        const ptr_ty = self.air.typeOf(un_op);
        const ret_ty = ptr_ty.childType();
        const fn_info = self.dg.decl.ty.fnInfo();
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
-            if (fn_info.return_type.isError()) {
+        const mod = self.dg.module;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            if (fn_info.return_type.isError(mod)) {
                // Functions with an empty error set are emitted with an error code
                // return type and return zero so they can be function pointers coerced
                // to functions that return anyerror.
@@ -5162,10 +5171,9 @@ pub const FuncGen = struct {
            return null;
        }
        const ptr = try self.resolveInst(un_op);
-        const target = self.dg.module.getTarget();
        const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
        const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
-        loaded.setAlignment(ret_ty.abiAlignment(target));
+        loaded.setAlignment(ret_ty.abiAlignment(mod));
        _ = self.builder.buildRet(loaded);
        return null;
    }
@@ -5184,9 +5192,9 @@ pub const FuncGen = struct {
        const src_list = try self.resolveInst(ty_op.operand);
        const va_list_ty = self.air.getRefType(ty_op.ty);
        const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+        const mod = self.dg.module;

-        const target = self.dg.module.getTarget();
-        const result_alignment = va_list_ty.abiAlignment(target);
+        const result_alignment = va_list_ty.abiAlignment(mod);
        const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);

        const llvm_fn_name = "llvm.va_copy";
@@ -5202,7 +5210,7 @@ pub const FuncGen = struct {
        const args: [2]*llvm.Value = .{ dest_list, src_list };
        _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");

-        if (isByRef(va_list_ty)) {
+        if (isByRef(va_list_ty, mod)) {
            return dest_list;
        } else {
            const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
@@ -5227,11 +5235,11 @@ pub const FuncGen = struct {
    }

    fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const va_list_ty = self.air.typeOfIndex(inst);
        const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);

-        const target = self.dg.module.getTarget();
-        const result_alignment = va_list_ty.abiAlignment(target);
+        const result_alignment = va_list_ty.abiAlignment(mod);
        const list = self.buildAlloca(llvm_va_list_ty, result_alignment);

        const llvm_fn_name = "llvm.va_start";
@@ -5243,7 +5251,7 @@ pub const FuncGen = struct {
        const args: [1]*llvm.Value = .{list};
        _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");

-        if (isByRef(va_list_ty)) {
+        if (isByRef(va_list_ty, mod)) {
            return list;
        } else {
            const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
@@ -5292,23 +5300,23 @@ pub const FuncGen = struct {
        operand_ty: Type,
        op: math.CompareOperator,
    ) Allocator.Error!*llvm.Value {
-        var int_buffer: Type.Payload.Bits = undefined;
        var opt_buffer: Type.Payload.ElemType = undefined;

-        const scalar_ty = operand_ty.scalarType();
-        const int_ty = switch (scalar_ty.zigTypeTag()) {
-            .Enum => scalar_ty.intTagType(&int_buffer),
+        const mod = self.dg.module;
+        const scalar_ty = operand_ty.scalarType(mod);
+        const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
+            .Enum => scalar_ty.intTagType(),
            .Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
            .Optional => blk: {
                const payload_ty = operand_ty.optionalChild(&opt_buffer);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
-                    operand_ty.optionalReprIsPayload())
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+                    operand_ty.optionalReprIsPayload(mod))
                {
                    break :blk operand_ty;
                }
                // We need to emit instructions to check for equality/inequality
                // of optionals that are not pointers.
-                const is_by_ref = isByRef(scalar_ty);
+                const is_by_ref = isByRef(scalar_ty, mod);
                const opt_llvm_ty = try self.dg.lowerType(scalar_ty);
                const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
                const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
@@ -5375,7 +5383,7 @@ pub const FuncGen = struct {
            .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }),
            else => unreachable,
        };
-        const is_signed = int_ty.isSignedInt();
+        const is_signed = int_ty.isSignedInt(mod);
        const operation: llvm.IntPredicate = switch (op) {
            .eq => .EQ,
            .neq => .NE,
@@ -5393,6 +5401,7 @@ pub const FuncGen = struct {
        const body = self.air.extra[extra.end..][0..extra.data.body_len];
        const inst_ty = self.air.typeOfIndex(inst);
        const parent_bb = self.context.createBasicBlock("Block");
+        const mod = self.dg.module;

        if (inst_ty.isNoReturn()) {
            try self.genBody(body);
@@ -5414,8 +5423,8 @@ pub const FuncGen = struct {
        self.builder.positionBuilderAtEnd(parent_bb);

        // Create a phi node only if the block returns a value.
-        const is_body = inst_ty.zigTypeTag() == .Fn;
-        if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        const is_body = inst_ty.zigTypeTag(mod) == .Fn;
+        if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

        const raw_llvm_ty = try self.dg.lowerType(inst_ty);

@@ -5424,7 +5433,7 @@ pub const FuncGen = struct {
            // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
            // of function pointers, however the phi makes it a runtime value and therefore
            // the LLVM type has to be wrapped in a pointer.
-            if (is_body or isByRef(inst_ty)) {
+            if (is_body or isByRef(inst_ty, mod)) {
                break :ty self.context.pointerType(0);
            }
            break :ty raw_llvm_ty;
@@ -5445,7 +5454,8 @@ pub const FuncGen = struct {
        // Add the values to the lists only if the break provides a value.
        const operand_ty = self.air.typeOf(branch.operand);
-        if (operand_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.zigTypeTag() == .Fn) {
+        const mod = self.dg.module;
+        if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
            const val = try self.resolveInst(branch.operand);

            // For the phi node, we need the basic blocks and the values of the
@@ -5481,6 +5491,7 @@ pub const FuncGen = struct {
    }

    fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const inst = body_tail[0];
        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
        const err_union = try self.resolveInst(pl_op.operand);
@@ -5488,7 +5499,7 @@ pub const FuncGen = struct {
        const body = self.air.extra[extra.end..][0..extra.data.body_len];
        const err_union_ty = self.air.typeOf(pl_op.operand);
        const payload_ty = self.air.typeOfIndex(inst);
-        const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
        const is_unused = self.liveness.isUnused(inst);
        return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
    }
@@ -5512,9 +5523,9 @@ pub const FuncGen = struct {
        can_elide_load: bool,
        is_unused: bool,
    ) !?*llvm.Value {
+        const mod = fg.dg.module;
        const payload_ty = err_union_ty.errorUnionPayload();
-        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
-        const target = fg.dg.module.getTarget();
+        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
        const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty);

        if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
@@ -5529,8 +5540,8 @@ pub const FuncGen = struct {
                    err_union;
                break :err fg.builder.buildICmp(.NE, loaded, zero, "");
            }
-            const err_field_index = errUnionErrorOffset(payload_ty, target);
-            if (operand_is_ptr or isByRef(err_union_ty)) {
+            const err_field_index = errUnionErrorOffset(payload_ty, mod);
+            if (operand_is_ptr or isByRef(err_union_ty, mod)) {
                const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, "");
                // TODO add alignment to this load
                const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -5555,30 +5566,31 @@ pub const FuncGen = struct {
        if (!payload_has_bits) {
            return if (operand_is_ptr) err_union else null;
        }
-        const offset = errUnionPayloadOffset(payload_ty, target);
+        const offset = errUnionPayloadOffset(payload_ty, mod);
        if (operand_is_ptr) {
            return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
-        } else if (isByRef(err_union_ty)) {
+        } else if (isByRef(err_union_ty, mod)) {
            const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
-            if (isByRef(payload_ty)) {
+            if (isByRef(payload_ty, mod)) {
                if (can_elide_load)
                    return payload_ptr;

-                return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+                return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
            }
            const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
-            load_inst.setAlignment(payload_ty.abiAlignment(target));
+            load_inst.setAlignment(payload_ty.abiAlignment(mod));
            return load_inst;
        }
        return fg.builder.buildExtractValue(err_union, offset, "");
    }

    fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
        const cond = try self.resolveInst(pl_op.operand);
        const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
        const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
        const llvm_usize = self.context.intType(target.ptrBitWidth());
        const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
            self.builder.buildPtrToInt(cond, llvm_usize, "")
@@ -5645,6 +5657,7 @@ pub const FuncGen = struct {
    }

    fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
        const operand_ty = self.air.typeOf(ty_op.operand);
        const array_ty = operand_ty.childType();
        const llvm_usize = try self.dg.lowerType(Type.usize);
        const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
        const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
        const operand = try self.resolveInst(ty_op.operand);
-        if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
            const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
            return self.builder.buildInsertValue(partial, len, 1, "");
        }
@@ -5666,30 +5679,31 @@ pub const FuncGen = struct {
    }

    fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
        const operand = try self.resolveInst(ty_op.operand);
        const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType();
+        const operand_scalar_ty = operand_ty.scalarType(mod);
        const dest_ty = self.air.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType();
+        const dest_scalar_ty = dest_ty.scalarType(mod);
        const dest_llvm_ty = try self.dg.lowerType(dest_ty);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();

        if (intrinsicsAllowed(dest_scalar_ty, target)) {
-            if (operand_scalar_ty.isSignedInt()) {
+            if (operand_scalar_ty.isSignedInt(mod)) {
                return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
            } else {
                return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
            }
        }

-        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
        const rt_int_bits = compilerRtIntBits(operand_bits);
        const rt_int_ty = self.context.intType(rt_int_bits);
        var extended = e: {
-            if (operand_scalar_ty.isSignedInt()) {
+            if (operand_scalar_ty.isSignedInt(mod)) {
                break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
            } else {
                break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, "");
@@ -5698,7 +5712,7 @@ pub const FuncGen = struct {
        const dest_bits = dest_scalar_ty.floatBits(target);
        const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits);
        const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
-        const sign_prefix = if (operand_scalar_ty.isSignedInt()) "" else "un";
+        const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un";
        var fn_name_buf: [64]u8 = undefined;
        const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{
            sign_prefix,
@@ -5724,27 +5738,28 @@ pub const FuncGen = struct {
    fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
        self.builder.setFastMath(want_fast_math);

-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
        const operand = try self.resolveInst(ty_op.operand);
        const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType();
+        const operand_scalar_ty = operand_ty.scalarType(mod);
        const dest_ty = self.air.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType();
+        const dest_scalar_ty = dest_ty.scalarType(mod);
        const dest_llvm_ty = try self.dg.lowerType(dest_ty);

        if (intrinsicsAllowed(operand_scalar_ty, target)) {
            // TODO set fast math flag
-            if (dest_scalar_ty.isSignedInt()) {
+            if (dest_scalar_ty.isSignedInt(mod)) {
                return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
            } else {
                return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
            }
        }

-        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target)));
+        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
        const ret_ty = self.context.intType(rt_int_bits);
        const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
            // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5756,7 +5771,7 @@ pub const FuncGen = struct {
        const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
        const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
-        const sign_prefix = if (dest_scalar_ty.isSignedInt()) "" else "uns";
+        const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";

        var fn_name_buf: [64]u8 = undefined;
        const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{
@@ -5786,13 +5801,14 @@ pub const FuncGen = struct {
    }

    fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
-        const target = fg.dg.module.getTarget();
+        const mod = fg.dg.module;
+        const target = mod.getTarget();
        const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
        switch (ty.ptrSize()) {
            .Slice => {
                const len = fg.builder.buildExtractValue(ptr, 1, "");
                const elem_ty = ty.childType();
-                const abi_size = elem_ty.abiSize(target);
+                const abi_size = elem_ty.abiSize(mod);
                if (abi_size == 1) return len;
                const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
                return fg.builder.buildMul(len, abi_size_llvm_val, "");
            },
            .One => {
                const array_ty = ty.childType();
                const elem_ty = array_ty.childType();
-                const abi_size = elem_ty.abiSize(target);
+                const abi_size = elem_ty.abiSize(mod);
                return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
            },
            .Many, .C => unreachable,
@@ -5823,6 +5839,7 @@ pub const FuncGen = struct {
    }

    fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const inst = body_tail[0];
        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
        const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -5833,12 +5850,11 @@ pub const FuncGen = struct {
        const base_ptr = self.builder.buildExtractValue(slice, 0, "");
        const indices: [1]*llvm.Value = .{index};
        const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
            if (self.canElideLoad(body_tail))
                return ptr;

-            const target = self.dg.module.getTarget();
-            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
        }

        return self.load(ptr, slice_ty);
@@ -5858,6 +5874,7 @@ pub const FuncGen = struct {
    }

    fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const inst = body_tail[0];
        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5866,15 +5883,14 @@ pub const FuncGen = struct {
        const rhs = try self.resolveInst(bin_op.rhs);
        const array_llvm_ty = try self.dg.lowerType(array_ty);
        const elem_ty = array_ty.childType();
-        if (isByRef(array_ty)) {
+        if (isByRef(array_ty, mod)) {
            const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
-            if (isByRef(elem_ty)) {
+            if (isByRef(elem_ty, mod)) {
                const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
                if (canElideLoad(self, body_tail))
                    return elem_ptr;

-                const target = self.dg.module.getTarget();
-                return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(target), false);
+                return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false);
            } else {
                const lhs_index = Air.refToIndex(bin_op.lhs).?;
                const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -5901,6 +5917,7 @@ pub const FuncGen = struct {
    }

    fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const inst = body_tail[0];
        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
        const ptr_ty = self.air.typeOf(bin_op.lhs);
@@ -5917,23 +5934,23 @@ pub const FuncGen = struct {
            const indices: [1]*llvm.Value = .{rhs};
            break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
        };
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
            if (self.canElideLoad(body_tail))
                return ptr;

-            const target = self.dg.module.getTarget();
-            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
        }

        return self.load(ptr, ptr_ty);
    }

    fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
        const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
        const ptr_ty = self.air.typeOf(bin_op.lhs);
        const elem_ty = ptr_ty.childType();
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);

        const base_ptr = try self.resolveInst(bin_op.lhs);
        const rhs = try self.resolveInst(bin_op.rhs);
@@ -5972,6 +5989,7 @@ pub const FuncGen = struct {
    }

    fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const inst = body_tail[0];
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
        const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ ...
        const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
        const field_index = struct_field.field_index;
        const field_ty = struct_ty.structFieldType(field_index);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
            return null;
        }

-        const target = self.dg.module.getTarget();
-        if (!isByRef(struct_ty)) {
-            assert(!isByRef(field_ty));
-            switch (struct_ty.zigTypeTag()) {
+        if (!isByRef(struct_ty, mod)) {
+            assert(!isByRef(field_ty, mod));
+            switch (struct_ty.zigTypeTag(mod)) {
                .Struct => switch (struct_ty.containerLayout()) {
                    .Packed => {
                        const struct_obj = struct_ty.castTag(.@"struct").?.data;
-                        const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
+                        const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
                        const containing_int = struct_llvm_val;
                        const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
                        const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
                        const elem_llvm_ty = try self.dg.lowerType(field_ty);
-                        if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                        if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                            const same_size_int = self.context.intType(elem_bits);
                            const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                            return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
-                        } else if (field_ty.isPtrAtRuntime()) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                        } else if (field_ty.isPtrAtRuntime(mod)) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                            const same_size_int = self.context.intType(elem_bits);
                            const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                            return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6010,7 +6027,7 @@ pub const FuncGen = struct {
                    },
                    else => {
                        var ptr_ty_buf: Type.Payload.Pointer = undefined;
-                        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+                        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                        return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
                    },
                },
@@ -6018,13 +6035,13 @@ pub const FuncGen = struct {
                    assert(struct_ty.containerLayout() == .Packed);
                    const containing_int = struct_llvm_val;
                    const elem_llvm_ty = try self.dg.lowerType(field_ty);
-                    if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                    if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                        const same_size_int = self.context.intType(elem_bits);
                        const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                        return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
-                    } else if (field_ty.isPtrAtRuntime()) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                    } else if (field_ty.isPtrAtRuntime(mod)) {
+                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                        const same_size_int = self.context.intType(elem_bits);
                        const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                        return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6035,30 +6052,30 @@ pub const FuncGen = struct {
            }
        }

-        switch (struct_ty.zigTypeTag()) {
+        switch (struct_ty.zigTypeTag(mod)) {
            .Struct => {
                assert(struct_ty.containerLayout() != .Packed);
                var ptr_ty_buf: Type.Payload.Pointer = undefined;
-                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                const struct_llvm_ty = try self.dg.lowerType(struct_ty);
                const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
                const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
-                if (isByRef(field_ty)) {
+                if (isByRef(field_ty, mod)) {
                    if (canElideLoad(self, body_tail))
                        return field_ptr;

-                    return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(target), false);
+                    return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false);
                } else {
                    return self.load(field_ptr, field_ptr_ty);
                }
            },
            .Union => {
                const union_llvm_ty = try self.dg.lowerType(struct_ty);
-                const layout = struct_ty.unionGetLayout(target);
+                const layout = struct_ty.unionGetLayout(mod);
                const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
                const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
                const llvm_field_ty = try self.dg.lowerType(field_ty);
-                if (isByRef(field_ty)) {
+                if (isByRef(field_ty, mod)) {
                    if (canElideLoad(self, body_tail))
                        return field_ptr;
@@ -6072,6 +6089,7 @@ pub const FuncGen = struct {
    }

    fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
        const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

        const target = self.dg.module.getTarget();
        const parent_ty = self.air.getRefType(ty_pl.ty).childType();
-        const field_offset = parent_ty.structFieldOffset(extra.field_index, target);
+        const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);

        const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
        if (field_offset == 0) {
@@ -6119,12 +6137,13 @@ pub const FuncGen = struct {
    }

    fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const dib = self.dg.object.di_builder orelse return null;
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
        const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
        const decl_index = func.owner_decl;
-        const decl = self.dg.module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
        const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
        self.di_file = di_file;
        const line_number = decl.src_line + 1;
@@ -6136,22 +6155,41 @@ pub const FuncGen = struct {
            .base_line = self.base_line,
        });

-        const fqn = try decl.getFullyQualifiedName(self.dg.module);
+        const fqn = try decl.getFullyQualifiedName(mod);
        defer self.gpa.free(fqn);

-        const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index);
+        const is_internal_linkage = !mod.decl_exports.contains(decl_index);
+        var fn_ty_pl: Type.Payload.Function = .{
+            .base = .{ .tag = .function },
+            .data = .{
+                .param_types = &.{},
+                .comptime_params = undefined,
+                .return_type = Type.void,
+                .alignment = 0,
+                .noalias_bits = 0,
+                .cc = .Unspecified,
+                .is_var_args = false,
+                .is_generic = false,
+                .is_noinline = false,
+                .align_is_generic = false,
+                .cc_is_generic = false,
+                .section_is_generic = false,
+                .addrspace_is_generic = false,
+            },
+        };
+        const fn_ty = Type.initPayload(&fn_ty_pl.base);
        const subprogram = dib.createFunction(
            di_file.toScope(),
            decl.name,
            fqn,
            di_file,
            line_number,
-            try self.dg.object.lowerDebugType(Type.initTag(.fn_void_no_args), .full),
+            try self.dg.object.lowerDebugType(fn_ty, .full),
            is_internal_linkage,
            true, // is definition
            line_number + func.lbrace_line, // scope line
            llvm.DIFlags.StaticMember,
-            self.dg.module.comp.bin_file.options.optimize_mode != .Debug,
+            mod.comp.bin_file.options.optimize_mode != .Debug,
            null, // decl_subprogram
        );
@@ -6243,10 +6281,11 @@ pub const FuncGen = struct {
            null;
        const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
        const insert_block = self.builder.getInsertBlock();
-        if (isByRef(operand_ty)) {
+        const mod = self.dg.module;
+        if (isByRef(operand_ty, mod)) {
            _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
        } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
-            const alignment = operand_ty.abiAlignment(self.dg.module.getTarget());
+            const alignment = operand_ty.abiAlignment(mod);
            const alloca = self.buildAlloca(operand.typeOf(), alignment);
            const store_inst = self.builder.buildStore(operand, alloca);
            store_inst.setAlignment(alignment);
@@ -6294,7 +6333,8 @@ pub const FuncGen = struct {
        // This stores whether we need to add an elementtype attribute and
        // if so, the element type itself.
        const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();

        var llvm_ret_i: usize = 0;
        var llvm_param_i: usize = 0;
@@ -6322,7 +6362,7 @@ pub const FuncGen = struct {
            if (output != .none) {
                const output_inst = try self.resolveInst(output);
                const output_ty = self.air.typeOf(output);
-                assert(output_ty.zigTypeTag() == .Pointer);
+                assert(output_ty.zigTypeTag(mod) == .Pointer);
                const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());

                if (llvm_ret_indirect[i]) {
@@ -6376,13 +6416,13 @@ pub const FuncGen = struct {
            const arg_llvm_value = try self.resolveInst(input);
            const arg_ty = self.air.typeOf(input);
            var llvm_elem_ty: ?*llvm.Type = null;
-            if (isByRef(arg_ty)) {
+            if (isByRef(arg_ty, mod)) {
                llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
                if (constraintAllowsMemory(constraint)) {
                    llvm_param_values[llvm_param_i] = arg_llvm_value;
                    llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
                } else {
-                    const alignment = arg_ty.abiAlignment(target);
+                    const alignment = arg_ty.abiAlignment(mod);
                    const arg_llvm_ty = try self.dg.lowerType(arg_ty);
                    const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, "");
                    load_inst.setAlignment(alignment);
@@ -6394,7 +6434,7 @@ pub const FuncGen = struct {
                    llvm_param_values[llvm_param_i] = arg_llvm_value;
                    llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
                } else {
-                    const alignment = arg_ty.abiAlignment(target);
+                    const alignment = arg_ty.abiAlignment(mod);
                    const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment);
                    const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr);
                    store_inst.setAlignment(alignment);
@@ -6599,7 +6639,7 @@ pub const FuncGen = struct {
                const output_ptr_ty = self.air.typeOf(output);

                const store_inst = self.builder.buildStore(output_value, output_ptr);
-                store_inst.setAlignment(output_ptr_ty.ptrAlignment(target));
+                store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
            } else {
                ret_val = output_value;
            }
@@ -6622,7 +6662,8 @@ pub const FuncGen = struct {
        const optional_llvm_ty = try self.dg.lowerType(optional_ty);
        var buf: Type.Payload.ElemType = undefined;
        const payload_ty = optional_ty.optionalChild(&buf);
-        if (optional_ty.optionalReprIsPayload()) {
+        const mod = self.dg.module;
+        if (optional_ty.optionalReprIsPayload(mod)) {
            const loaded = if (operand_is_ptr)
                self.builder.buildLoad(optional_llvm_ty, operand, "")
            else
@@ -6638,7 +6679,7 @@ pub const FuncGen = struct {

        comptime assert(optional_layout_version == 3);

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
            const loaded = if (operand_is_ptr)
                self.builder.buildLoad(optional_llvm_ty, operand, "")
            else
                operand;
            return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
        }

-        const is_by_ref = operand_is_ptr or isByRef(optional_ty);
+        const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
        const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref);
        if (pred == .EQ) {
            return self.builder.buildNot(non_null_bit, "");
@@ -6662,6 +6703,7 @@ pub const FuncGen = struct {
        op: llvm.IntPredicate,
        operand_is_ptr: bool,
    ) !?*llvm.Value {
+        const mod = self.dg.module;
        const un_op = self.air.instructions.items(.data)[inst].un_op;
        const operand = try self.resolveInst(un_op);
        const operand_ty = self.air.typeOf(un_op);
@@ -6679,7 +6721,7 @@ pub const FuncGen = struct {
            }
        }

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
            const loaded = if (operand_is_ptr)
                self.builder.buildLoad(try self.dg.lowerType(err_union_ty), operand, "")
            else
                operand;
            return self.builder.buildICmp(op, loaded, zero, "");
        }

-        const target = self.dg.module.getTarget();
-        const err_field_index = errUnionErrorOffset(payload_ty, target);
+        const err_field_index = errUnionErrorOffset(payload_ty, mod);

-        if (operand_is_ptr or isByRef(err_union_ty)) {
+        if (operand_is_ptr or isByRef(err_union_ty, mod)) {
            const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
            const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, "");
            const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -6702,17 +6743,18 @@ pub const FuncGen = struct {
    }

    fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
        const operand = try self.resolveInst(ty_op.operand);
        const optional_ty = self.air.typeOf(ty_op.operand).childType();
        var buf: Type.Payload.ElemType = undefined;
        const payload_ty = optional_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
            // We have a pointer to a zero-bit value and we need to return
            // a pointer to a zero-bit value.
            return operand;
        }
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
            // The payload and the optional are the same value.
            return operand;
        }
@@ -6723,18 +6765,19 @@ pub const FuncGen = struct {

    fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
        comptime assert(optional_layout_version == 3);
+        const mod = self.dg.module;
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
        const operand = try self.resolveInst(ty_op.operand);
        const optional_ty = self.air.typeOf(ty_op.operand).childType();
        var buf: Type.Payload.ElemType = undefined;
        const payload_ty = optional_ty.optionalChild(&buf);
        const non_null_bit = self.context.intType(8).constInt(1, .False);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
            // We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
            _ = self.builder.buildStore(non_null_bit, operand);
            return operand;
        }
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
            // The payload and the optional are the same value.
            // Setting to non-null will be done when the payload is set.
             return operand;
@@ -6754,20 +6797,21 @@ pub const FuncGen = struct {
     }

     fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand);
         const payload_ty = self.air.typeOfIndex(inst);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // Payload value is the same as the optional value.
             return operand;
         }

         const opt_llvm_ty = try self.dg.lowerType(optional_ty);
-        const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
         return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
     }

@@ -6776,6 +6820,7 @@ pub const FuncGen = struct {
         body_tail: []const Air.Inst.Index,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
@@ -6783,25 +6828,24 @@ pub const FuncGen = struct {
         const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
         const result_ty = self.air.typeOfIndex(inst);
         const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
-        const target = self.dg.module.getTarget();

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return if (operand_is_ptr) operand else null;
         }

-        const offset = errUnionPayloadOffset(payload_ty, target);
+        const offset = errUnionPayloadOffset(payload_ty, mod);
         const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
         if (operand_is_ptr) {
             return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
-        } else if (isByRef(err_union_ty)) {
+        } else if (isByRef(err_union_ty, mod)) {
             const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
-            if (isByRef(payload_ty)) {
+            if (isByRef(payload_ty, mod)) {
                 if (self.canElideLoad(body_tail))
                     return payload_ptr;
-                return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+                return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
             }
             const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
-            load_inst.setAlignment(payload_ty.abiAlignment(target));
+            load_inst.setAlignment(payload_ty.abiAlignment(mod));
             return load_inst;
         }
         return self.builder.buildExtractValue(operand, offset, "");
@@ -6812,6 +6856,7 @@ pub const FuncGen = struct {
         inst: Air.Inst.Index,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -6828,15 +6873,14 @@ pub const FuncGen = struct {
         const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror);

         const payload_ty = err_union_ty.errorUnionPayload();
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             if (!operand_is_ptr) return operand;
             return self.builder.buildLoad(err_set_llvm_ty, operand, "");
         }

-        const target = self.dg.module.getTarget();
-        const offset = errUnionErrorOffset(payload_ty, target);
+        const offset = errUnionErrorOffset(payload_ty, mod);

-        if (operand_is_ptr or isByRef(err_union_ty)) {
+        if (operand_is_ptr or isByRef(err_union_ty, mod)) {
             const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
             const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
             return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, "");
@@ -6846,30 +6890,30 @@ pub const FuncGen = struct {
     }

     fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const err_union_ty = self.air.typeOf(ty_op.operand).childType();

         const payload_ty = err_union_ty.errorUnionPayload();
         const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero });

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             _ = self.builder.buildStore(non_error_val, operand);
             return operand;
         }

-        const target = self.dg.module.getTarget();
         const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
         {
-            const error_offset = errUnionErrorOffset(payload_ty, target);
+            const error_offset = errUnionErrorOffset(payload_ty, mod);
             // First set the non-error value.
             const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, "");
             const store_inst = self.builder.buildStore(non_error_val, non_null_ptr);
-            store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+            store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
         }
         // Then return the payload pointer (only if it is used).
         if (self.liveness.isUnused(inst))
             return null;

-        const payload_offset = errUnionPayloadOffset(payload_ty, target);
+        const payload_offset = errUnionPayloadOffset(payload_ty, mod);
         return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, "");
     }

@@ -6885,15 +6929,14 @@ pub const FuncGen = struct {
     }

     fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const target = self.dg.module.getTarget();
-
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         //const struct_ty = try self.resolveInst(ty_pl.ty);
         const struct_ty = self.air.getRefType(ty_pl.ty);
         const field_index = ty_pl.payload;

         var ptr_ty_buf: Type.Payload.Pointer = undefined;
-        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+        const mod = self.dg.module;
+        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
         const struct_llvm_ty = try self.dg.lowerType(struct_ty);
         const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, "");
         const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
@@ -6901,20 +6944,20 @@ pub const FuncGen = struct {
     }

     fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const payload_ty = self.air.typeOf(ty_op.operand);
         const non_null_bit = self.context.intType(8).constInt(1, .False);
         comptime assert(optional_layout_version == 3);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOfIndex(inst);
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             return operand;
         }
         const llvm_optional_ty = try self.dg.lowerType(optional_ty);
-        if (isByRef(optional_ty)) {
-            const target = self.dg.module.getTarget();
-            const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(target));
+        if (isByRef(optional_ty, mod)) {
+            const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod));
             const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, "");
             var ptr_ty_payload: Type.Payload.ElemType = .{
                 .base = .{ .tag = .single_mut_pointer },
@@ -6931,24 +6974,24 @@ pub const FuncGen = struct {
     }

     fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const err_un_ty = self.air.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
         const payload_ty = self.air.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return operand;
         }
         const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull();
         const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);

-        const target = self.dg.module.getTarget();
-        const payload_offset = errUnionPayloadOffset(payload_ty, target);
-        const error_offset = errUnionErrorOffset(payload_ty, target);
-        if (isByRef(err_un_ty)) {
-            const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target));
+        const payload_offset = errUnionPayloadOffset(payload_ty, mod);
+        const error_offset = errUnionErrorOffset(payload_ty, mod);
+        if (isByRef(err_un_ty, mod)) {
+            const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
             const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
             const store_inst = self.builder.buildStore(ok_err_code, err_ptr);
-            store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+            store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
             const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
             var ptr_ty_payload: Type.Payload.ElemType = .{
                 .base = .{ .tag = .single_mut_pointer },
@@ -6964,23 +7007,23 @@ pub const FuncGen = struct {
     }

     fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const err_un_ty = self.air.typeOfIndex(inst);
         const payload_ty = err_un_ty.errorUnionPayload();
         const operand = try self.resolveInst(ty_op.operand);

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return operand;
         }

         const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);

-        const target = self.dg.module.getTarget();
-        const payload_offset = errUnionPayloadOffset(payload_ty, target);
-        const error_offset = errUnionErrorOffset(payload_ty, target);
-        if (isByRef(err_un_ty)) {
-            const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target));
+        const payload_offset = errUnionPayloadOffset(payload_ty, mod);
+        const error_offset = errUnionErrorOffset(payload_ty, mod);
+        if (isByRef(err_un_ty, mod)) {
+            const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
             const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
             const store_inst = self.builder.buildStore(operand, err_ptr);
-            store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+            store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
             const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
             var ptr_ty_payload: Type.Payload.ElemType = .{
                 .base = .{ .tag = .single_mut_pointer },
@@ -7021,6 +7064,7 @@ pub const FuncGen = struct {
     }

     fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const data = self.air.instructions.items(.data)[inst].vector_store_elem;
         const extra = self.air.extraData(Air.Bin, data.payload).data;

@@ -7032,8 +7076,7 @@ pub const FuncGen = struct {
         const loaded_vector = blk: {
             const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType());
             const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, "");
-            const target = self.dg.module.getTarget();
-            load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target));
+            load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod));
             load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr()));
             break :blk load_inst;
         };
@@ -7043,24 +7086,26 @@ pub const FuncGen = struct {
     }

     fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const scalar_ty = self.air.typeOfIndex(inst).scalarType();
+        const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, "");
         return self.builder.buildUMin(lhs, rhs, "");
     }

     fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const scalar_ty = self.air.typeOfIndex(inst).scalarType();
+        const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, "");
         return self.builder.buildUMax(lhs, rhs, "");
     }

@@ -7081,14 +7126,15 @@ pub const FuncGen = struct {

     fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, "");
         return self.builder.buildNUWAdd(lhs, rhs, "");
     }

@@ -7103,14 +7149,15 @@ pub const FuncGen = struct {
     }

     fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
-        if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, "");

         return self.builder.buildUAddSat(lhs, rhs, "");
     }

@@ -7118,14 +7165,15 @@ pub const FuncGen = struct {

     fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, "");
         return self.builder.buildNUWSub(lhs, rhs, "");
     }

@@ -7140,28 +7188,30 @@ pub const FuncGen = struct {
     }

     fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
-        if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, "");
         return self.builder.buildUSubSat(lhs, rhs, "");
     }

     fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, "");
         return self.builder.buildNUWMul(lhs, rhs, "");
     }

@@ -7176,14 +7226,15 @@ pub const FuncGen = struct {
     }

     fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
-        if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, "");
         return self.builder.buildUMulFixSat(lhs, rhs, "");
     }

@@ -7201,38 +7252,39 @@ pub const FuncGen = struct {

     fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isRuntimeFloat()) {
             const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
             return self.buildFloatOp(.trunc, inst_ty, 1, .{result});
         }
-        if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, "");
         return self.builder.buildUDiv(lhs, rhs, "");
     }

     fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isRuntimeFloat()) {
             const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
             return self.buildFloatOp(.floor, inst_ty, 1, .{result});
         }
-        if (scalar_ty.isSignedInt()) {
-            const target = self.dg.module.getTarget();
+        if (scalar_ty.isSignedInt(mod)) {
             const inst_llvm_ty = try self.dg.lowerType(inst_ty);

-            const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
-            const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
+            const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+            const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
                 const vec_len = inst_ty.vectorLen();
                 const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7258,40 +7310,43 @@ pub const FuncGen = struct {

     fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, "");
         return self.builder.buildExactUDiv(lhs, rhs, "");
     }

     fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, "");
         return self.builder.buildURem(lhs, rhs, "");
     }

     fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const inst_ty = self.air.typeOfIndex(inst);
         const inst_llvm_ty = try self.dg.lowerType(inst_ty);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);

         if (scalar_ty.isRuntimeFloat()) {
             const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
@@ -7301,10 +7356,9 @@ pub const FuncGen = struct {
             const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero });
             return self.builder.buildSelect(ltz, c, a, "");
         }
-        if (scalar_ty.isSignedInt()) {
-            const target = self.dg.module.getTarget();
-            const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
-            const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
+        if (scalar_ty.isSignedInt(mod)) {
+            const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+            const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
                 const vec_len = inst_ty.vectorLen();
                 const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7386,6 +7440,7 @@ pub const FuncGen = struct {
         signed_intrinsic: []const u8,
         unsigned_intrinsic: []const u8,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;

@@ -7393,16 +7448,14 @@ pub const FuncGen = struct {
         const rhs = try self.resolveInst(extra.rhs);

         const lhs_ty = self.air.typeOf(extra.lhs);
-        const scalar_ty = lhs_ty.scalarType();
+        const scalar_ty = lhs_ty.scalarType(mod);
         const dest_ty = self.air.typeOfIndex(inst);

-        const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
+        const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;

         const llvm_lhs_ty = try self.dg.lowerType(lhs_ty);
         const llvm_dest_ty = try self.dg.lowerType(dest_ty);

-        const tg = self.dg.module.getTarget();
-
         const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
         const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");

@@ -7410,12 +7463,11 @@ pub const FuncGen = struct {
         const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");

         var ty_buf: Type.Payload.Pointer = undefined;
-        const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
-        const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+        const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
+        const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;

-        if (isByRef(dest_ty)) {
-            const target = self.dg.module.getTarget();
-            const result_alignment = dest_ty.abiAlignment(target);
+        if (isByRef(dest_ty, mod)) {
+            const result_alignment = dest_ty.abiAlignment(mod);
             const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
             {
                 const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7486,8 +7538,9 @@ pub const FuncGen = struct {
         ty: Type,
         params: [2]*llvm.Value,
     ) !*llvm.Value {
+        const mod = self.dg.module;
         const target = self.dg.module.getTarget();
-        const scalar_ty = ty.scalarType();
+        const scalar_ty = ty.scalarType(mod);
         const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);

         if (intrinsicsAllowed(scalar_ty, target)) {
@@ -7531,7 +7584,7 @@ pub const FuncGen = struct {
             .gte => .SGE,
         };

-        if (ty.zigTypeTag() == .Vector) {
+        if (ty.zigTypeTag(mod) == .Vector) {
             const vec_len = ty.vectorLen();
             const vector_result_ty = llvm_i32.vectorType(vec_len);

@@ -7587,8 +7640,9 @@ pub const FuncGen = struct {
         comptime params_len: usize,
         params: [params_len]*llvm.Value,
     ) !*llvm.Value {
-        const target = self.dg.module.getTarget();
-        const scalar_ty = ty.scalarType();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
+        const scalar_ty = ty.scalarType(mod);
         const llvm_ty = try self.dg.lowerType(ty);
         const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);

@@ -7615,7 +7669,7 @@ pub const FuncGen = struct {
             const one = int_llvm_ty.constInt(1, .False);
             const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
             const sign_mask = one.constShl(shift_amt);
-            const result = if (ty.zigTypeTag() == .Vector) blk: {
+            const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
                 const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
                 const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
                 const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
@@ -7662,7 +7716,7 @@ pub const FuncGen = struct {
             .libc => |fn_name| b: {
                 const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
                 const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
-                if (ty.zigTypeTag() == .Vector) {
+                if (ty.zigTypeTag(mod) == .Vector) {
                     const result = llvm_ty.getUndef();
                     return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen());
                 }
@@ -7686,6 +7740,7 @@ pub const FuncGen = struct {
     }

     fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;

@@ -7694,21 +7749,19 @@ pub const FuncGen = struct {
         const lhs_ty = self.air.typeOf(extra.lhs);
         const rhs_ty = self.air.typeOf(extra.rhs);

-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);

         const dest_ty = self.air.typeOfIndex(inst);
         const llvm_dest_ty = try self.dg.lowerType(dest_ty);

-        const tg = self.dg.module.getTarget();
-
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
         else
             rhs;

         const result = self.builder.buildShl(lhs, casted_rhs, "");
-        const reconstructed = if (lhs_scalar_ty.isSignedInt())
+        const reconstructed = if (lhs_scalar_ty.isSignedInt(mod))
             self.builder.buildAShr(result, casted_rhs, "")
         else
             self.builder.buildLShr(result, casted_rhs, "");
@@ -7716,12 +7769,11 @@ pub const FuncGen = struct {
         const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");

         var ty_buf: Type.Payload.Pointer = undefined;
-        const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
-        const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+        const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
+        const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;

-        if (isByRef(dest_ty)) {
-            const target = self.dg.module.getTarget();
-            const result_alignment = dest_ty.abiAlignment(target);
+        if (isByRef(dest_ty, mod)) {
+            const result_alignment = dest_ty.abiAlignment(mod);
             const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
             {
                 const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7763,6 +7815,7 @@ pub const FuncGen = struct {
     }

     fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;

         const lhs = try self.resolveInst(bin_op.lhs);
@@ -7770,20 +7823,19 @@ pub const FuncGen = struct {
         const lhs_ty = self.air.typeOf(bin_op.lhs);
         const rhs_ty = self.air.typeOf(bin_op.rhs);

-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);

-        const tg = self.dg.module.getTarget();
-
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
         else
             rhs;
-        if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
+        if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, "");
         return self.builder.buildNUWShl(lhs, casted_rhs, "");
     }

     fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;

         const lhs = try self.resolveInst(bin_op.lhs);
@@ -7791,12 +7843,10 @@ pub const FuncGen = struct {
         const lhs_type = self.air.typeOf(bin_op.lhs);
         const rhs_type = self.air.typeOf(bin_op.rhs);

-        const lhs_scalar_ty = lhs_type.scalarType();
-        const rhs_scalar_ty = rhs_type.scalarType();
-
-        const tg = self.dg.module.getTarget();
+        const lhs_scalar_ty = lhs_type.scalarType(mod);
+        const rhs_scalar_ty = rhs_type.scalarType(mod);

-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "")
         else
             rhs;
@@ -7804,6 +7854,7 @@ pub const FuncGen = struct {
     }

     fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;

         const lhs = try self.resolveInst(bin_op.lhs);
@@ -7811,17 +7862,16 @@ pub const FuncGen = struct {
         const lhs_ty = self.air.typeOf(bin_op.lhs);
         const rhs_ty = self.air.typeOf(bin_op.rhs);

-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
-        const tg = self.dg.module.getTarget();
-        const lhs_bits = lhs_scalar_ty.bitSize(tg);
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);
+        const lhs_bits = lhs_scalar_ty.bitSize(mod);

-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_bits)
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits)
             self.builder.buildZExt(rhs, lhs.typeOf(), "")
         else
             rhs;

-        const result = if (lhs_scalar_ty.isSignedInt())
+        const result = if (lhs_scalar_ty.isSignedInt(mod))
             self.builder.buildSShlSat(lhs, casted_rhs, "")
         else
             self.builder.buildUShlSat(lhs, casted_rhs, "");
@@ -7834,7 +7884,7 @@ pub const FuncGen = struct {
         const lhs_scalar_llvm_ty = try self.dg.lowerType(lhs_scalar_ty);
         const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
         const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
-        if (rhs_ty.zigTypeTag() == .Vector) {
+        if (rhs_ty.zigTypeTag(mod) == .Vector) {
             const vec_len = rhs_ty.vectorLen();
             const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
             const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
@@ -7847,6 +7897,7 @@ pub const FuncGen = struct {
     }

     fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;

         const lhs = try self.resolveInst(bin_op.lhs);
@@ -7854,16 +7905,14 @@ pub const FuncGen = struct {
         const lhs_ty = self.air.typeOf(bin_op.lhs);
         const rhs_ty = self.air.typeOf(bin_op.rhs);

-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
-
-        const tg = self.dg.module.getTarget();
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);

-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
         else
             rhs;
-        const is_signed_int = lhs_scalar_ty.isSignedInt();
+        const is_signed_int = lhs_scalar_ty.isSignedInt(mod);

         if (is_exact) {
             if (is_signed_int) {
@@ -7881,14 +7930,14 @@ pub const FuncGen = struct {
     }

     fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const dest_ty = self.air.typeOfIndex(inst);
-        const dest_info = dest_ty.intInfo(target);
+        const dest_info = dest_ty.intInfo(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_info = operand_ty.intInfo(target);
+        const operand_info = operand_ty.intInfo(mod);

         if (operand_info.bits < dest_info.bits) {
             switch (operand_info.signedness) {
@@ -7910,11 +7959,12 @@ pub const FuncGen = struct {
     }

     fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
         const dest_ty = self.air.typeOfIndex(inst);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);

@@ -7939,11 +7989,12 @@ pub const FuncGen = struct {
     }

     fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
         const dest_ty = self.air.typeOfIndex(inst);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);

@@ -7985,10 +8036,10 @@ pub const FuncGen = struct {
     }

     fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value {
-        const operand_is_ref = isByRef(operand_ty);
-        const result_is_ref = isByRef(inst_ty);
+        const mod = self.dg.module;
+        const operand_is_ref = isByRef(operand_ty, mod);
+        const result_is_ref = isByRef(inst_ty, mod);
         const llvm_dest_ty = try self.dg.lowerType(inst_ty);
-        const target = self.dg.module.getTarget();

         if (operand_is_ref and result_is_ref) {
             // They are both pointers, so just return the same opaque pointer :)
@@ -8001,20 +8052,20 @@ pub const FuncGen = struct {
             return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, "");
         }

-        if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
+        if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
             return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
         }

-        if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
+        if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
             const elem_ty = operand_ty.childType();
             if (!result_is_ref) {
                 return self.dg.todo("implement bitcast vector to non-ref array", .{});
             }
             const array_ptr = self.buildAlloca(llvm_dest_ty, null);
-            const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+            const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
                 const llvm_store = self.builder.buildStore(operand, array_ptr);
-                llvm_store.setAlignment(inst_ty.abiAlignment(target));
+                llvm_store.setAlignment(inst_ty.abiAlignment(mod));
             } else {
                 // If the ABI size of the element type is not evenly divisible by size in bits;
                 // a simple bitcast will not work, and we fall back to extractelement.
@@ -8033,19 +8084,19 @@ pub const FuncGen = struct {
                 }
             }
             return array_ptr;
-        } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
+        } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
             const elem_ty = operand_ty.childType();
             const llvm_vector_ty = try self.dg.lowerType(inst_ty);
             if (!operand_is_ref) {
                 return self.dg.todo("implement bitcast non-ref array to vector", .{});
             }

-            const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+            const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
                 const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
                 // The array is aligned to the element's alignment, while the vector might have a completely
                 // different alignment. This means we need to enforce the alignment of this load.
-                vector.setAlignment(elem_ty.abiAlignment(target));
+                vector.setAlignment(elem_ty.abiAlignment(mod));
                 return vector;
             } else {
                 // If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8073,12 +8124,12 @@ pub const FuncGen = struct {

         if (operand_is_ref) {
             const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
-            load_inst.setAlignment(operand_ty.abiAlignment(target));
+            load_inst.setAlignment(operand_ty.abiAlignment(mod));
             return load_inst;
         }

         if (result_is_ref) {
-            const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+            const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
             const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
             const store_inst = self.builder.buildStore(operand, result_ptr);
             store_inst.setAlignment(alignment);
@@ -8089,7 +8140,7 @@ pub const FuncGen = struct {
         // Both our operand and our result are values, not pointers,
        // but LLVM won't let us bitcast struct values.
         // Therefore, we store operand to alloca, then load for result.
-        const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+        const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
         const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
         const store_inst = self.builder.buildStore(operand, result_ptr);
         store_inst.setAlignment(alignment);
@@ -8118,12 +8169,13 @@ pub const FuncGen = struct {
         }

         const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
+        const mod = self.dg.module;
         const func = self.dg.decl.getFunction().?;
-        const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
+        const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
         const lbrace_col = func.lbrace_column + 1;
         const di_local_var = dib.createParameterVariable(
             self.di_scope.?,
-            func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args
+            func.getParamName(mod, src_index).ptr, // TODO test 0 bit args
             self.di_file.?,
             lbrace_line,
             try self.dg.object.lowerDebugType(inst_ty, .full),
@@ -8134,10 +8186,10 @@ pub const FuncGen = struct {
         const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null);
         const insert_block = self.builder.getInsertBlock();
-        if (isByRef(inst_ty)) {
+        if (isByRef(inst_ty, mod)) {
             _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block);
         } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
-            const alignment = inst_ty.abiAlignment(self.dg.module.getTarget());
+            const alignment = inst_ty.abiAlignment(mod);
             const alloca = self.buildAlloca(arg_val.typeOf(), alignment);
             const store_inst = self.builder.buildStore(arg_val, alloca);
             store_inst.setAlignment(alignment);
@@ -8153,22 +8205,22 @@ pub const FuncGen = struct {

     fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ptr_ty = self.air.typeOfIndex(inst);
         const pointee_type = ptr_ty.childType();
-        if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        const mod = self.dg.module;
+        if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);

         const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
-        const target = self.dg.module.getTarget();
-        const alignment = ptr_ty.ptrAlignment(target);
+        const alignment = ptr_ty.ptrAlignment(mod);
         return self.buildAlloca(pointee_llvm_ty, alignment);
     }

     fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ptr_ty = self.air.typeOfIndex(inst);
         const ret_ty = ptr_ty.childType();
-        if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        const mod = self.dg.module;
+        if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
         if (self.ret_ptr) |ret_ptr| return ret_ptr;
         const ret_llvm_ty = try self.dg.lowerType(ret_ty);
-        const target = self.dg.module.getTarget();
-        return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(target));
+        return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod));
     }

     /// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -8182,8 +8234,9 @@ pub const FuncGen = struct {
         const dest_ptr = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
+        const mod = self.dg.module;

-        const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+        const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
         if (val_is_undef) {
             // Even if safety is disabled, we still emit a memset to undefined since it conveys
             // extra information to LLVM. However, safety makes the difference between using
@@ -8193,13 +8246,12 @@ pub const FuncGen = struct {
                 u8_llvm_ty.constInt(0xaa, .False)
             else
                 u8_llvm_ty.getUndef();
-            const target = self.dg.module.getTarget();
-            const operand_size = operand_ty.abiSize(target);
+            const operand_size = operand_ty.abiSize(mod);
             const usize_llvm_ty = try self.dg.lowerType(Type.usize);
             const len = usize_llvm_ty.constInt(operand_size, .False);
-            const dest_ptr_align = ptr_ty.ptrAlignment(target);
+            const dest_ptr_align = ptr_ty.ptrAlignment(mod);
             _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
-            if (safety and self.dg.module.comp.bin_file.options.valgrind) {
+            if (safety and mod.comp.bin_file.options.valgrind) {
                 self.valgrindMarkUndef(dest_ptr, len);
             }
             return null;
@@ -8230,6 +8282,7 @@ pub const FuncGen = struct {
     }

     fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = fg.dg.module;
         const inst = body_tail[0];
         const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
         const ptr_ty = fg.air.typeOf(ty_op.operand);
@@ -8237,7 +8290,7 @@ pub const FuncGen = struct {
         const ptr = try fg.resolveInst(ty_op.operand);

         elide: {
-            if (!isByRef(ptr_info.pointee_type)) break :elide;
+            if (!isByRef(ptr_info.pointee_type, mod)) break :elide;
             if (!canElideLoad(fg, body_tail)) break :elide;
             return ptr;
         }
@@ -8261,8 +8314,9 @@ pub const FuncGen = struct {

     fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         _ = inst;
+        const mod = self.dg.module;
         const llvm_usize = try self.dg.lowerType(Type.usize);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         if (!target_util.supportsReturnAddress(target)) {
             // https://github.com/ziglang/zig/issues/11946
             return llvm_usize.constNull();
@@ -8301,6 +8355,7 @@ pub const FuncGen = struct {
     }

     fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
         const ptr = try self.resolveInst(extra.ptr);
@@ -8310,7 +8365,7 @@ pub const FuncGen = struct {
         const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
         if (opt_abi_ty) |abi_ty| {
             // operand needs widening and truncating
-            if (operand_ty.isSignedInt()) {
+            if (operand_ty.isSignedInt(mod)) {
                 expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
                 new_value = self.builder.buildSExt(new_value, abi_ty, "");
             } else {
@@ -8336,7 +8391,7 @@ pub const FuncGen = struct {
         }

         const success_bit = self.builder.buildExtractValue(result, 1, "");
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
         }

@@ -8347,13 +8402,14 @@ pub const FuncGen = struct {
     }

     fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
         const ptr = try self.resolveInst(pl_op.operand);
         const ptr_ty = self.air.typeOf(pl_op.operand);
         const operand_ty = ptr_ty.elemType();
         const operand = try self.resolveInst(extra.operand);
-        const is_signed_int = operand_ty.isSignedInt();
+        const is_signed_int = operand_ty.isSignedInt(mod);
         const is_float = operand_ty.isRuntimeFloat();
         const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
         const ordering = toLlvmAtomicOrdering(extra.ordering());
@@ -8402,17 +8458,17 @@ pub const FuncGen = struct {
     }

     fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
         const ptr = try self.resolveInst(atomic_load.ptr);
         const ptr_ty = self.air.typeOf(atomic_load.ptr);
         const ptr_info = ptr_ty.ptrInfo().data;
         const elem_ty = ptr_info.pointee_type;
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime())
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
             return null;
         const ordering = toLlvmAtomicOrdering(atomic_load.order);
         const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false);
-        const target = self.dg.module.getTarget();
-        const ptr_alignment = ptr_info.alignment(target);
+        const ptr_alignment = ptr_info.alignment(mod);
         const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile");
         const elem_llvm_ty = try self.dg.lowerType(elem_ty);

@@ -8436,17 +8492,18 @@ pub const FuncGen = struct {
         inst: Air.Inst.Index,
         ordering: llvm.AtomicOrdering,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
-        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
+        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
         const ptr = try self.resolveInst(bin_op.lhs);
         var element = try self.resolveInst(bin_op.rhs);
         const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);

         if (opt_abi_ty) |abi_ty| {
             // operand needs widening
-            if (operand_ty.isSignedInt()) {
+            if (operand_ty.isSignedInt(mod)) {
                 element = self.builder.buildSExt(element, abi_ty, "");
             } else {
                 element = self.builder.buildZExt(element, abi_ty, "");
@@ -8457,18 +8514,19 @@ pub const FuncGen = struct {
     }

     fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const elem_ty = self.air.typeOf(bin_op.rhs);
         const module = self.dg.module;
         const target = module.getTarget();
-        const dest_ptr_align = ptr_ty.ptrAlignment(target);
+        const dest_ptr_align = ptr_ty.ptrAlignment(mod);
         const u8_llvm_ty = self.context.intType(8);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
         const is_volatile = ptr_ty.isVolatilePtr();

-        if (self.air.value(bin_op.rhs)) |elem_val| {
+        if (self.air.value(bin_op.rhs, mod)) |elem_val| {
             if (elem_val.isUndefDeep()) {
                 // Even if safety is disabled, we still emit a memset to undefined since it conveys
                 // extra information to LLVM. However, safety makes the difference between using
@@ -8503,7 +8561,7 @@ pub const FuncGen = struct {
         }

         const value = try self.resolveInst(bin_op.rhs);
-        const elem_abi_size = elem_ty.abiSize(target);
+        const elem_abi_size = elem_ty.abiSize(mod);

         if (elem_abi_size == 1) {
             // In this case we can take advantage of LLVM's intrinsic.
@@ -8551,9 +8609,9 @@ pub const FuncGen = struct {
         _ = self.builder.buildCondBr(end, body_block, end_block);

         self.builder.positionBuilderAtEnd(body_block);
-        const elem_abi_alignment = elem_ty.abiAlignment(target);
+        const elem_abi_alignment = elem_ty.abiAlignment(mod);
         const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align);
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
             _ = self.builder.buildMemCpy(
                 it_ptr,
                 it_ptr_alignment,
@@ -8589,13 +8647,13 @@ pub const FuncGen = struct {
         const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
         const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
+        const mod = self.dg.module;
         const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
-        const target = self.dg.module.getTarget();
         _ = self.builder.buildMemCpy(
             dest_ptr,
-            dest_ptr_ty.ptrAlignment(target),
+            dest_ptr_ty.ptrAlignment(mod),
             src_ptr,
-            src_ptr_ty.ptrAlignment(target),
+            src_ptr_ty.ptrAlignment(mod),
             len,
             is_volatile,
         );
@@ -8603,10 +8661,10 @@ pub const FuncGen = struct {
     }

     fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const un_ty = self.air.typeOf(bin_op.lhs).childType();
-        const target = self.dg.module.getTarget();
-        const layout = un_ty.unionGetLayout(target);
+        const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_ptr = try self.resolveInst(bin_op.lhs);
         const new_tag = try self.resolveInst(bin_op.rhs);
@@ -8624,13 +8682,13 @@ pub const FuncGen = struct {
     }

     fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const un_ty = self.air.typeOf(ty_op.operand);
-        const target = self.dg.module.getTarget();
-        const layout = un_ty.unionGetLayout(target);
+        const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_handle = try self.resolveInst(ty_op.operand);
-        if (isByRef(un_ty)) {
+        if (isByRef(un_ty, mod)) {
             const llvm_un_ty = try self.dg.lowerType(un_ty);
             if (layout.payload_size == 0) {
                 return self.builder.buildLoad(llvm_un_ty, union_handle, "");
@@ -8666,6 +8724,7 @@ pub const FuncGen = struct {
     }

     fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_ty = self.air.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
@@ -8679,9 +8738,8 @@ pub const FuncGen = struct {

         const result_ty = self.air.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
-        const target = self.dg.module.getTarget();
-        const bits = operand_ty.intInfo(target).bits;
-        const result_bits = result_ty.intInfo(target).bits;
+        const bits = operand_ty.intInfo(mod).bits;
+        const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
             return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
         } else if (bits < result_bits) {
@@ -8692,6 +8750,7 @@ pub const FuncGen = struct {
     }

     fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_ty = self.air.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
@@ -8704,9 +8763,8 @@ pub const FuncGen = struct {

         const result_ty = self.air.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
-        const target = self.dg.module.getTarget();
-        const bits = operand_ty.intInfo(target).bits;
-        const result_bits = result_ty.intInfo(target).bits;
+        const bits = operand_ty.intInfo(mod).bits;
+        const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
             return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
         } else if (bits < result_bits) {
@@ -8717,10 +8775,10 @@ pub const FuncGen = struct {
     }

     fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_ty = self.air.typeOf(ty_op.operand);
-        var bits = operand_ty.intInfo(target).bits;
+        var bits = operand_ty.intInfo(mod).bits;
         assert(bits % 8 == 0);

         var operand = try self.resolveInst(ty_op.operand);
@@ -8730,7 +8788,7 @@ pub const FuncGen = struct {
             // If not an even byte-multiple, we need zero-extend + shift-left 1 byte
             // The truncated result at the end will be the correct bswap
             const scalar_llvm_ty = self.context.intType(bits + 8);
-            if (operand_ty.zigTypeTag() == .Vector) {
+            if (operand_ty.zigTypeTag(mod) == .Vector) {
                 const vec_len = operand_ty.vectorLen();
                 operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
@@ -8759,7 +8817,7 @@ pub const FuncGen = struct {

         const result_ty = self.air.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
-        const result_bits = result_ty.intInfo(target).bits;
+        const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
             return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
         } else if (bits < result_bits) {
@@ -8770,6 +8828,7 @@ pub const FuncGen = struct {
     }

     fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const error_set_ty = self.air.getRefType(ty_op.ty);
@@ -8781,7 +8840,7 @@ pub const FuncGen = struct {
         const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));

         for (names) |name| {
-            const err_int = self.dg.module.global_error_set.get(name).?;
+            const err_int = mod.global_error_set.get(name).?;
             const this_tag_int_value = int: {
                 var tag_val_payload: Value.Payload.U64 = .{
                     .base = .{ .tag = .int_u64 },
@@ -8841,8 +8900,7 @@ pub const FuncGen = struct {
         defer self.gpa.free(fqn);
         const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});

-        var int_tag_type_buffer: Type.Payload.Bits = undefined;
-        const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+        const int_tag_ty = enum_ty.intTagType();

         const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};

         const llvm_ret_ty = try self.dg.lowerType(Type.bool);
@@ -8923,11 +8981,9 @@ pub const FuncGen = struct {
         const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
         const llvm_ret_ty = try self.dg.lowerType(slice_ty);
         const usize_llvm_ty = try self.dg.lowerType(Type.usize);
-        const target = self.dg.module.getTarget();
-        const slice_alignment = slice_ty.abiAlignment(target);
+        const slice_alignment = slice_ty.abiAlignment(mod);

-        var int_tag_type_buffer: Type.Payload.Bits = undefined;
-        const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+        const int_tag_ty = enum_ty.intTagType();

         const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
         const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -9057,6 +9113,7 @@ pub const FuncGen = struct {
     }

     fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
         const a = try self.resolveInst(extra.a);
@@ -9077,11 +9134,11 @@ pub const FuncGen = struct {

         for (values, 0..) |*val, i| {
             var buf: Value.ElemValueBuffer = undefined;
-            const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
+            const elem = mask.elemValueBuffer(mod, i, &buf);
             if (elem.isUndef()) {
                 val.* = llvm_i32.getUndef();
             } else {
-                const int = elem.toSignedInt(self.dg.module.getTarget());
+                const int = elem.toSignedInt(mod);
                 const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
                 val.* = llvm_i32.constInt(unsigned, .False);
             }
@@ -9157,7 +9214,8 @@ pub const FuncGen = struct {

     fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();

         const reduce = self.air.instructions.items(.data)[inst].reduce;
         const operand = try self.resolveInst(reduce.operand);
@@ -9168,21 +9226,21 @@ pub const FuncGen = struct {
             .And => return self.builder.buildAndReduce(operand),
             .Or => return self.builder.buildOrReduce(operand),
             .Xor => return self.builder.buildXorReduce(operand),
-            .Min => switch (scalar_ty.zigTypeTag()) {
-                .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()),
+            .Min => switch (scalar_ty.zigTypeTag(mod)) {
+                .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     return self.builder.buildFPMinReduce(operand);
                 },
                 else => unreachable,
             },
-            .Max => switch (scalar_ty.zigTypeTag()) {
-                .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()),
+            .Max => switch (scalar_ty.zigTypeTag(mod)) {
+                .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     return self.builder.buildFPMaxReduce(operand);
                 },
                 else => unreachable,
             },
-            .Add => switch (scalar_ty.zigTypeTag()) {
+            .Add => switch (scalar_ty.zigTypeTag(mod)) {
                 .Int => return self.builder.buildAddReduce(operand),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9191,7 +9249,7 @@ pub const FuncGen = struct {
                 },
                 else => unreachable,
             },
-            .Mul => switch (scalar_ty.zigTypeTag()) {
+            .Mul => switch (scalar_ty.zigTypeTag(mod)) {
                 .Int => return self.builder.buildMulReduce(operand),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9247,9 +9305,9 @@ pub const FuncGen = struct {
         const len = @intCast(usize, result_ty.arrayLen());
         const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
         const llvm_result_ty = try self.dg.lowerType(result_ty);
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;

-        switch (result_ty.zigTypeTag()) {
+        switch (result_ty.zigTypeTag(mod)) {
            .Vector => {
                 const llvm_u32 = self.context.intType(32);

@@ -9265,7 +9323,7 @@ pub const FuncGen = struct {
                 if (result_ty.containerLayout() == .Packed) {
                     const struct_obj = result_ty.castTag(.@"struct").?.data;
                     assert(struct_obj.haveLayout());
-                    const big_bits = struct_obj.backing_int_ty.bitSize(target);
+                    const big_bits = struct_obj.backing_int_ty.bitSize(mod);
                     const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
                     const fields = struct_obj.fields.values();
                     comptime assert(Type.packed_struct_layout_version == 2);

                     var running_bits: u16 = 0;
                     for (elements, 0..) |elem, i| {
                         const field = fields[i];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

                         const non_int_val = try self.resolveInst(elem);
-                        const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+                        const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
                         const small_int_ty = self.context.intType(ty_bit_size);
-                        const small_int_val = if (field.ty.isPtrAtRuntime())
+                        const small_int_val = if (field.ty.isPtrAtRuntime(mod))
                             self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
                         else
                             self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -9296,24 +9354,24 @@ pub const FuncGen = struct {

                 var ptr_ty_buf: Type.Payload.Pointer = undefined;

-                if (isByRef(result_ty)) {
+                if (isByRef(result_ty, mod)) {
                     const llvm_u32 = self.context.intType(32);
                     // TODO in debug builds init to undef so that the padding will be 0xaa
                     // even if we fully populate the fields.
-                    const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
+                    const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));

                     var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
                     for (elements, 0..) |elem, i| {
-                        if (result_ty.structFieldValueComptime(i) != null) continue;
+                        if (result_ty.structFieldValueComptime(mod, i) != null) continue;

                         const llvm_elem = try self.resolveInst(elem);
-                        const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?;
+                        const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
                         indices[1] = llvm_u32.constInt(llvm_i, .False);

                         const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
                         var field_ptr_payload: Type.Payload.Pointer = .{
                             .data = .{
                                 .pointee_type = self.air.typeOf(elem),
-                                .@"align" = result_ty.structFieldAlign(i, target),
+                                .@"align" = result_ty.structFieldAlign(i, mod),
                                 .@"addrspace" = .generic,
                             },
                         };
@@ -9325,20 +9383,20 @@ pub const FuncGen = struct {
                 } else {
                     var result = llvm_result_ty.getUndef();
                     for (elements, 0..) |elem, i| {
-                        if (result_ty.structFieldValueComptime(i) != null) continue;
+                        if (result_ty.structFieldValueComptime(mod, i) != null) continue;

                         const llvm_elem = try self.resolveInst(elem);
-                        const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?;
+                        const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
                         result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
                     }
                     return result;
                 }
             },
             .Array => {
-                assert(isByRef(result_ty));
+                assert(isByRef(result_ty, mod));

                 const llvm_usize = try self.dg.lowerType(Type.usize);
-                const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
+                const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));

                 const array_info = result_ty.arrayInfo();
                 var elem_ptr_payload: Type.Payload.Pointer = .{
@@ -9379,22 +9437,22 @@ pub const FuncGen = struct {
     }

     fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
         const union_ty = self.air.typeOfIndex(inst);
         const union_llvm_ty = try self.dg.lowerType(union_ty);
-        const target = self.dg.module.getTarget();
-        const layout = union_ty.unionGetLayout(target);
+        const layout = union_ty.unionGetLayout(mod);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;

         if (union_obj.layout == .Packed) {
-            const big_bits = union_ty.bitSize(target);
+            const big_bits = union_ty.bitSize(mod);
             const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
             const field = union_obj.fields.values()[extra.field_index];
             const non_int_val = try self.resolveInst(extra.init);
-            const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+            const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
             const small_int_ty = self.context.intType(ty_bit_size);
-            const small_int_val = if (field.ty.isPtrAtRuntime())
+            const small_int_val = if (field.ty.isPtrAtRuntime(mod))
                 self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
             else
                 self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -9412,16 +9470,16 @@ pub const FuncGen = struct {
             const tag_val = Value.initPayload(&tag_val_payload.base);
             var int_payload: Value.Payload.U64 = undefined;
             const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
-            break :blk tag_int_val.toUnsignedInt(target);
+            break :blk tag_int_val.toUnsignedInt(mod);
         };
         if (layout.payload_size == 0) {
             if (layout.tag_size == 0) {
                 return null;
             }
-            assert(!isByRef(union_ty));
+            assert(!isByRef(union_ty, mod));
             return union_llvm_ty.constInt(tag_int, .False);
         }
-        assert(isByRef(union_ty));
+        assert(isByRef(union_ty, mod));
         // The llvm type of the alloca will be the named LLVM union type, and will not
         // necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set @@ -9431,12 +9489,12 @@ pub const FuncGen = struct { assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; const field_llvm_ty = try self.dg.lowerType(field.ty); - const field_size = field.ty.abiSize(target); - const field_align = field.normalAlignment(target); + const field_size = field.ty.abiSize(mod); + const field_align = field.normalAlignment(mod); const llvm_union_ty = t: { const payload = p: { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = @intCast(c_uint, layout.payload_size); break :p self.context.intType(8).arrayType(padding_len); } @@ -9511,7 +9569,7 @@ pub const FuncGen = struct { const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty); const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); const store_inst = self.builder.buildStore(llvm_tag, field_ptr); - store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target)); + store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); } return result_ptr; @@ -9535,7 +9593,8 @@ pub const FuncGen = struct { // by the target. // To work around this, don't emit llvm.prefetch in this case. // See https://bugs.llvm.org/show_bug.cgi?id=21037 - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const target = mod.getTarget(); switch (prefetch.cache) { .instruction => switch (target.cpu.arch) { .x86_64, @@ -9658,8 +9717,9 @@ pub const FuncGen = struct { return table; } + const mod = self.dg.module; const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget()); + const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); @@ -9703,14 +9763,14 @@ pub const FuncGen = struct { ) !*llvm.Value { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); + const mod = fg.dg.module; - if (isByRef(opt_ty)) { + if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. 
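// (A hedged aside, not part of the patch itself: a byref optional is lowered
// as an aggregate whose first field is the payload, roughly:
//
//     const LoweredOptional = extern struct {
//         payload: Payload, // field 0, what the GEP below addresses
//         non_null: u8, // field 1, the "is set" flag
//     };
//
// where `Payload` is a hypothetical stand-in for the lowered payload type.)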
const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, ""); - const target = fg.dg.module.getTarget(); - const payload_alignment = payload_ty.abiAlignment(target); - if (isByRef(payload_ty)) { + const payload_alignment = payload_ty.abiAlignment(mod); + if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; @@ -9722,7 +9782,7 @@ pub const FuncGen = struct { return load_inst; } - assert(!isByRef(payload_ty)); + assert(!isByRef(payload_ty, mod)); return fg.builder.buildExtractValue(opt_handle, 0, ""); } @@ -9734,10 +9794,10 @@ pub const FuncGen = struct { ) !?*llvm.Value { const optional_llvm_ty = try self.dg.lowerType(optional_ty); const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); + const mod = self.dg.module; - if (isByRef(optional_ty)) { - const target = self.dg.module.getTarget(); - const payload_alignment = optional_ty.abiAlignment(target); + if (isByRef(optional_ty, mod)) { + const payload_alignment = optional_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment); { @@ -9765,9 +9825,9 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !?*llvm.Value { - const target = self.dg.object.target; const struct_ty = struct_ptr_ty.childType(); - switch (struct_ty.zigTypeTag()) { + const mod = self.dg.module; + switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { const result_ty = self.air.typeOfIndex(inst); @@ -9783,7 +9843,7 @@ pub const FuncGen = struct { // We have a pointer to a packed struct field that happens to be byte-aligned. // Offset our operand pointer by the correct number of bytes. - const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target); + const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; const byte_llvm_ty = self.context.intType(8); const llvm_usize = try self.dg.lowerType(Type.usize); @@ -9795,7 +9855,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty); var ty_buf: Type.Payload.Pointer = undefined; - if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| { + if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| { return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, ""); } else { // If we found no index then this means this is a zero sized field at the @@ -9803,14 +9863,14 @@ pub const FuncGen = struct { // the index to the element at index `1` to get a pointer to the end of // the struct. 
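// (A hedged illustration of the fallback described above: for a zero-sized
// field the code below builds a one-past-the-end pointer. In Zig-ish
// pseudocode, with `struct_has_runtime_bits`, `gep`, and `end_ptr` as
// hypothetical names for what @boolToInt and buildInBoundsGEP compute:
//
//     const end_index: u32 = if (struct_has_runtime_bits) 1 else 0;
//     const end_ptr = gep(struct_llvm_ty, struct_ptr, end_index);
// )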
const llvm_u32 = self.context.intType(32); - const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False); + const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); const indices: [1]*llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); } }, }, .Union => { - const layout = struct_ty.unionGetLayout(target); + const layout = struct_ty.unionGetLayout(mod); if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr; const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_llvm_ty = try self.dg.lowerType(struct_ty); @@ -9835,12 +9895,12 @@ pub const FuncGen = struct { ptr_alignment: u32, is_volatile: bool, ) !*llvm.Value { + const mod = fg.dg.module; const pointee_llvm_ty = try fg.dg.lowerType(pointee_type); - const target = fg.dg.module.getTarget(); - const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target)); + const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); - const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits); - const size_bytes = pointee_type.abiSize(target); + const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); + const size_bytes = pointee_type.abiSize(mod); _ = fg.builder.buildMemCpy( result_ptr, result_align, @@ -9856,11 +9916,11 @@ pub const FuncGen = struct { /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { + const mod = self.dg.module; const info = ptr_ty.ptrInfo().data; - if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null; + if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; - const target = self.dg.module.getTarget(); - const ptr_alignment = info.alignment(target); + const ptr_alignment = info.alignment(mod); const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); assert(info.vector_index != .runtime); @@ -9877,7 +9937,7 @@ pub const FuncGen = struct { } if (info.host_size == 0) { - if (isByRef(info.pointee_type)) { + if (isByRef(info.pointee_type, mod)) { return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile"); } const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); @@ -9892,13 +9952,13 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); + const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); - if (isByRef(info.pointee_type)) { - const result_align = info.pointee_type.abiAlignment(target); + if (isByRef(info.pointee_type, mod)) { + const result_align = info.pointee_type.abiAlignment(mod); const result_ptr = self.buildAlloca(elem_llvm_ty, result_align); const same_size_int = self.context.intType(elem_bits); @@ -9908,13 +9968,13 @@ pub const FuncGen = struct { return result_ptr; } - if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) { + if (info.pointee_type.zigTypeTag(mod) == 
.Float or info.pointee_type.zigTypeTag(mod) == .Vector) { const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } - if (info.pointee_type.isPtrAtRuntime()) { + if (info.pointee_type.isPtrAtRuntime(mod)) { const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -9932,11 +9992,11 @@ pub const FuncGen = struct { ) !void { const info = ptr_ty.ptrInfo().data; const elem_ty = info.pointee_type; - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + const mod = self.dg.module; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } - const target = self.dg.module.getTarget(); - const ptr_alignment = ptr_ty.ptrAlignment(target); + const ptr_alignment = ptr_ty.ptrAlignment(mod); const ptr_volatile = llvm.Bool.fromBool(info.@"volatile"); assert(info.vector_index != .runtime); @@ -9964,13 +10024,13 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); + const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store const value_bits_type = self.context.intType(elem_bits); - const value_bits = if (elem_ty.isPtrAtRuntime()) + const value_bits = if (elem_ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(elem, value_bits_type, "") else self.builder.buildBitCast(elem, value_bits_type, ""); @@ -9991,7 +10051,7 @@ pub const FuncGen = struct { store_inst.setVolatile(ptr_volatile); return; } - if (!isByRef(elem_ty)) { + if (!isByRef(elem_ty, mod)) { const store_inst = self.builder.buildStore(elem, ptr); store_inst.setOrdering(ordering); store_inst.setAlignment(ptr_alignment); @@ -9999,13 +10059,13 @@ pub const FuncGen = struct { return; } assert(ordering == .NotAtomic); - const size_bytes = elem_ty.abiSize(target); + const size_bytes = elem_ty.abiSize(mod); _ = self.builder.buildMemCpy( ptr, ptr_alignment, elem, - elem_ty.abiAlignment(target), - self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False), + elem_ty.abiAlignment(mod), + self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False), info.@"volatile", ); } @@ -10030,11 +10090,12 @@ pub const FuncGen = struct { a4: *llvm.Value, a5: *llvm.Value, ) *llvm.Value { - const target = fg.dg.module.getTarget(); + const mod = fg.dg.module; + const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target)); + const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod)); const array_llvm_ty = usize_llvm_ty.arrayType(6); const array_ptr = fg.valgrind_client_request_array orelse a: { @@ -10451,7 +10512,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ fn llvmFieldIndex( ty: Type, field_index: usize, - target: std.Target, + mod: *const Module, ptr_pl_buf: *Type.Payload.Pointer, ) ?c_uint { // Detects where we 
inserted extra padding fields so that we can skip @@ -10464,9 +10525,9 @@ fn llvmFieldIndex( const tuple = ty.tupleFields(); var llvm_field_index: c_uint = 0; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -10488,7 +10549,7 @@ fn llvmFieldIndex( } llvm_field_index += 1; - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } return null; } @@ -10496,10 +10557,10 @@ fn llvmFieldIndex( assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(); + var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, layout); + const field_align = field.alignment(mod, layout); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -10521,43 +10582,44 @@ fn llvmFieldIndex( } llvm_field_index += 1; - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } else { // We did not find an llvm field that corresponds to this zig field. return null; } } -fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false; +fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool { + if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; + const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return isByRef(fn_info.return_type), + .Unspecified, .Inline => return isByRef(fn_info.return_type, mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type, target), + .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, + else => return firstParamSRetSystemV(fn_info.return_type, mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type, target), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == 
.memory, - .Stdcall => return !isScalar(fn_info.return_type), + .SysV => return firstParamSRetSystemV(fn_info.return_type, mod), + .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, + .Stdcall => return !isScalar(mod, fn_info.return_type), else => return false, } } -fn firstParamSRetSystemV(ty: Type, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, target, .ret); +fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool { + const class = x86_64_abi.classifySystemV(ty, mod, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -10567,20 +10629,21 @@ fn firstParamSRetSystemV(ty: Type, target: std.Target) bool { /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { + const mod = dg.module; + if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. - if (fn_info.return_type.isError()) { + if (fn_info.return_type.isError(mod)) { return dg.lowerType(Type.anyerror); } else { return dg.context.voidType(); } } - const target = dg.module.getTarget(); + const target = mod.getTarget(); switch (fn_info.cc) { .Unspecified, .Inline => { - if (isByRef(fn_info.return_type)) { + if (isByRef(fn_info.return_type, mod)) { return dg.context.voidType(); } else { return dg.lowerType(fn_info.return_type); @@ -10594,33 +10657,33 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { else => return lowerSystemVFnRetTy(dg, fn_info), }, .wasm32 => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } - const classes = wasm_c_abi.classifyType(fn_info.return_type, target); + const classes = wasm_c_abi.classifyType(fn_info.return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { return dg.context.voidType(); } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, target); - const abi_size = scalar_type.abiSize(target); + const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod); + const abi_size = scalar_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) { + switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) { .memory => return dg.context.voidType(), .float_array => return dg.lowerType(fn_info.return_type), .byval => return dg.lowerType(fn_info.return_type), .integer => { - const bit_size = fn_info.return_type.bitSize(target); + const bit_size = fn_info.return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => return dg.context.intType(64).arrayType(2), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) { + switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { .memory, .i64_array => return dg.context.voidType(), .i32_array => |len| if (len == 1) { return dg.context.intType(32); @@ -10631,10 +10694,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: 
Type.Payload.Function.Data) !*llvm.Type { } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(fn_info.return_type, target)) { + switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) { .memory => return dg.context.voidType(), .integer => { - const bit_size = fn_info.return_type.bitSize(target); + const bit_size = fn_info.return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => { @@ -10654,7 +10717,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { .Win64 => return lowerWin64FnRetTy(dg, fn_info), .SysV => return lowerSystemVFnRetTy(dg, fn_info), .Stdcall => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } else { return dg.context.voidType(); @@ -10665,13 +10728,13 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { } fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - const target = dg.module.getTarget(); - switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) { + const mod = dg.module; + switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) { .integer => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } else { - const abi_size = fn_info.return_type.abiSize(target); + const abi_size = fn_info.return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, @@ -10683,11 +10746,11 @@ fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.T } fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - if (isScalar(fn_info.return_type)) { + const mod = dg.module; + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } - const target = dg.module.getTarget(); - const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret); + const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret); if (classes[0] == .memory) { return dg.context.voidType(); } @@ -10728,7 +10791,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm } } if (classes[0] == .integer and classes[1] == .none) { - const abi_size = fn_info.return_type.abiSize(target); + const abi_size = fn_info.return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False); @@ -10739,7 +10802,6 @@ const ParamTypeIterator = struct { fn_info: Type.Payload.Function.Data, zig_index: u32, llvm_index: u32, - target: std.Target, llvm_types_len: u32, llvm_types_buffer: [8]*llvm.Type, byval_attr: bool, @@ -10779,7 +10841,10 @@ const ParamTypeIterator = struct { } fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + const mod = it.dg.module; + const target = mod.getTarget(); + + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { it.zig_index += 1; return .no_bits; } @@ -10788,10 +10853,10 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) { + if (ty.isSlice() or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice())) { it.llvm_index += 1; return .slice; - } else if (isByRef(ty)) { + } else if (isByRef(ty, mod)) { return .byref; } else { 
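// (A hedged summary of the three shapes a parameter takes under the
// unspecified calling convention in the branch above, for example:
//
//     []const u8 => .slice  (lowered as two LLVM params: ptr and len)
//     [64]u8     => .byref  (isByRef == true, passed behind a pointer)
//     u32        => .byval  (a plain SSA value, the case returned here)
// )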
return .byval; @@ -10801,23 +10866,23 @@ const ParamTypeIterator = struct { @panic("TODO implement async function lowering in the LLVM backend"); }, .C => { - switch (it.target.cpu.arch) { + switch (target.cpu.arch) { .mips, .mipsel => { it.zig_index += 1; it.llvm_index += 1; return .byval; }, - .x86_64 => switch (it.target.os.tag) { + .x86_64 => switch (target.os.tag) { .windows => return it.nextWin64(ty), else => return it.nextSystemV(ty), }, .wasm32 => { it.zig_index += 1; it.llvm_index += 1; - if (isScalar(ty)) { + if (isScalar(mod, ty)) { return .byval; } - const classes = wasm_c_abi.classifyType(ty, it.target); + const classes = wasm_c_abi.classifyType(ty, mod); if (classes[0] == .indirect) { return .byref; } @@ -10826,7 +10891,7 @@ const ParamTypeIterator = struct { .aarch64, .aarch64_be => { it.zig_index += 1; it.llvm_index += 1; - switch (aarch64_c_abi.classifyType(ty, it.target)) { + switch (aarch64_c_abi.classifyType(ty, mod)) { .memory => return .byref_mut, .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, @@ -10841,7 +10906,7 @@ const ParamTypeIterator = struct { .arm, .armeb => { it.zig_index += 1; it.llvm_index += 1; - switch (arm_c_abi.classifyType(ty, it.target, .arg)) { + switch (arm_c_abi.classifyType(ty, mod, .arg)) { .memory => { it.byval_attr = true; return .byref; @@ -10857,7 +10922,7 @@ const ParamTypeIterator = struct { if (ty.tag() == .f16) { return .as_u16; } - switch (riscv_c_abi.classifyType(ty, it.target)) { + switch (riscv_c_abi.classifyType(ty, mod)) { .memory => return .byref_mut, .byval => return .byval, .integer => return .abi_sized_int, @@ -10878,7 +10943,7 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; - if (isScalar(ty)) { + if (isScalar(mod, ty)) { return .byval; } else { it.byval_attr = true; @@ -10894,9 +10959,10 @@ const ParamTypeIterator = struct { } fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering { - switch (x86_64_abi.classifyWindows(ty, it.target)) { + const mod = it.dg.module; + switch (x86_64_abi.classifyWindows(ty, mod)) { .integer => { - if (isScalar(ty)) { + if (isScalar(mod, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -10926,14 +10992,15 @@ const ParamTypeIterator = struct { } fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering { - const classes = x86_64_abi.classifySystemV(ty, it.target, .arg); + const mod = it.dg.module; + const classes = x86_64_abi.classifySystemV(ty, mod, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; it.byval_attr = true; return .byref; } - if (isScalar(ty)) { + if (isScalar(mod, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -10992,7 +11059,6 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp .fn_info = fn_info, .zig_index = 0, .llvm_index = 0, - .target = dg.module.getTarget(), .llvm_types_buffer = undefined, .llvm_types_len = 0, .byval_attr = false, @@ -11001,16 +11067,17 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - target: std.Target, + mod: *const Module, ty: Type, ) ?std.builtin.Signedness { + const target = mod.getTarget(); switch (cc) { .Unspecified, .Inline, .Async => return null, else => {}, } - const int_info = switch (ty.zigTypeTag()) { - .Bool => Type.u1.intInfo(target), - .Int, .Enum, .ErrorSet => ty.intInfo(target), + const int_info = switch (ty.zigTypeTag(mod)) { + .Bool => Type.u1.intInfo(mod), + .Int, .Enum, .ErrorSet => 
ty.intInfo(mod), else => return null, }; if (int_info.bits <= 16) return int_info.signedness; @@ -11039,12 +11106,12 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type) bool { +fn isByRef(ty: Type, mod: *const Module) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. const max_fields_byval = 0; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -11067,7 +11134,7 @@ fn isByRef(ty: Type) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasRuntimeBits(), + .Array, .Frame => return ty.hasRuntimeBits(mod), .Struct => { // Packed structs are represented to LLVM as integers. if (ty.containerLayout() == .Packed) return false; @@ -11075,32 +11142,32 @@ fn isByRef(ty: Type) bool { const tuple = ty.tupleFields(); var count: usize = 0; for (tuple.values, 0..) |field_val, i| { - if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(tuple.types[i])) return true; + if (isByRef(tuple.types[i], mod)) return true; } return false; } var count: usize = 0; const fields = ty.structFields(); for (fields.values()) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(field.ty)) return true; + if (isByRef(field.ty, mod)) return true; } return false; }, .Union => switch (ty.containerLayout()) { .Packed => return false, - else => return ty.hasRuntimeBits(), + else => return ty.hasRuntimeBits(mod), }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; @@ -11108,10 +11175,10 @@ fn isByRef(ty: Type) bool { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return false; } return true; @@ -11119,8 +11186,8 @@ fn isByRef(ty: Type) bool { } } -fn isScalar(ty: Type) bool { - return switch (ty.zigTypeTag()) { +fn isScalar(mod: *const Module, ty: Type) bool { + return switch (ty.zigTypeTag(mod)) { .Void, .Bool, .NoReturn, @@ -11304,12 +11371,12 @@ fn buildAllocaInner( return alloca; } -fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 { - return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)); +fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 { + return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod)); } -fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 { - return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target)); +fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 { + return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod)); } /// Returns true for asm constraint (e.g. 
"=*m", "=r") if it accepts a memory location diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 09ace669a9..b8c8466427 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -231,9 +231,10 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { - if (self.air.value(inst)) |val| { + const mod = self.module; + if (self.air.value(inst, mod)) |val| { const ty = self.air.typeOf(inst); - if (ty.zigTypeTag() == .Fn) { + if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, .function => val.castTag(.function).?.data.owner_decl, @@ -340,8 +341,9 @@ pub const DeclGen = struct { } fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo { + const mod = self.module; const target = self.getTarget(); - return switch (ty.zigTypeTag()) { + return switch (ty.zigTypeTag(mod)) { .Bool => ArithmeticTypeInfo{ .bits = 1, // Doesn't matter for this class. .is_vector = false, @@ -355,7 +357,7 @@ pub const DeclGen = struct { .class = .float, }, .Int => blk: { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); // TODO: Maybe it's useful to also return this value. const maybe_backing_bits = self.backingIntBits(int_info.bits); break :blk ArithmeticTypeInfo{ @@ -533,21 +535,22 @@ pub const DeclGen = struct { } fn addInt(self: *@This(), ty: Type, val: Value) !void { - const target = self.dg.getTarget(); - const int_info = ty.intInfo(target); + const mod = self.dg.module; + const int_info = ty.intInfo(mod); const int_bits = switch (int_info.signedness) { - .signed => @bitCast(u64, val.toSignedInt(target)), - .unsigned => val.toUnsignedInt(target), + .signed => @bitCast(u64, val.toSignedInt(mod)), + .unsigned => val.toUnsignedInt(mod), }; // TODO: Swap endianess if the compiler is big endian. - const len = ty.abiSize(target); + const len = ty.abiSize(mod); try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]); } fn addFloat(self: *@This(), ty: Type, val: Value) !void { + const mod = self.dg.module; const target = self.dg.getTarget(); - const len = ty.abiSize(target); + const len = ty.abiSize(mod); // TODO: Swap endianess if the compiler is big endian. switch (ty.floatBits(target)) { @@ -607,15 +610,15 @@ pub const DeclGen = struct { } fn lower(self: *@This(), ty: Type, val: Value) !void { - const target = self.dg.getTarget(); const dg = self.dg; + const mod = dg.module; if (val.isUndef()) { - const size = ty.abiSize(target); + const size = ty.abiSize(mod); return try self.addUndef(size); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => try self.addInt(ty, val), .Float => try self.addFloat(ty, val), .Bool => try self.addConstBool(val.toBool()), @@ -644,7 +647,7 @@ pub const DeclGen = struct { const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try self.addBytes(bytes); if (ty.sentinel()) |sentinel| { - try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target))); + try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); } }, .bytes => { @@ -690,13 +693,13 @@ pub const DeclGen = struct { const struct_begin = self.size; const field_vals = val.castTag(.aggregate).?.data; for (struct_ty.fields.values(), 0..) 
|field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; try self.lower(field.ty, field_vals[i]); // Add padding if required. // TODO: Add to type generation as well? const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, target); + const padded_field_end = ty.structFieldOffset(i + 1, mod); const padding = padded_field_end - unpadded_field_end; try self.addUndef(padding); } @@ -705,13 +708,13 @@ pub const DeclGen = struct { .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); - const has_payload = !val.isNull(); - const abi_size = ty.abiSize(target); + const has_payload = !val.isNull(mod); + const abi_size = ty.abiSize(mod); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBits(mod)) { try self.addConstBool(has_payload); return; - } else if (ty.optionalReprIsPayload()) { + } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. if (val.castTag(.opt_payload)) |payload| { try self.lower(payload_ty, payload.data); @@ -729,7 +732,7 @@ pub const DeclGen = struct { // Subtract 1 for @sizeOf(bool). // TODO: Make this not hardcoded. - const payload_size = payload_ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; if (val.castTag(.opt_payload)) |payload| { @@ -744,14 +747,13 @@ pub const DeclGen = struct { var int_val_buffer: Value.Payload.U64 = undefined; const int_val = val.enumToInt(ty, &int_val_buffer); - var int_ty_buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&int_ty_buffer); + const int_ty = ty.intTagType(); try self.lower(int_ty, int_val); }, .Union => { const tag_and_val = val.castTag(.@"union").?.data; - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); @@ -772,9 +774,9 @@ pub const DeclGen = struct { try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); } - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { try self.lower(active_field_ty, tag_and_val.val); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -808,9 +810,9 @@ pub const DeclGen = struct { return try self.lower(Type.anyerror, error_val); } - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiAlignment(target); - const ty_size = ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); const padding = ty_size - payload_size - error_size; const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); @@ -886,7 +888,7 @@ pub const DeclGen = struct { // .id_result = result_id, // .storage_class = storage_class, // }); - // } else if (ty.abiSize(target) == 0) { + // } else if (ty.abiSize(mod) == 0) { // // Special case: if the type has no size, then return an undefined pointer. 
// return try section.emit(self.spv.gpa, .OpUndef, .{ // .id_result_type = self.typeId(ptr_ty_ref), @@ -968,6 +970,7 @@ pub const DeclGen = struct { /// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default. /// This function should only be called during function code generation. fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef { + const mod = self.module; const target = self.getTarget(); const result_ty_ref = try self.resolveType(ty, repr); @@ -977,12 +980,12 @@ pub const DeclGen = struct { return self.spv.constUndef(result_ty_ref); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => { - if (ty.isSignedInt()) { - return try self.spv.constInt(result_ty_ref, val.toSignedInt(target)); + if (ty.isSignedInt(mod)) { + return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod)); } else { - return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target)); + return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod)); } }, .Bool => switch (repr) { @@ -1037,7 +1040,7 @@ pub const DeclGen = struct { // The value cannot be generated directly, so generate it as an indirect constant, // and then perform an OpLoad. const result_id = self.spv.allocId(); - const alignment = ty.abiAlignment(target); + const alignment = ty.abiAlignment(mod); const spv_decl_index = try self.spv.allocDecl(.global); try self.lowerIndirectConstant( @@ -1114,8 +1117,8 @@ pub const DeclGen = struct { /// NOTE: When the active field is set to something other than the most aligned field, the /// resulting struct will be *underaligned*. fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef { - const target = self.getTarget(); - const layout = ty.unionGetLayout(target); + const mod = self.module; + const layout = ty.unionGetLayout(mod); const union_ty = ty.cast(Type.Payload.Union).?.data; if (union_ty.layout == .Packed) { @@ -1143,11 +1146,11 @@ pub const DeclGen = struct { const active_field = maybe_active_field orelse layout.most_aligned_field; const active_field_ty = union_ty.fields.values()[active_field].ty; - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect); member_types.appendAssumeCapacity(active_payload_ty_ref); member_names.appendAssumeCapacity(try self.spv.resolveString("payload")); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -1177,21 +1180,21 @@ pub const DeclGen = struct { /// Turn a Zig type into a SPIR-V Type, and return a reference to it. 
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef { + const mod = self.module; log.debug("resolveType: ty = {}", .{ty.fmt(self.module)}); const target = self.getTarget(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return try self.spv.resolve(.void_type), .Bool => switch (repr) { .direct => return try self.spv.resolve(.bool_type), .indirect => return try self.intType(.unsigned, 1), }, .Int => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buffer); + const tag_ty = ty.intTagType(); return self.resolveType(tag_ty, repr); }, .Float => { @@ -1290,7 +1293,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field_ty, .indirect); member_index += 1; @@ -1315,7 +1318,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field.ty, .indirect); member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]); @@ -1334,7 +1337,7 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. @@ -1342,7 +1345,7 @@ pub const DeclGen = struct { } const payload_ty_ref = try self.resolveType(payload_ty, .indirect); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { // Optional is actually a pointer or a slice. 
return payload_ty_ref; } @@ -1445,14 +1448,14 @@ pub const DeclGen = struct { }; fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { - const target = self.getTarget(); + const mod = self.module; - const error_align = Type.anyerror.abiAlignment(target); - const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(mod); + const payload_align = payload_ty.abiAlignment(mod); const error_first = error_align > payload_align; return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod), .error_first = error_first, }; } @@ -1529,14 +1532,15 @@ pub const DeclGen = struct { } fn genDecl(self: *DeclGen) !void { - const decl = self.module.declPtr(self.decl_index); + const mod = self.module; + const decl = mod.declPtr(self.decl_index); const spv_decl_index = try self.resolveDecl(self.decl_index); const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); if (decl.val.castTag(.function)) |_| { - assert(decl.ty.zigTypeTag() == .Fn); + assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), @@ -1634,7 +1638,8 @@ pub const DeclGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const direct_bool_ty_ref = try self.resolveType(ty, .direct); const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); @@ -1655,7 +1660,8 @@ pub const DeclGen = struct { /// Convert representation from direct (in 'register') to indirect (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); break :blk self.boolToInt(indirect_bool_ty_ref, operand_id); @@ -2056,6 +2062,7 @@ pub const DeclGen = struct { } fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; if (self.liveness.isUnused(inst)) return null; const ty = self.air.typeOfIndex(inst); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -2083,7 +2090,7 @@ pub const DeclGen = struct { if (elem.isUndef()) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { - const int = elem.toSignedInt(self.getTarget()); + const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); self.func.body.writeOperand(spec.LiteralInteger, unsigned); } @@ -2189,13 +2196,13 @@ pub const DeclGen = struct { lhs_id: IdRef, rhs_id: IdRef, ) !IdRef { + const mod = self.module; var cmp_lhs_id = lhs_id; var cmp_rhs_id = rhs_id; const opcode: Opcode = opcode: { - var int_buffer: Type.Payload.Bits = undefined; - const op_ty = switch (ty.zigTypeTag()) { + const op_ty = switch (ty.zigTypeTag(mod)) { .Int, .Bool, .Float => ty, - .Enum => ty.intTagType(&int_buffer), + .Enum => ty.intTagType(), .ErrorSet => Type.u16, .Pointer => blk: { // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are @@ -2303,13 +2310,14 @@ pub const DeclGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { + const mod = self.module; const dst_ty_ref = try self.resolveType(dst_ty, .direct); const result_id = self.spv.allocId(); // TODO: Some more cases are missing here // See fn bitCast in llvm.zig - if (src_ty.zigTypeTag() == .Int and dst_ty.isPtrAtRuntime()) { + if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) { try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = self.typeId(dst_ty_ref), .id_result = result_id, @@ -2342,8 +2350,8 @@ pub const DeclGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); - const target = self.getTarget(); - const dest_info = dest_ty.intInfo(target); + const mod = self.module; + const dest_info = dest_ty.intInfo(mod); // TODO: Masking? @@ -2485,8 +2493,9 @@ pub const DeclGen = struct { } fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { + const mod = self.module; // Construct new pointer type for the resulting pointer - const elem_ty = ptr_ty.elemType2(); // use elemType2() so that we get T for *[N]T. + const elem_ty = ptr_ty.elemType2(mod); // use elemType2() so that we get T for *[N]T.
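// (A hedged example of what elemType2 resolves for a couple of pointer
// types, which is what the "T for *[N]T" comment above refers to:
//
//     *u32     => u32
//     *[4]u32  => u32  (the array level is stripped as well)
// )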
const elem_ty_ref = try self.resolveType(elem_ty, .direct); const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); if (ptr_ty.isSinglePointer()) { @@ -2502,12 +2511,13 @@ pub const DeclGen = struct { fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); // TODO: Make this return a null ptr or something - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2536,8 +2546,8 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.air.typeOf(ty_op.operand); - const target = self.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const mod = self.module; + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolve(ty_op.operand); @@ -2551,6 +2561,7 @@ pub const DeclGen = struct { fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -2559,9 +2570,9 @@ pub const DeclGen = struct { const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet. + assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet. return try self.extractField(field_ty, object_id, field_index); } @@ -2573,8 +2584,9 @@ pub const DeclGen = struct { object_ptr: IdRef, field_index: u32, ) !?IdRef { + const mod = self.module; const object_ty = object_ptr_ty.childType(); - switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout()) { .Packed => unreachable, // TODO else => { @@ -2667,6 +2679,7 @@ pub const DeclGen = struct { // the current block by first generating the code of the block, then a label, and then generate the rest of the current // ir.Block in a different SPIR-V block. + const mod = self.module; const label_id = self.spv.allocId(); // 4 chosen as arbitrary initial capacity. @@ -2690,7 +2703,7 @@ pub const DeclGen = struct { try self.beginSpvBlock(label_id); // If this block didn't produce a value, simply return here. - if (!ty.hasRuntimeBitsIgnoreComptime()) + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return null; // Combine the result from the blocks using the Phi instruction. @@ -2716,7 +2729,8 @@ pub const DeclGen = struct { const block = self.blocks.get(br.block_inst).?; const operand_ty = self.air.typeOf(br.operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, unless there is a br or br_void in the function's body.
try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); @@ -2771,13 +2785,14 @@ pub const DeclGen = struct { } fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -2805,7 +2820,8 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; const operand_ty = self.air.typeOf(operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(operand); try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); } else { @@ -2814,11 +2830,12 @@ pub const DeclGen = struct { } fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); return; } @@ -2946,6 +2963,7 @@ pub const DeclGen = struct { fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_id = try self.resolve(un_op); const optional_ty = self.air.typeOf(un_op); @@ -2955,7 +2973,7 @@ pub const DeclGen = struct { const bool_ty_ref = try self.resolveType(Type.bool, .direct); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { // Pointer payload represents nullability: pointer or slice. 
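// (A hedged aside on the two lowerings that optionalReprIsPayload
// distinguishes, roughly:
//
//     ?*u32 => just a pointer; null is represented by the null pointer
//     ?u32  => a { payload, bool } pair with an explicit "is set" flag
//
// so only the pointer-like case takes the comparison path below.)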
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -2985,7 +3003,7 @@ pub const DeclGen = struct { return result_id; } - const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime()) + const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod)) try self.extractField(Type.bool, operand_id, 1) else // Optional representation is bool indicating whether the optional is set @@ -3009,14 +3027,15 @@ pub const DeclGen = struct { fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3026,16 +3045,17 @@ pub const DeclGen = struct { fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try self.constBool(true, .direct); } const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3045,30 +3065,29 @@ pub const DeclGen = struct { } fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void { - const target = self.getTarget(); + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolve(pl_op.operand); const cond_ty = self.air.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const cond_words: u32 = switch (cond_ty.zigTypeTag()) { + const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) { .Int => blk: { - const bits = cond_ty.intInfo(target).bits; + const bits = cond_ty.intInfo(mod).bits; const backing_bits = self.backingIntBits(bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, .Enum => blk: { - var buffer: Type.Payload.Bits = undefined; - const int_ty = cond_ty.intTagType(&buffer); - const int_info = int_ty.intInfo(target); + const int_ty = cond_ty.intTagType(); + const int_info = int_ty.intInfo(mod); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, - else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers. + else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers. 
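// (A hedged note on the word counts computed above: SPIR-V OpSwitch encodes
// each case literal in 32-bit words, so cond_words follows the backing
// integer width, for example:
//
//     u8 or i32 (backing_bits <= 32) => 1 word per case literal
//     u64 or i64 (backing_bits > 32) => 2 words per case literal
// )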
}; const num_cases = switch_br.data.cases_len; @@ -3112,15 +3131,15 @@ pub const DeclGen = struct { const label = IdRef{ .id = first_case_label.id + case_i }; for (items) |item| { - const value = self.air.value(item) orelse { + const value = self.air.value(item, mod) orelse { return self.todo("switch on runtime value???", .{}); }; - const int_val = switch (cond_ty.zigTypeTag()) { - .Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target), + const int_val = switch (cond_ty.zigTypeTag(mod)) { + .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod), .Enum => blk: { var int_buffer: Value.Payload.U64 = undefined; // TODO: figure out if cond_ty is correct (something with enum literals) - break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants + break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants }, else => unreachable, }; @@ -3294,11 +3313,12 @@ pub const DeclGen = struct { fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { _ = modifier; + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const callee_ty = self.air.typeOf(pl_op.operand); - const zig_fn_ty = switch (callee_ty.zigTypeTag()) { + const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => return self.fail("cannot call function pointers", .{}), else => unreachable, @@ -3320,7 +3340,7 @@ pub const DeclGen = struct { // temporary params buffer. const arg_id = try self.resolve(arg); const arg_ty = self.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; params[n_params] = arg_id; n_params += 1; @@ -3337,7 +3357,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { return null; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 62a208406e..6117f1c1de 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1123,7 +1123,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In }, }; - const required_alignment = tv.ty.abiAlignment(self.base.options.target); + const required_alignment = tv.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); atom.size = @intCast(u32, code.len); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment); @@ -1299,7 +1299,8 @@ pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const index: u16 = blk: { if (val.isUndefDeep()) { @@ -1330,7 +1331,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple defer gpa.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 1d358a29ab..9c6e54ea98 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -169,16 +169,16 @@ pub const DeclState = struct { fn addDbgInfoType( self: *DeclState, - module: *Module, + mod: *Module, atom_index: Atom.Index, ty: Type, ) error{OutOfMemory}!void { const arena = self.abbrev_type_arena.allocator(); const dbg_info_buffer = &self.dbg_info; - const target = module.getTarget(); + const target = mod.getTarget(); const target_endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .NoReturn => unreachable, .Void => { try dbg_info_buffer.append(@enumToInt(AbbrevKind.pad1)); @@ -189,12 +189,12 @@ pub const DeclState = struct { // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); }, .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type)); // DW.AT.encoding, DW.FORM.data1 @@ -203,20 +203,20 @@ pub const DeclState = struct { .unsigned => DW.ATE.unsigned, }); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type)); // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.address); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } var buf = try arena.create(Type.Payload.ElemType); @@ -224,10 +224,10 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = ty.abiSize(target); + const 
abi_size = ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(7); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -251,7 +251,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const offset = abi_size - payload_ty.abiSize(target); + const offset = abi_size - payload_ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), offset); // DW.AT.structure_type delimit children try dbg_info_buffer.append(0); @@ -266,9 +266,9 @@ pub const DeclState = struct { try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(5); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -311,7 +311,7 @@ pub const DeclState = struct { // DW.AT.array_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); @@ -332,12 +332,12 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); switch (ty.tag()) { .tuple, .anon_struct => { // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); const fields = ty.tupleFields(); for (fields.types, 0..) |field, field_index| { @@ -350,13 +350,13 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, target); + const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, else => { // DW.AT.name, DW.FORM.string - const struct_name = try ty.nameAllocArena(arena, module); + const struct_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -370,7 +370,7 @@ pub const DeclState = struct { const fields = ty.structFields(); for (fields.keys(), 0..) 
|field_name, field_index| { const field = fields.get(field_name).?; - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -382,7 +382,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, target); + const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, @@ -395,9 +395,9 @@ pub const DeclState = struct { // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - const enum_name = try ty.nameAllocArena(arena, module); + const enum_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(enum_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -424,7 +424,7 @@ pub const DeclState = struct { // See https://github.com/ziglang/zig/issues/645 var int_buffer: Value.Payload.U64 = undefined; const field_int_val = value.enumToInt(ty, &int_buffer); - break :value @bitCast(u64, field_int_val.toSignedInt(target)); + break :value @bitCast(u64, field_int_val.toSignedInt(mod)); } else @intCast(u64, field_i); mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } @@ -433,12 +433,12 @@ pub const DeclState = struct { try dbg_info_buffer.append(0); }, .Union => { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); const union_obj = ty.cast(Type.Payload.Union).?.data; const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0; const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size; const is_tagged = layout.tag_size > 0; - const union_name = try ty.nameAllocArena(arena, module); + const union_name = try ty.nameAllocArena(arena, mod); // TODO this is temporary to match current state of unions in Zig - we don't yet have // safety checks implemented meaning the implicit tag is not yet stored and generated @@ -481,7 +481,7 @@ pub const DeclState = struct { const fields = ty.unionFields(); for (fields.keys()) |field_name| { const field = fields.get(field_name).?; - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string @@ -517,7 +517,7 @@ pub const DeclState = struct { .ErrorSet => { try addDbgInfoErrorSet( self.abbrev_type_arena.allocator(), - module, + mod, ty, target, &self.dbg_info, @@ -526,18 +526,18 @@ pub const DeclState = struct { .ErrorUnion => { const error_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); - const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const abi_size = ty.abiSize(target); - const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0; - const error_off = if (error_align >= payload_align) 0 
else payload_ty.abiSize(target); + const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_size = ty.abiSize(mod); + const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0; + const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, module); + const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); if (!payload_ty.isNoReturn()) { @@ -685,7 +685,8 @@ pub const DeclState = struct { const atom_index = self.di_atom_decls.get(owner_decl).?; const name_with_null = name.ptr[0 .. name.len + 1]; try dbg_info.append(@enumToInt(AbbrevKind.variable)); - const target = self.mod.getTarget(); + const mod = self.mod; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); const child_ty = if (is_ptr) ty.childType() else ty; @@ -790,9 +791,9 @@ pub const DeclState = struct { const fixup = dbg_info.items.len; dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc 1, - if (child_ty.isSignedInt()) DW.OP.consts else DW.OP.constu, + if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu, }); - if (child_ty.isSignedInt()) { + if (child_ty.isSignedInt(mod)) { try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x)); } else { try leb128.writeULEB128(dbg_info.writer(), x); @@ -805,7 +806,7 @@ pub const DeclState = struct { // DW.AT.location, DW.FORM.exprloc // uleb128(exprloc_len) // DW.OP.implicit_value uleb128(len_of_bytes) bytes - const abi_size = @intCast(u32, child_ty.abiSize(target)); + const abi_size = @intCast(u32, child_ty.abiSize(mod)); var implicit_value_len = std.ArrayList(u8).init(self.gpa); defer implicit_value_len.deinit(); try leb128.writeULEB128(implicit_value_len.writer(), abi_size); @@ -979,7 +980,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) assert(decl.has_tv); - switch (decl.ty.zigTypeTag()) { + switch (decl.ty.zigTypeTag(mod)) { .Fn => { _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index); @@ -1027,7 +1028,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len); const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(); + const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram)); } else { @@ -1059,7 +1060,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) pub fn commitDeclState( self: *Dwarf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, sym_addr: u64, sym_size: u64, @@ -1071,12 +1072,12 @@ pub fn commitDeclState( const gpa = self.allocator; var dbg_line_buffer = &decl_state.dbg_line; var dbg_info_buffer = &decl_state.dbg_info; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const target_endian = self.target.cpu.arch.endian(); assert(decl.has_tv); - switch (decl.ty.zigTypeTag()) { + switch (decl.ty.zigTypeTag(mod)) { .Fn => { // Since the Decl is a function, we need to update the .debug_line program. 
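The ErrorUnion case in the Dwarf hunk a little above places whichever of the error set and the payload has the greater alignment first, and (like the hunk itself) it ignores inter-field padding when computing the two offsets. A self-contained model of that placement rule, with illustrative names:

    const std = @import("std");

    const ErrorUnionOffsets = struct { payload_off: u64, error_off: u64 };

    // The higher-aligned member is laid out at offset 0; the other member
    // starts right after it. Sizes are raw abiSize values.
    fn errorUnionOffsets(error_align: u32, error_size: u64, payload_align: u32, payload_size: u64) ErrorUnionOffsets {
        if (error_align >= payload_align) {
            return .{ .payload_off = error_size, .error_off = 0 };
        }
        return .{ .payload_off = 0, .error_off = payload_size };
    }

    test "error set first when at least as aligned" {
        const o = errorUnionOffsets(2, 2, 1, 1); // roughly anyerror!u8
        try std.testing.expectEqual(@as(u64, 2), o.payload_off);
        try std.testing.expectEqual(@as(u64, 0), o.error_off);
    }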
// Perform the relocations based on vaddr. @@ -1283,7 +1284,7 @@ pub fn commitDeclState( if (deferred) continue; symbol.offset = @intCast(u32, dbg_info_buffer.items.len); - try decl_state.addDbgInfoType(module, di_atom_index, ty); + try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } @@ -1319,7 +1320,7 @@ pub fn commitDeclState( reloc.offset, value, target, - ty.fmt(module), + ty.fmt(mod), }); mem.writeInt( u32, @@ -2663,7 +2664,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { fn addDbgInfoErrorSet( arena: Allocator, - module: *Module, + mod: *Module, ty: Type, target: std.Target, dbg_info_buffer: *std.ArrayList(u8), @@ -2673,10 +2674,10 @@ fn addDbgInfoErrorSet( // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = Type.anyerror.abiSize(target); + const abi_size = Type.anyerror.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, module); + const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); // DW.AT.enumerator @@ -2691,7 +2692,7 @@ fn addDbgInfoErrorSet( const error_names = ty.errorSetNames(); for (error_names) |error_name| { - const kv = module.getErrorValue(error_name) catch unreachable; + const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 2a28f880ac..7bd36a9b60 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2449,9 +2449,10 @@ pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.I } fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { - const decl = self.base.options.module.?.declPtr(decl_index); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const ty = decl.ty; - const zig_ty = ty.zigTypeTag(); + const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const shdr_index: u16 = blk: { if (val.isUndefDeep()) { @@ -2482,7 +2483,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s defer self.base.allocator.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2826,7 +2827,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const required_alignment = typed_value.ty.abiAlignment(mod); const shdr_index = self.rodata_section_index.?; const phdr_index = self.sections.items(.phdr_index)[shdr_index]; const local_sym = self.getAtom(atom_index).getSymbolPtr(self); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a346ec756f..306661c5c5 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1948,7 +1948,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const mod = self.base.options.module.?; + const required_alignment = typed_value.ty.abiAlignment(mod); const atom = 
self.getAtomPtr(atom_index); atom.size = code.len; // TODO: work out logic for disambiguating functions from function pointers @@ -2152,6 +2153,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In } fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { + const mod = self.base.options.module.?; // Lowering a TLV on macOS involves two stages: // 1. first we lower the initializer into the appropriate section (__thread_data or __thread_bss) // 2. next, we create a corresponding threadlocal variable descriptor in __thread_vars @@ -2202,7 +2204,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D }, }; - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_name = try decl.getFullyQualifiedName(module); defer gpa.free(decl_name); @@ -2262,7 +2264,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; const val = decl.val; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const mode = self.base.options.optimize_mode; const single_threaded = self.base.options.single_threaded; const sect_id: u8 = blk: { @@ -2301,7 +2304,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_name = try decl.getFullyQualifiedName(mod); defer gpa.free(decl_name); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 6d74e17dfd..7a389a789d 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -432,8 +432,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) } /// called at the end of update{Decl,Func} fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { - const decl = self.base.options.module.?.declPtr(decl_index); - const is_fn = (decl.ty.zigTypeTag() == .Fn); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; @@ -704,7 +705,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset }); const code = blk: { - const is_fn = source_decl.ty.zigTypeTag() == .Fn; + const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions; const output = table.get(source_decl_index).?; @@ -1031,7 +1032,7 @@ pub fn getDeclVAddr( ) !u64 { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { var start = self.bases.text; var it_file = self.fn_decl_table.iterator(); while (it_file.next()) |fentry| { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index befd2d68c9..0154207368 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1473,7 +1473,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 atom.size = 
@intCast(u32, code.len); if (code.len == 0) return; - atom.alignment = decl.ty.abiAlignment(wasm.base.options.target); + atom.alignment = decl.ty.abiAlignment(mod); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1523,9 +1523,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { /// Returns the symbol index of the local /// The given `decl` is the parent decl who owns the constant. pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { - assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions - const mod = wasm.base.options.module.?; + assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); // Create and initialize a new local symbol and atom @@ -1543,7 +1542,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const code = code: { const atom = wasm.getAtomPtr(atom_index); - atom.alignment = tv.ty.abiAlignment(wasm.base.options.target); + atom.alignment = tv.ty.abiAlignment(mod); wasm.symbols.items[atom.sym_index] = .{ .name = try wasm.string_table.put(wasm.base.allocator, name), .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL), @@ -1632,7 +1631,7 @@ pub fn getDeclVAddr( const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?; const atom = wasm.getAtomPtr(atom_index); const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { assert(reloc_info.addend == 0); // addend not allowed for function relocations // We found a function pointer, so add it to our table, // as function pointers are not allowed to be stored inside the data section. 
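A recurring test in the linker hunks above is decl.ty.zigTypeTag(mod) == .Fn, which routes a declaration either to code handling (or, for wasm, the function table) or to data handling. A toy version of that routing decision, purely illustrative:

    const std = @import("std");

    // Functions go to code sections; everything else is data. This stands in
    // for the zigTypeTag(mod) checks in Plan9.zig and Wasm.zig above.
    const SectionKind = enum { function, data };

    fn classify(comptime T: type) SectionKind {
        return if (@typeInfo(T) == .Fn) .function else .data;
    }

    test "decl routing model" {
        try std.testing.expectEqual(SectionKind.function, classify(fn () void));
        try std.testing.expectEqual(SectionKind.data, classify([4]u8));
    }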
@@ -2933,7 +2932,8 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - atom.alignment = slice_ty.abiAlignment(wasm.base.options.target); + const mod = wasm.base.options.module.?; + atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table"); @@ -3000,7 +3000,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { .offset = offset, .addend = @intCast(i32, addend), }); - atom.size += @intCast(u32, slice_ty.abiSize(wasm.base.options.target)); + atom.size += @intCast(u32, slice_ty.abiSize(mod)); addend += len; // as we updated the error name table, we now store the actual name within the names atom @@ -3369,7 +3369,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod if (decl.isExtern()) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); } else if (decl.getVariable()) |variable| { if (!variable.is_mutable) { diff --git a/src/print_air.zig b/src/print_air.zig index 2e8ab1a642..e8875ff018 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -7,6 +7,7 @@ const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); +const InternPool = @import("InternPool.zig"); pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void { const instruction_bytes = air.instructions.len * @@ -965,14 +966,13 @@ const Writer = struct { operand: Air.Inst.Ref, dies: bool, ) @TypeOf(s).Error!void { - var i: usize = @enumToInt(operand); + const i = @enumToInt(operand); - if (i < Air.Inst.Ref.typed_value_map.len) { + if (i < InternPool.static_len) { return s.print("@{}", .{operand}); } - i -= Air.Inst.Ref.typed_value_map.len; - return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies); + return w.writeInstIndex(s, i - InternPool.static_len, dies); } fn writeInstIndex( diff --git a/src/print_zir.zig b/src/print_zir.zig index cfa68424d0..a2178bbb49 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -3,6 +3,7 @@ const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); @@ -2468,14 +2469,9 @@ const Writer = struct { } fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { - var i: usize = @enumToInt(ref); - - if (i < Zir.Inst.Ref.typed_value_map.len) { - return stream.print("@{}", .{ref}); - } - i -= Zir.Inst.Ref.typed_value_map.len; - - return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i)); + const i = @enumToInt(ref); + if (i < InternPool.static_len) return stream.print("@{}", .{@intToEnum(InternPool.Index, i)}); + return self.writeInstIndex(stream, i - InternPool.static_len); } fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { diff --git a/src/target.zig b/src/target.zig index 5e66c8f417..c89f8ce92c 100644 --- a/src/target.zig +++ b/src/target.zig @@ -512,134 +512,6 @@ pub fn needUnwindTables(target: std.Target) bool { return target.os.tag == .windows; } -pub const AtomicPtrAlignmentError = error{ - FloatTooBig, - IntTooBig, - 
BadType, -}; - -pub const AtomicPtrAlignmentDiagnostics = struct { - bits: u16 = undefined, - max_bits: u16 = undefined, -}; - -/// If ABI alignment of `ty` is OK for atomic operations, returns 0. -/// Otherwise returns the alignment required on a pointer for the target -/// to perform atomic operations. -// TODO this function does not take into account CPU features, which can affect -// this value. Audit this! -pub fn atomicPtrAlignment( - target: std.Target, - ty: Type, - diags: *AtomicPtrAlignmentDiagnostics, -) AtomicPtrAlignmentError!u32 { - const max_atomic_bits: u16 = switch (target.cpu.arch) { - .avr, - .msp430, - .spu_2, - => 16, - - .arc, - .arm, - .armeb, - .hexagon, - .m68k, - .le32, - .mips, - .mipsel, - .nvptx, - .powerpc, - .powerpcle, - .r600, - .riscv32, - .sparc, - .sparcel, - .tce, - .tcele, - .thumb, - .thumbeb, - .x86, - .xcore, - .amdil, - .hsail, - .spir, - .kalimba, - .lanai, - .shave, - .wasm32, - .renderscript32, - .csky, - .spirv32, - .dxil, - .loongarch32, - .xtensa, - => 32, - - .amdgcn, - .bpfel, - .bpfeb, - .le64, - .mips64, - .mips64el, - .nvptx64, - .powerpc64, - .powerpc64le, - .riscv64, - .sparc64, - .s390x, - .amdil64, - .hsail64, - .spir64, - .wasm64, - .renderscript64, - .ve, - .spirv64, - .loongarch64, - => 64, - - .aarch64, - .aarch64_be, - .aarch64_32, - => 128, - - .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, - }; - - var buffer: Type.Payload.Bits = undefined; - - const int_ty = switch (ty.zigTypeTag()) { - .Int => ty, - .Enum => ty.intTagType(&buffer), - .Float => { - const bit_count = ty.floatBits(target); - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.FloatTooBig; - } - return 0; - }, - .Bool => return 0, - else => { - if (ty.isPtrAtRuntime()) return 0; - return error.BadType; - }, - }; - - const bit_count = int_ty.intInfo(target).bits; - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.IntTooBig; - } - - return 0; -} - pub fn defaultAddressSpace( target: std.Target, context: enum { diff --git a/src/type.zig b/src/type.zig index e5b41e717b..259079a26c 100644 --- a/src/type.zig +++ b/src/type.zig @@ -9,27 +9,102 @@ const log = std.log.scoped(.Type); const target_util = @import("target.zig"); const TypedValue = @import("TypedValue.zig"); const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); const file_struct = @This(); -/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. -/// It's important for this type to be small. -/// Types are not de-duplicated, which helps with multi-threading since it obviates the requirement -/// of obtaining a lock on a global type table, as well as making the -/// garbage collection bookkeeping simpler. -/// This union takes advantage of the fact that the first page of memory -/// is unmapped, giving us 4096 possible enum tags that have no payload. -pub const Type = extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, - ptr_otherwise: *Payload, - - pub fn zigTypeTag(ty: Type) std.builtin.TypeId { - return ty.zigTypeTagOrPoison() catch unreachable; - } +pub const Type = struct { + /// We are migrating towards using this for every Type object. However, many + /// types are still represented the legacy way. This is indicated by using + /// InternPool.Index.none. 
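Before the struct body continues below, a stripped-down model of the migration scheme this new definition sets up may help: a Type is now either an index into the InternPool or, during the transition, a legacy tagged payload signalled by .none. Names and layout here are illustrative stand-ins, not the compiler's definitions:

    const std = @import("std");

    // Model of the two-representation scheme: an interned index, or a legacy
    // tag. `.none` marks the legacy path.
    const Index = enum(u32) { none = std.math.maxInt(u32), _ };

    const Ty = struct {
        ip_index: Index,
        legacy_tag: u32, // stand-in for the old tag/payload union

        fn isInterned(ty: Ty) bool {
            return ty.ip_index != .none;
        }
    };

    test "interned vs legacy dispatch" {
        const interned = Ty{ .ip_index = @intToEnum(Index, 0), .legacy_tag = undefined };
        const legacy = Ty{ .ip_index = .none, .legacy_tag = 7 };
        try std.testing.expect(interned.isInterned());
        try std.testing.expect(!legacy.isInterned());
    }

Every query in the rest of this file follows the same shape: check ip_index first, dispatch through the pool if it is set, and fall through to the legacy tag switch otherwise.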
+ ip_index: InternPool.Index, + + /// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. + /// This union takes advantage of the fact that the first page of memory + /// is unmapped, giving us 4096 possible enum tags that have no payload. + legacy: extern union { + /// If the tag value is less than Tag.no_payload_count, then no pointer + /// dereference is needed. + tag_if_small_enough: Tag, + ptr_otherwise: *Payload, + }, + + pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; + } + + pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + if (ty.ip_index != .none) { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return .Int, + .ptr_type => return .Pointer, + .array_type => return .Array, + .vector_type => return .Vector, + .optional_type => return .Optional, + .error_union_type => return .ErrorUnion, + .struct_type => return .Struct, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + => return .Float, + + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + => return .Int, + + .anyopaque => return .Opaque, + .bool => return .Bool, + .void => return .Void, + .type => return .Type, + .anyerror => return .ErrorSet, + .comptime_int => return .ComptimeInt, + .comptime_float => return .ComptimeFloat, + .noreturn => return .NoReturn, + .@"anyframe" => return .AnyFrame, + .null => return .Null, + .undefined => return .Undefined, + .enum_literal => return .EnumLiteral, + + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + => return .Enum, + + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => return .Struct, + + .type_info => return .Union, + + .generic_poison => unreachable, + .var_args_param => unreachable, + }, - pub fn zigTypeTagOrPoison(ty: Type) error{GenericPoison}!std.builtin.TypeId { + .extern_func, + .int, + .enum_tag, + .simple_value, + => unreachable, // it's a value, not a type + } + } switch (ty.tag()) { .generic_poison => return error.GenericPoison, @@ -56,8 +131,6 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, - .int_signed, - .int_unsigned, => return .Int, .f16, @@ -85,10 +158,6 @@ pub const Type = extern union { .null => return .Null, .undefined => return .Undefined, - .fn_noreturn_no_args => return .Fn, - .fn_void_no_args => return .Fn, - .fn_naked_noreturn_no_args => return .Fn, - .fn_ccc_void_no_args => return .Fn, .function => return .Fn, .array, @@ -159,26 +228,26 @@ pub const Type = extern union { } } - pub fn baseZigTypeTag(self: Type) std.builtin.TypeId { - return switch (self.zigTypeTag()) { - .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(), + pub fn baseZigTypeTag(self: Type, mod: *const Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), .Optional => { var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(); + return self.optionalChild(&buf).baseZigTypeTag(mod); }, else => |t| t, }; } - pub fn isSelfComparable(ty: Type, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag()) { + pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, => 
true, - .Vector => ty.elemType2().isSelfComparable(is_equality_cmp), + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), .Bool, .Type, @@ -205,44 +274,54 @@ pub const Type = extern union { .Optional => { if (!is_equality_cmp) return false; var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(is_equality_cmp); + return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp); }, }; } pub fn initTag(comptime small_tag: Tag) Type { comptime assert(@enumToInt(small_tag) < Tag.no_payload_count); - return .{ .tag_if_small_enough = small_tag }; + return Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = small_tag }, + }; } pub fn initPayload(payload: *Payload) Type { assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return .{ .ptr_otherwise = payload }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = payload }, + }; } - pub fn tag(self: Type) Tag { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return self.tag_if_small_enough; + pub fn tag(ty: Type) Tag { + assert(ty.ip_index == .none); + if (@enumToInt(ty.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return ty.legacy.tag_if_small_enough; } else { - return self.ptr_otherwise.tag; + return ty.legacy.ptr_otherwise.tag; } } /// Prefer `castTag` to this. pub fn cast(self: Type, comptime T: type) ?*T { + if (self.ip_index != .none) { + return null; + } if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { return null; } inline for (@typeInfo(Tag).Enum.fields) |field| { if (field.value < Tag.no_payload_count) continue; const t = @intToEnum(Tag, field.value); - if (self.ptr_otherwise.tag == t) { + if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.ptr_otherwise); + return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); } return null; } @@ -251,11 +330,14 @@ pub const Type = extern union { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) + if (self.ip_index != .none) { + return null; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; - if (self.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); + if (self.legacy.ptr_otherwise.tag == t) + return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); return null; } @@ -285,10 +367,10 @@ pub const Type = extern union { } /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type) ?Type { - if (ty.zigTypeTag() != .Pointer) return null; + pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() != .Fn) return null; + if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } @@ -536,7 +618,10 @@ pub const Type = extern union { pub fn eql(a: Type, b: Type, mod: *Module) bool { // As a shortcut, if the small tags / addresses match, we're done. 
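The eql() body that continues below gains a fast path built on that observation: once either operand is interned, structural equality collapses to index comparison, because the pool deduplicates structurally equal types. A sketch of that rule under stand-in definitions:

    const std = @import("std");

    const Index = enum(u32) { none = 0, _ };
    const Ty = struct { ip_index: Index, legacy_tag: u32 };

    // Interned types compare by index alone; only two legacy types need a
    // structural comparison (heavily simplified here).
    fn eql(a: Ty, b: Ty) bool {
        if (a.ip_index != .none or b.ip_index != .none) return a.ip_index == b.ip_index;
        return a.legacy_tag == b.legacy_tag;
    }

    test "index equality stands in for structural equality" {
        const a = Ty{ .ip_index = @intToEnum(Index, 1), .legacy_tag = undefined };
        const b = Ty{ .ip_index = @intToEnum(Index, 1), .legacy_tag = undefined };
        try std.testing.expect(eql(a, b));
        try std.testing.expect(!eql(a, Ty{ .ip_index = .none, .legacy_tag = 1 }));
    }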
- if (a.tag_if_small_enough == b.tag_if_small_enough) return true; + if (a.ip_index != .none or b.ip_index != .none) { + return a.ip_index == b.ip_index; + } + if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { .generic_poison => unreachable, @@ -589,16 +674,11 @@ pub const Type = extern union { .i64, .u128, .i128, - .int_signed, - .int_unsigned, => { - if (b.zigTypeTag() != .Int) return false; + if (b.zigTypeTag(mod) != .Int) return false; if (b.isNamedInt()) return false; - - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. - const info_a = a.intInfo(@as(Target, undefined)); - const info_b = b.intInfo(@as(Target, undefined)); + const info_a = a.intInfo(mod); + const info_b = b.intInfo(mod); return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; }, @@ -641,13 +721,8 @@ pub const Type = extern union { return opaque_obj_a == opaque_obj_b; }, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { - if (b.zigTypeTag() != .Fn) return false; + .function => { + if (b.zigTypeTag(mod) != .Fn) return false; const a_info = a.fnInfo(); const b_info = b.fnInfo(); @@ -699,7 +774,7 @@ pub const Type = extern union { .array_sentinel, .vector, => { - if (a.zigTypeTag() != b.zigTypeTag()) return false; + if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; if (a.arrayLen() != b.arrayLen()) return false; @@ -737,7 +812,7 @@ pub const Type = extern union { .manyptr_const_u8, .manyptr_const_u8_sentinel_0, => { - if (b.zigTypeTag() != .Pointer) return false; + if (b.zigTypeTag(mod) != .Pointer) return false; const info_a = a.ptrInfo().data; const info_b = b.ptrInfo().data; @@ -783,7 +858,7 @@ pub const Type = extern union { .optional_single_const_pointer, .optional_single_mut_pointer, => { - if (b.zigTypeTag() != .Optional) return false; + if (b.zigTypeTag(mod) != .Optional) return false; var buf_a: Payload.ElemType = undefined; var buf_b: Payload.ElemType = undefined; @@ -791,7 +866,7 @@ pub const Type = extern union { }, .anyerror_void_error_union, .error_union => { - if (b.zigTypeTag() != .ErrorUnion) return false; + if (b.zigTypeTag(mod) != .ErrorUnion) return false; const a_set = a.errorUnionSet(); const b_set = b.errorUnionSet(); @@ -805,8 +880,8 @@ pub const Type = extern union { }, .anyframe_T => { - if (b.zigTypeTag() != .AnyFrame) return false; - return a.elemType2().eql(b.elemType2(), mod); + if (b.zigTypeTag(mod) != .AnyFrame) return false; + return a.elemType2(mod).eql(b.elemType2(mod), mod); }, .empty_struct => { @@ -941,6 +1016,9 @@ pub const Type = extern union { } pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { + if (ty.ip_index != .none) { + return mod.intern_pool.indexToKey(ty.ip_index).hashWithHasher(hasher); + } switch (ty.tag()) { .generic_poison => unreachable, @@ -1007,13 +1085,10 @@ pub const Type = extern union { .i64, .u128, .i128, - .int_signed, - .int_unsigned, => { - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. + // Arbitrary sized integers. 
std.hash.autoHash(hasher, std.builtin.TypeId.Int); - const info = ty.intInfo(@as(Target, undefined)); + const info = ty.intInfo(mod); std.hash.autoHash(hasher, info.signedness); std.hash.autoHash(hasher, info.bits); }, @@ -1052,12 +1127,7 @@ pub const Type = extern union { std.hash.autoHash(hasher, opaque_obj); }, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { + .function => { std.hash.autoHash(hasher, std.builtin.TypeId.Fn); const fn_info = ty.fnInfo(); @@ -1275,9 +1345,15 @@ pub const Type = extern union { }; pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return Type{ .tag_if_small_enough = self.tag_if_small_enough }; - } else switch (self.ptr_otherwise.tag) { + if (self.ip_index != .none) { + return Type{ .ip_index = self.ip_index, .legacy = undefined }; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, + }; + } else switch (self.legacy.ptr_otherwise.tag) { .u1, .u8, .i8, @@ -1317,10 +1393,6 @@ pub const Type = extern union { .noreturn, .null, .undefined, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -1370,13 +1442,12 @@ pub const Type = extern union { .base = .{ .tag = payload.base.tag }, .data = try payload.data.copy(allocator), }; - return Type{ .ptr_otherwise = &new_payload.base }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - .int_signed, - .int_unsigned, - => return self.copyPayloadShallow(allocator, Payload.Bits), - .vector => { const payload = self.castTag(.vector).?.data; return Tag.vector.create(allocator, .{ @@ -1511,7 +1582,10 @@ pub const Type = extern union { const payload = self.cast(T).?; const new_payload = try allocator.create(T); new_payload.* = payload.*; - return Type{ .ptr_otherwise = &new_payload.base }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; } pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -1550,7 +1624,7 @@ pub const Type = extern union { } /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the target. + /// we also need access to the module. pub fn dump( start_type: Type, comptime unused_format_string: []const u8, @@ -1559,10 +1633,13 @@ pub const Type = extern union { ) @TypeOf(writer).Error!void { _ = options; comptime assert(unused_format_string.len == 0); + if (start_type.ip_index != .none) { + return writer.print("(intern index: {d})", .{@enumToInt(start_type.ip_index)}); + } if (true) { - // This is disabled to work around a bug where this function - // recursively causes more generic function instantiations - // resulting in an infinite loop in the compiler. + // This is disabled to work around a stage2 bug where this function recursively + // causes more generic function instantiations resulting in an infinite loop + // in the compiler. 
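The copy() change earlier in this hunk is the same idea applied to duplication: an interned type is just a 32-bit index, so copying it allocates nothing, and only legacy payloads still need a clone. A self-contained sketch; the failing allocator in the test demonstrates that the interned path never touches the heap:

    const std = @import("std");

    const Index = enum(u32) { none = 0, _ };

    const Copied = union(enum) {
        interned: Index,
        legacy: []u8,
    };

    // Interned: copy the index by value. Legacy: clone the payload bytes.
    fn copyType(allocator: std.mem.Allocator, ip_index: Index, legacy_payload: []const u8) !Copied {
        if (ip_index != .none) return Copied{ .interned = ip_index };
        return Copied{ .legacy = try allocator.dupe(u8, legacy_payload) };
    }

    test "interned copy performs no allocation" {
        // failing_allocator errors on any allocation, proving the fast path.
        const c = try copyType(std.testing.failing_allocator, @intToEnum(Index, 3), "payload");
        try std.testing.expect(c == .interned);
    }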
try writer.writeAll("[TODO fix internal compiler bug regarding dump]"); return; } @@ -1656,10 +1733,6 @@ pub const Type = extern union { .anyerror_void_error_union => return writer.writeAll("anyerror!void"), .const_slice_u8 => return writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => return writer.writeAll("fn() noreturn"), - .fn_void_no_args => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => return writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), .manyptr_u8 => return writer.writeAll("[*]u8"), .manyptr_const_u8 => return writer.writeAll("[*]const u8"), @@ -1820,14 +1893,6 @@ pub const Type = extern union { ty = pointee_type; continue; }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); @@ -1938,6 +2003,26 @@ pub const Type = extern union { /// Prints a name suitable for `@typeName`. pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => |s| return writer.writeAll(@tagName(s)), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, + }; const t = ty.tag(); switch (t) { .inferred_alloc_const => unreachable, @@ -2041,10 +2126,6 @@ pub const Type = extern union { .anyerror_void_error_union => try writer.writeAll("anyerror!void"), .const_slice_u8 => try writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => try writer.writeAll("fn() noreturn"), - .fn_void_no_args => try writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => try writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => try writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), .manyptr_u8 => try writer.writeAll("[*]u8"), .manyptr_const_u8 => try writer.writeAll("[*]const u8"), @@ -2200,7 +2281,7 @@ pub const Type = extern union { if (info.@"align" != 0) { try writer.print("align({d}", .{info.@"align"}); } else { - const alignment = info.pointee_type.abiAlignment(mod.getTarget()); + const alignment = info.pointee_type.abiAlignment(mod); try writer.print("align({d}", .{alignment}); } @@ -2224,14 +2305,6 @@ pub const Type = extern union { try print(info.pointee_type, writer, mod); }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, .optional => { const child_type = ty.castTag(.optional).?.data; 
try writer.writeByte('?'); @@ -2317,10 +2390,6 @@ pub const Type = extern union { .noreturn => return Value.initTag(.noreturn_type), .null => return Value.initTag(.null_type), .undefined => return Value.initTag(.undefined_type), - .fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type), - .fn_void_no_args => return Value.initTag(.fn_void_no_args_type), - .fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type), - .fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type), .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), @@ -2360,9 +2429,24 @@ pub const Type = extern union { /// may return false positives. pub fn hasRuntimeBitsAdvanced( ty: Type, + mod: *const Module, ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits != 0, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; switch (ty.tag()) { .u1, .u8, @@ -2440,12 +2524,12 @@ pub const Type = extern union { => { if (ignore_comptime_only) { return true; - } else if (ty.childType().zigTypeTag() == .Fn) { + } else if (ty.childType().zigTypeTag(mod) == .Fn) { return !ty.childType().fnInfo().is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { - return !comptimeOnly(ty); + return !comptimeOnly(ty, mod); } }, @@ -2465,10 +2549,6 @@ pub const Type = extern union { // Special exceptions have to be made when emitting functions due to // this returning false. 
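The int_type arm added above in hasRuntimeBitsAdvanced reduces "does this integer type occupy runtime bits" to a bit-count check, since only u0/i0 are zero-bit. Trivial, but easy to verify in isolation before the legacy switch continues below:

    const std = @import("std");

    // Matches the `.int_type => |int_type| return int_type.bits != 0` arm.
    fn intHasRuntimeBits(bits: u16) bool {
        return bits != 0;
    }

    test "zero-bit integers" {
        try std.testing.expect(!intHasRuntimeBits(0)); // u0
        try std.testing.expect(intHasRuntimeBits(1)); // u1
        // The language agrees: u0 values occupy no storage.
        try std.testing.expectEqual(@as(usize, 0), @sizeOf(u0));
    }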
.function, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, => return false, .optional => { @@ -2483,7 +2563,7 @@ pub const Type = extern union { } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(child_ty)); } else { - return !comptimeOnly(child_ty); + return !comptimeOnly(child_ty, mod); } }, @@ -2502,7 +2582,7 @@ pub const Type = extern union { } for (struct_obj.fields.values()) |field| { if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2511,16 +2591,15 @@ pub const Type = extern union { .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); + return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.fields.count() >= 2; }, .enum_numbered, .enum_nonexhaustive => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); + const int_tag_ty = ty.intTagType(); + return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, .@"union" => { @@ -2537,7 +2616,7 @@ pub const Type = extern union { .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, } for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2545,7 +2624,7 @@ pub const Type = extern union { }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) { + if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { return true; } @@ -2555,7 +2634,7 @@ pub const Type = extern union { .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, } for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2563,18 +2642,16 @@ pub const Type = extern union { }, .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), + try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), - - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0, + .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) 
|field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true; + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } return false; }, @@ -2588,7 +2665,21 @@ pub const Type = extern union { /// true if and only if the type has a well-defined memory layout /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type) bool { + pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return true, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; return switch (ty.tag()) { .u1, .u8, @@ -2626,8 +2717,6 @@ pub const Type = extern union { .manyptr_const_u8_sentinel_0, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .pointer, .single_const_pointer, .single_mut_pointer, @@ -2670,10 +2759,6 @@ pub const Type = extern union { .enum_literal, .type_info, // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -2698,25 +2783,25 @@ pub const Type = extern union { .array, .array_sentinel, - => ty.childType().hasWellDefinedLayout(), + => ty.childType().hasWellDefinedLayout(mod), - .optional => ty.isPtrLikeOptional(), + .optional => ty.isPtrLikeOptional(mod), .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, .union_tagged => false, }; } - pub fn hasRuntimeBits(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, false, .eager) catch unreachable; + pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; } - pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, true, .eager) catch unreachable; + pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; } - pub fn isFnOrHasRuntimeBits(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Fn => { const fn_info = ty.fnInfo(); if (fn_info.is_generic) return false; @@ -2727,18 +2812,18 @@ pub const Type = extern union { .Inline => return false, else => {}, } - if (fn_info.return_type.comptimeOnly()) return false; + if (fn_info.return_type.comptimeOnly(mod)) return false; return true; }, - else => return ty.hasRuntimeBits(), + else => return ty.hasRuntimeBits(mod), } } /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. 
- pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(), + else => return ty.hasRuntimeBitsIgnoreComptime(mod), }; } @@ -2761,11 +2846,11 @@ pub const Type = extern union { } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, target: Target) u32 { - return ptrAlignmentAdvanced(ty, target, null) catch unreachable; + pub fn ptrAlignment(ty: Type, mod: *const Module) u32 { + return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; } - pub fn ptrAlignmentAdvanced(ty: Type, target: Target, opt_sema: ?*Sema) !u32 { + pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { switch (ty.tag()) { .single_const_pointer, .single_mut_pointer, @@ -2780,10 +2865,10 @@ pub const Type = extern union { => { const child_type = ty.cast(Payload.ElemType).?.data; if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } - return (child_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; }, .manyptr_u8, @@ -2798,13 +2883,13 @@ pub const Type = extern union { if (ptr_info.@"align" != 0) { return ptr_info.@"align"; } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(target, opt_sema), + .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), else => unreachable, } @@ -2843,13 +2928,13 @@ pub const Type = extern union { } /// Returns 0 for 0-bit types. - pub fn abiAlignment(ty: Type, target: Target) u32 { - return (ty.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + pub fn abiAlignment(ty: Type, mod: *const Module) u32 { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } /// May capture a reference to `ty`. - pub fn lazyAbiAlignment(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, .scalar => |x| return Value.Tag.int_u64.create(arena, x), } @@ -2874,9 +2959,29 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. 
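// A minimal model of the signature migration applied throughout this hunk:
// instead of threading a bare `Target`, these methods now take a Module-like
// context and recover the target via `getTarget()` where it is still needed.
// `Module`, `Target`, and `ptrAlignment` below are illustrative stand-ins.
const std = @import("std");

const Target = struct { ptr_bit_width: u16 };

const Module = struct {
    target: Target,
    // In the compiler the Module also carries the InternPool, which is why
    // the Module (and not just the Target) must now be passed around.
    fn getTarget(mod: *const Module) Target {
        return mod.target;
    }
};

fn ptrAlignment(mod: *const Module) u32 {
    const target = mod.getTarget();
    return @divExact(target.ptr_bit_width, 8);
}

test "pointer alignment derived from the module's target" {
    const mod = Module{ .target = .{ .ptr_bit_width = 64 } };
    try std.testing.expectEqual(@as(u32, 8), ptrAlignment(&mod));
}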
pub fn abiAlignmentAdvanced( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; + return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + const opt_sema = switch (strat) { .sema => |sema| sema, else => null, @@ -2902,12 +3007,6 @@ pub const Type = extern union { .anyopaque, => return AbiAlignmentAdvanced{ .scalar = 1 }, - .fn_noreturn_no_args, // represents machine code; not a pointer - .fn_void_no_args, // represents machine code; not a pointer - .fn_naked_noreturn_no_args, // represents machine code; not a pointer - .fn_ccc_void_no_args, // represents machine code; not a pointer - => return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }, - // represents machine code; not a pointer .function => { const alignment = ty.castTag(.function).?.data.alignment; @@ -2958,12 +3057,11 @@ pub const Type = extern union { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, }; - const u80_ty = initPayload(&payload.base); - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; }, }, .f128 => switch (target.c_type_bit_size(.longdouble)) { @@ -2980,11 +3078,11 @@ pub const Type = extern union { .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat), + .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat), .vector => { const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), target, opt_sema); + const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema); const bytes = ((bits * len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; @@ -2996,34 +3094,28 @@ pub const Type = extern union { .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) }, .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; - return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(bits, target) }; - }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), + 
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, else => {}, } switch (strat) { .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = 1 }; } - return child_type.abiAlignmentAdvanced(target, strat); + return child_type.abiAlignmentAdvanced(mod, strat); }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(target, strat)) { + .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, }, @@ -3034,10 +3126,10 @@ pub const Type = extern union { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. const data = ty.castTag(.error_union).?.data; - const code_align = abiAlignment(Type.anyerror, target); + const code_align = abiAlignment(Type.anyerror, mod); switch (strat) { .eager, .sema => { - if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) { @@ -3045,11 +3137,11 @@ pub const Type = extern union { } return AbiAlignmentAdvanced{ .scalar = @max( code_align, - (try data.payload.abiAlignmentAdvanced(target, strat)).scalar, + (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, .lazy => |arena| { - switch (try data.payload.abiAlignmentAdvanced(target, strat)) { + switch (try data.payload.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ .scalar = @max(code_align, payload_align), @@ -3089,20 +3181,20 @@ pub const Type = extern union { .eager => {}, } assert(struct_obj.haveLayout()); - return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) }; + return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; } const fields = ty.structFields(); var big_align: u32 = 0; for (fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) continue; const field_align = if (field.abi_align != 0) field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { .eager => unreachable, // struct layout not resolved @@ -3114,7 +3206,7 @@ pub const Type = extern union { // This logic is duplicated in Module.Struct.Field.alignment. if (struct_obj.layout == .Extern or target.ofmt == .c) { - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { // The C ABI requires 128 bit integer fields of structs // to be 16-bytes aligned. 
big_align = @max(big_align, 16); @@ -3130,9 +3222,9 @@ pub const Type = extern union { for (tuple.types, 0..) |field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits())) continue; + if (!(field_ty.hasRuntimeBits(mod))) continue; - switch (try field_ty.abiAlignmentAdvanced(target, strat)) { + switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |field_align| big_align = @max(big_align, field_align), .val => switch (strat) { .eager => unreachable, // field type alignment not resolved @@ -3145,17 +3237,16 @@ pub const Type = extern union { }, .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(target) }; + const int_tag_ty = ty.intTagType(); + return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; }, .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, false); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, true); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); }, .empty_struct, @@ -3181,7 +3272,7 @@ pub const Type = extern union { pub fn abiAlignmentAdvancedUnion( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -3195,6 +3286,7 @@ pub const Type = extern union { // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations // might require explicit alignment. + const target = mod.getTarget(); return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; } _ = try sema.resolveTypeFields(ty); @@ -3206,23 +3298,23 @@ pub const Type = extern union { }; if (union_obj.fields.count() == 0) { if (have_tag) { - return abiAlignmentAdvanced(union_obj.tag_ty, target, strat); + return abiAlignmentAdvanced(union_obj.tag_ty, mod, strat); } else { return AbiAlignmentAdvanced{ .scalar = @boolToInt(union_obj.layout == .Extern) }; } } var max_align: u32 = 0; - if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target); + if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) continue; const field_align = if (field.abi_align != 0) field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { .eager => unreachable, // struct layout not resolved @@ -3236,8 +3328,8 @@ pub const Type = extern union { } /// May capture a reference to `ty`. 
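// A worked standalone model of the struct-field alignment loop above: a
// non-packed struct's ABI alignment is the maximum over its runtime fields,
// and for extern layout (or the C object format) the C ABI bumps integer
// fields of 128 bits or more to 16-byte alignment. The field shape below is
// an illustrative simplification.
const std = @import("std");

const FieldInfo = struct { abi_align: u32, int_bits: ?u16 };

fn structAbiAlignment(fields: []const FieldInfo, is_extern: bool) u32 {
    var big_align: u32 = 0;
    for (fields) |field| {
        var field_align = field.abi_align;
        if (is_extern) {
            if (field.int_bits) |bits| {
                // Mirrors the `isAbiInt(mod) and intInfo(mod).bits >= 128` check.
                if (bits >= 128) field_align = @max(field_align, 16);
            }
        }
        big_align = @max(big_align, field_align);
    }
    return big_align;
}

test "u128 field forces 16-byte extern struct alignment" {
    const fields = [_]FieldInfo{
        .{ .abi_align = 4, .int_bits = 32 },
        .{ .abi_align = 8, .int_bits = 128 },
    };
    try std.testing.expectEqual(@as(u32, 16), structAbiAlignment(&fields, true));
    try std.testing.expectEqual(@as(u32, 8), structAbiAlignment(&fields, false));
}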
- pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value { + switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, .scalar => |x| return Value.Tag.int_u64.create(arena, x), } @@ -3245,8 +3337,8 @@ pub const Type = extern union { /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, target: Target) u64 { - return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar; + pub fn abiSize(ty: Type, mod: *const Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; } const AbiSizeAdvanced = union(enum) { @@ -3262,14 +3354,30 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. pub fn abiSizeAdvanced( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer .function => unreachable, // represents machine code; not a pointer .@"opaque" => unreachable, // no size available .noreturn => unreachable, @@ -3308,7 +3416,7 @@ pub const Type = extern union { .eager => {}, } assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) }; + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; }, else => { switch (strat) { @@ -3327,22 +3435,21 @@ pub const Type = extern union { if (field_count == 0) { return AbiSizeAdvanced{ .scalar = 0 }; } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) }; + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) }; + const int_tag_ty = ty.intTagType(); + return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; }, .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, false); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, true); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); 
}, .u1, @@ -3361,7 +3468,7 @@ pub const Type = extern union { .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { const payload = ty.castTag(.array).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, .val => switch (strat) { .sema => unreachable, @@ -3372,7 +3479,7 @@ pub const Type = extern union { }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, .val => switch (strat) { .sema => unreachable, @@ -3391,10 +3498,10 @@ pub const Type = extern union { .val = try Value.Tag.lazy_size.create(arena, ty), }, }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(target, opt_sema); + const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema); const total_bits = elem_bits * payload.len; const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(target, strat)) { + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| x, .val => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty), @@ -3450,12 +3557,11 @@ pub const Type = extern union { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, }; - const u80_ty = initPayload(&payload.base); - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; }, }, @@ -3473,11 +3579,6 @@ pub const Type = extern union { .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) }; - }, .optional => { var buf: Payload.ElemType = undefined; @@ -3487,16 +3588,16 @@ pub const Type = extern union { return AbiSizeAdvanced{ .scalar = 0 }; } - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; - if (ty.optionalReprIsPayload()) { - return abiSizeAdvanced(child_type, target, strat); + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_type, mod, strat); } - const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) { + const payload_size = switch (try child_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, @@ -3510,7 +3611,7 @@ pub const Type = extern union { // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal // to the child 
type's ABI alignment. return AbiSizeAdvanced{ - .scalar = child_type.abiAlignment(target) + payload_size, + .scalar = child_type.abiAlignment(mod) + payload_size, }; }, @@ -3518,17 +3619,17 @@ pub const Type = extern union { // This code needs to be kept in sync with the equivalent switch prong // in abiAlignmentAdvanced. const data = ty.castTag(.error_union).?.data; - const code_size = abiSize(Type.anyerror, target); - if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + const code_size = abiSize(Type.anyerror, mod); + if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, else => |e| return e, })) { // Same as anyerror. return AbiSizeAdvanced{ .scalar = code_size }; } - const code_align = abiAlignment(Type.anyerror, target); - const payload_align = abiAlignment(data.payload, target); - const payload_size = switch (try data.payload.abiSizeAdvanced(target, strat)) { + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(data.payload, mod); + const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, @@ -3556,7 +3657,7 @@ pub const Type = extern union { pub fn abiSizeAdvancedUnion( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -3570,7 +3671,7 @@ pub const Type = extern union { }, .eager => {}, } - return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) }; + return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; } fn intAbiSize(bits: u16, target: Target) u64 { @@ -3585,8 +3686,8 @@ pub const Type = extern union { ); } - pub fn bitSize(ty: Type, target: Target) u64 { - return bitSizeAdvanced(ty, target, null) catch unreachable; + pub fn bitSize(ty: Type, mod: *const Module) u64 { + return bitSizeAdvanced(ty, mod, null) catch unreachable; } /// If you pass `opt_sema`, any recursive type resolutions will happen if @@ -3594,15 +3695,29 @@ pub const Type = extern union { /// the type is fully resolved, and there will be no error, guaranteed. 
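// A rough standalone model of the error-union layout sketched above: its
// alignment is max(error-code alignment, payload alignment), and a zero-bit
// payload collapses the whole type to plain `anyerror` (2 bytes in this
// revision, per the error-set prongs). The exact field order and padding
// computation live in elided context, so the layout here is a labeled
// assumption, not the compiler's code.
const std = @import("std");

fn errorUnionAbiSize(code_size: u64, code_align: u64, payload_size: u64, payload_align: u64) u64 {
    if (payload_size == 0) return code_size; // zero-bit payload: same as anyerror
    // Assumed layout: {code, payload}, rounded up to the overall alignment.
    const overall_align = @max(code_align, payload_align);
    const payload_offset = std.mem.alignForwardGeneric(u64, code_size, payload_align);
    return std.mem.alignForwardGeneric(u64, payload_offset + payload_size, overall_align);
}

test "anyerror!u64 under the assumed layout" {
    try std.testing.expectEqual(@as(u64, 16), errorUnionAbiSize(2, 2, 8, 8));
    // anyerror!void degrades to plain anyerror:
    try std.testing.expectEqual(@as(u64, 2), errorUnionAbiSize(2, 2, 0, 1));
}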
pub fn bitSizeAdvanced( ty: Type, - target: Target, + mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!u64 { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer .function => unreachable, // represents machine code; not a pointer .anyopaque => unreachable, .type => unreachable, @@ -3633,68 +3748,67 @@ pub const Type = extern union { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(target, opt_sema); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, .tuple, .anon_struct => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } var total: u64 = 0; for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, target, opt_sema); + total += try bitSizeAdvanced(field_ty, mod, opt_sema); } return total; }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return try bitSizeAdvanced(int_tag_ty, target, opt_sema); + const int_tag_ty = ty.intTagType(); + return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); }, .@"union", .union_safety_tagged, .union_tagged => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } const union_obj = ty.cast(Payload.Union).?.data; assert(union_obj.haveFieldTypes()); var size: u64 = 0; for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, target, opt_sema)); + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); } return size; }, .vector => { const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return elem_bit_size * payload.len; }, .array_u8 => return 8 * ty.castTag(.array_u8).?.data, .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), .array => { const payload = ty.castTag(.array).?.data; - const elem_size = 
std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target)); + const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); if (elem_size == 0 or payload.len == 0) return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return (payload.len - 1) * 8 * elem_size + elem_bit_size; }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; const elem_size = std.math.max( - payload.elem_type.abiAlignment(target), - payload.elem_type.abiSize(target), + payload.elem_type.abiAlignment(mod), + payload.elem_type.abiSize(mod), ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return payload.len * 8 * elem_size + elem_bit_size; }, @@ -3757,12 +3871,10 @@ pub const Type = extern union { .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data, - .optional, .error_union => { // Optionals and error unions are not packed so their bitsize // includes padding bits. - return (try abiSizeAdvanced(ty, target, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; }, .atomic_order, @@ -3782,8 +3894,8 @@ pub const Type = extern union { /// Returns true if the type's layout is already resolved and it is safe /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn layoutIsResolved(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Struct => { if (ty.castTag(.@"struct")) |struct_ty| { return struct_ty.data.haveLayout(); @@ -3798,16 +3910,16 @@ pub const Type = extern union { }, .Array => { if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(); + return ty.childType().layoutIsResolved(mod); }, .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - return payload_ty.layoutIsResolved(); + return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - return payload_ty.layoutIsResolved(); + return payload_ty.layoutIsResolved(mod); }, else => return true, } @@ -3994,13 +4106,13 @@ pub const Type = extern union { }; } - pub fn isAllowzeroPtr(self: Type) bool { + pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool { return switch (self.tag()) { .pointer => { const payload = self.castTag(.pointer).?.data; return payload.@"allowzero"; }, - else => return self.zigTypeTag() == .Optional, + else => return self.zigTypeTag(mod) == .Optional, }; } @@ -4016,7 +4128,7 @@ pub const Type = extern union { }; } - pub fn isPtrAtRuntime(self: Type) bool { + pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool { switch (self.tag()) { .c_const_pointer, .c_mut_pointer, @@ -4040,7 +4152,7 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag() != .Pointer) return false; + if (child_type.zigTypeTag(mod) != .Pointer) return false; const info = child_type.ptrInfo().data; switch (info.size) { .Slice, .C => return false, @@ -4054,15 +4166,15 @@ pub const Type = extern union { /// For pointer-like optionals, returns true, otherwise returns the 
allowzero property /// of pointers. - pub fn ptrAllowsZero(ty: Type) bool { - if (ty.isPtrLikeOptional()) { + pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { return true; } return ty.ptrInfo().data.@"allowzero"; } /// See also `isPtrLikeOptional`. - pub fn optionalReprIsPayload(ty: Type) bool { + pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4072,7 +4184,7 @@ pub const Type = extern union { .optional => { const child_ty = ty.castTag(.optional).?.data; - switch (child_ty.zigTypeTag()) { + switch (child_ty.zigTypeTag(mod)) { .Pointer => { const info = child_ty.ptrInfo().data; switch (info.size) { @@ -4093,7 +4205,7 @@ pub const Type = extern union { /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. - pub fn isPtrLikeOptional(self: Type) bool { + pub fn isPtrLikeOptional(self: Type, mod: *const Module) bool { switch (self.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4103,7 +4215,7 @@ pub const Type = extern union { .optional => { const child_ty = self.castTag(.optional).?.data; - if (child_ty.zigTypeTag() != .Pointer) return false; + if (child_ty.zigTypeTag(mod) != .Pointer) return false; const info = child_ty.ptrInfo().data; switch (info.size) { .Slice, .C => return false, @@ -4166,7 +4278,7 @@ pub const Type = extern union { /// For [N]T, returns T. /// For []T, returns T. /// For anyframe->T, returns T. - pub fn elemType2(ty: Type) Type { + pub fn elemType2(ty: Type, mod: *const Module) Type { return switch (ty.tag()) { .vector => ty.castTag(.vector).?.data.elem_type, .array => ty.castTag(.array).?.data.elem_type, @@ -4181,7 +4293,7 @@ pub const Type = extern union { .single_const_pointer, .single_mut_pointer, - => ty.castPointer().?.data.shallowElemType(), + => ty.castPointer().?.data.shallowElemType(mod), .array_u8, .array_u8_sentinel_0, @@ -4197,7 +4309,7 @@ pub const Type = extern union { const info = ty.castTag(.pointer).?.data; const child_ty = info.pointee_type; if (info.size == .One) { - return child_ty.shallowElemType(); + return child_ty.shallowElemType(mod); } else { return child_ty; } @@ -4213,16 +4325,16 @@ pub const Type = extern union { }; } - fn shallowElemType(child_ty: Type) Type { - return switch (child_ty.zigTypeTag()) { + fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { .Array, .Vector => child_ty.childType(), else => child_ty, }; } /// For vectors, returns the element type. Otherwise returns self. 
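// The pointer-like-optional checks above (optionalReprIsPayload /
// isPtrLikeOptional) describe a guarantee that is observable in plain Zig:
// an optional non-allowzero, non-C pointer costs no extra bytes, because
// address 0 is reserved to encode null, while other optionals carry a tag.
const std = @import("std");

test "optional pointers are pointer-sized" {
    try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32));
    // A plain optional integer has no forbidden bit pattern to reuse, so it
    // needs an explicit null flag plus padding:
    try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
}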
- pub fn scalarType(ty: Type) Type { - return switch (ty.zigTypeTag()) { + pub fn scalarType(ty: Type, mod: *const Module) Type { + return switch (ty.zigTypeTag(mod)) { .Vector => ty.childType(), else => ty, }; @@ -4360,19 +4472,19 @@ pub const Type = extern union { return union_obj.fields.getIndex(name); } - pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool { - return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(); + pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool { + return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod); } - pub fn unionGetLayout(ty: Type, target: Target) Module.Union.Layout { + pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout { switch (ty.tag()) { .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return union_obj.getLayout(target, false); + return union_obj.getLayout(mod, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.getLayout(target, true); + return union_obj.getLayout(mod, true); }, else => unreachable, } @@ -4441,8 +4553,8 @@ pub const Type = extern union { }; } - pub fn isError(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .ErrorUnion, .ErrorSet => true, else => false, }; @@ -4543,14 +4655,21 @@ pub const Type = extern union { } /// Returns true if and only if the type is a fixed-width integer. - pub fn isInt(self: Type) bool { - return self.isSignedInt() or self.isUnsignedInt(); + pub fn isInt(self: Type, mod: *const Module) bool { + return self.isSignedInt(mod) or self.isUnsignedInt(mod); } /// Returns true if and only if the type is a fixed-width, signed integer. - pub fn isSignedInt(self: Type) bool { - return switch (self.tag()) { - .int_signed, + pub fn isSignedInt(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.signedness == .signed, + .simple_type => |s| return switch (s) { + .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true, + else => false, + }, + else => return false, + }; + return switch (ty.tag()) { .i8, .isize, .c_char, @@ -4569,9 +4688,16 @@ pub const Type = extern union { } /// Returns true if and only if the type is a fixed-width, unsigned integer. - pub fn isUnsignedInt(self: Type) bool { - return switch (self.tag()) { - .int_unsigned, + pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.signedness == .unsigned, + .simple_type => |s| return switch (s) { + .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true, + else => false, + }, + else => return false, + }; + return switch (ty.tag()) { .usize, .c_ushort, .c_uint, @@ -4592,8 +4718,8 @@ pub const Type = extern union { /// Returns true for integers, enums, error sets, and packed structs. /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isAbiInt(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Enum, .ErrorSet => true, .Struct => ty.containerLayout() == .Packed, else => false, @@ -4601,17 +4727,26 @@ pub const Type = extern union { } /// Asserts the type is an integer, enum, error set, or vector of one of them. 
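// A toy mirror of the reworked isSignedInt/isUnsignedInt above: interned
// arbitrary-width integers carry their signedness in the InternPool key,
// while the C integer types are classified as simple types. `TyKey` is an
// illustrative stand-in for InternPool.Key, covering only the cases used here.
const std = @import("std");

const TyKey = union(enum) {
    int_type: struct { signedness: std.builtin.Signedness, bits: u16 },
    simple_type: enum { c_char, c_int, c_uint, isize, usize },
};

fn isSignedInt(key: TyKey) bool {
    return switch (key) {
        .int_type => |int| int.signedness == .signed,
        .simple_type => |s| switch (s) {
            // c_char lands in the signed list, matching the prongs above.
            .c_char, .c_int, .isize => true,
            .c_uint, .usize => false,
        },
    };
}

test "signedness classification" {
    try std.testing.expect(isSignedInt(.{ .int_type = .{ .signedness = .signed, .bits = 7 } }));
    try std.testing.expect(!isSignedInt(.{ .simple_type = .usize }));
}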
- pub fn intInfo(self: Type, target: Target) std.builtin.Type.Int { - var ty = self; + pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType { + const target = mod.getTarget(); + var ty = starting_ty; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + while (true) switch (ty.tag()) { - .int_unsigned => return .{ - .signedness = .unsigned, - .bits = ty.castTag(.int_unsigned).?.data, - }, - .int_signed => return .{ - .signedness = .signed, - .bits = ty.castTag(.int_signed).?.data, - }, .u1 => return .{ .signedness = .unsigned, .bits = 1 }, .u8 => return .{ .signedness = .unsigned, .bits = 8 }, .i8 => return .{ .signedness = .signed, .bits = 8 }, @@ -4729,32 +4864,14 @@ pub const Type = extern union { /// Asserts the type is a function. pub fn fnParamLen(self: Type) usize { - return switch (self.tag()) { - .fn_noreturn_no_args => 0, - .fn_void_no_args => 0, - .fn_naked_noreturn_no_args => 0, - .fn_ccc_void_no_args => 0, - .function => self.castTag(.function).?.data.param_types.len, - - else => unreachable, - }; + return self.castTag(.function).?.data.param_types.len; } /// Asserts the type is a function. The length of the slice must be at least the length /// given by `fnParamLen`. pub fn fnParamTypes(self: Type, types: []Type) void { - switch (self.tag()) { - .fn_noreturn_no_args => return, - .fn_void_no_args => return, - .fn_naked_noreturn_no_args => return, - .fn_ccc_void_no_args => return, - .function => { - const payload = self.castTag(.function).?.data; - @memcpy(types[0..payload.param_types.len], payload.param_types); - }, - - else => unreachable, - } + const payload = self.castTag(.function).?.data; + @memcpy(types[0..payload.param_types.len], payload.param_types); } /// Asserts the type is a function. @@ -4769,33 +4886,15 @@ pub const Type = extern union { } } - /// Asserts the type is a function. - pub fn fnReturnType(self: Type) Type { - return switch (self.tag()) { - .fn_noreturn_no_args => Type.initTag(.noreturn), - .fn_naked_noreturn_no_args => Type.initTag(.noreturn), - - .fn_void_no_args, - .fn_ccc_void_no_args, - => Type.initTag(.void), - - .function => self.castTag(.function).?.data.return_type, - - else => unreachable, - }; + /// Asserts the type is a function or a function pointer. + pub fn fnReturnType(ty: Type) Type { + const fn_ty = if (ty.castPointer()) |p| p.data else ty; + return fn_ty.castTag(.function).?.data.return_type; } /// Asserts the type is a function. pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { - return switch (self.tag()) { - .fn_noreturn_no_args => .Unspecified, - .fn_void_no_args => .Unspecified, - .fn_naked_noreturn_no_args => .Naked, - .fn_ccc_void_no_args => .C, - .function => self.castTag(.function).?.data.cc, - - else => unreachable, - }; + return self.castTag(.function).?.data.cc; } /// Asserts the type is a function. 
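// The reworked intInfo above returns InternPool.Key.IntType rather than
// std.builtin.Type.Int, but both are structurally `{ signedness, bits }`, so
// call sites keep reading the same two fields. For comparison, the userland
// equivalent of that information comes from @typeInfo:
const std = @import("std");

test "integer info is signedness plus bit count" {
    const info = @typeInfo(u7).Int;
    try std.testing.expectEqual(std.builtin.Signedness.unsigned, info.signedness);
    try std.testing.expectEqual(@as(u16, 7), info.bits);
}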
@@ -4809,15 +4908,15 @@ pub const Type = extern union { }; } - pub fn isValidParamType(self: Type) bool { - return switch (self.zigTypeTagOrPoison() catch return true) { + pub fn isValidParamType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { .Undefined, .Null, .Opaque, .NoReturn => false, else => true, }; } - pub fn isValidReturnType(self: Type) bool { - return switch (self.zigTypeTagOrPoison() catch return true) { + pub fn isValidReturnType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { .Undefined, .Null, .Opaque => false, else => true, }; @@ -4825,87 +4924,43 @@ pub const Type = extern union { /// Asserts the type is a function. pub fn fnIsVarArgs(self: Type) bool { - return switch (self.tag()) { - .fn_noreturn_no_args => false, - .fn_void_no_args => false, - .fn_naked_noreturn_no_args => false, - .fn_ccc_void_no_args => false, - .function => self.castTag(.function).?.data.is_var_args, - - else => unreachable, - }; + return self.castTag(.function).?.data.is_var_args; } pub fn fnInfo(ty: Type) Payload.Function.Data { - return switch (ty.tag()) { - .fn_noreturn_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.noreturn), - .cc = .Unspecified, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_void_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.void), - .cc = .Unspecified, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_naked_noreturn_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.noreturn), - .cc = .Naked, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_ccc_void_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.void), - .cc = .C, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .function => ty.castTag(.function).?.data, - - else => unreachable, - }; + return ty.castTag(.function).?.data; } - pub fn isNumeric(self: Type) bool { - return switch (self.tag()) { + pub fn isNumeric(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, + .simple_type => |s| return switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .c_longdouble, + .comptime_int, + .comptime_float, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + => true, + + else => false, + }, + else => false, + }; + return switch (ty.tag()) { .f16, .f32, .f64, @@ -4937,8 +4992,6 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, - .int_unsigned, - .int_signed, => true, else => false, @@ -4947,8 +5000,30 
@@ pub const Type = extern union { /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. - pub fn onePossibleValue(starting_type: Type) ?Value { + pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value { var ty = starting_type; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return Value.zero; + } else { + return null; + } + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + while (true) switch (ty.tag()) { .f16, .f32, @@ -4988,10 +5063,6 @@ pub const Type = extern union { .error_set_single, .error_set, .error_set_merged, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .single_const_pointer_to_comptime_int, .array_sentinel, @@ -5047,7 +5118,7 @@ pub const Type = extern union { assert(s.haveFieldTypes()); for (s.fields.values()) |field| { if (field.is_comptime) continue; - if (field.ty.onePossibleValue() != null) continue; + if (field.ty.onePossibleValue(mod) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -5058,7 +5129,7 @@ pub const Type = extern union { for (tuple.values, 0..) |val, i| { const is_comptime = val.tag() != .unreachable_value; if (is_comptime) continue; - if (tuple.types[i].onePossibleValue() != null) continue; + if (tuple.types[i].onePossibleValue(mod) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -5067,7 +5138,7 @@ pub const Type = extern union { .enum_numbered => { const enum_numbered = ty.castTag(.enum_numbered).?.data; // An explicit tag type is always provided for enum_numbered. 
- if (enum_numbered.tag_ty.hasRuntimeBits()) { + if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { return null; } assert(enum_numbered.fields.count() == 1); @@ -5075,7 +5146,7 @@ pub const Type = extern union { }, .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits()) { + if (enum_full.tag_ty.hasRuntimeBits(mod)) { return null; } switch (enum_full.fields.count()) { @@ -5098,7 +5169,7 @@ pub const Type = extern union { }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits()) { + if (!tag_ty.hasRuntimeBits(mod)) { return Value.zero; } else { return null; @@ -5106,10 +5177,10 @@ pub const Type = extern union { }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue() orelse return null; + const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value); const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue() orelse return null; + const val_val = only_field.ty.onePossibleValue(mod) orelse return null; _ = tag_val; _ = val_val; return Value.initTag(.empty_struct_value); @@ -5121,17 +5192,10 @@ pub const Type = extern union { .null => return Value.initTag(.null_value), .undefined => return Value.initTag(.undef), - .int_unsigned, .int_signed => { - if (ty.cast(Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } - }, .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue() != null) + if (ty.elemType().onePossibleValue(mod) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -5146,7 +5210,22 @@ pub const Type = extern union { /// resolves field types rather than asserting they are already resolved. /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. - pub fn comptimeOnly(ty: Type) bool { + pub fn comptimeOnly(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + return switch (ty.tag()) { .u1, .u8, @@ -5211,8 +5290,6 @@ pub const Type = extern union { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -5223,10 +5300,6 @@ pub const Type = extern union { .enum_literal, .type_info, // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -5236,7 +5309,7 @@ pub const Type = extern union { .array, .array_sentinel, .vector, - => return ty.childType().comptimeOnly(), + => return ty.childType().comptimeOnly(mod), .pointer, .single_const_pointer, @@ -5249,10 +5322,10 @@ pub const Type = extern union { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return false; } else { - return child_ty.comptimeOnly(); + return child_ty.comptimeOnly(mod); } }, @@ -5261,14 +5334,14 @@ pub const Type = extern union { .optional_single_const_pointer, => { var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).comptimeOnly(); + return ty.optionalChild(&buf).comptimeOnly(mod); }, .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly()) return true; + if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; } return false; }, @@ -5301,48 +5374,48 @@ pub const Type = extern union { } }, - .error_union => return ty.errorUnionPayload().comptimeOnly(), + .error_union => return ty.errorUnionPayload().comptimeOnly(mod), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; - return child_ty.comptimeOnly(); + return child_ty.comptimeOnly(mod); }, .enum_numbered => { const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(); + return tag_ty.comptimeOnly(mod); }, .enum_full, .enum_nonexhaustive => { const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(); + return tag_ty.comptimeOnly(mod); }, }; } - pub fn isArrayOrVector(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, else => false, }; } - pub fn isIndexable(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isIndexable(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize()) { .Slice, .Many, .C => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.elemType().zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, }; } - pub fn indexableHasLen(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn indexableHasLen(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize()) { .Many, .C => false, .Slice => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.elemType().zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, @@ -5366,19 +5439,19 @@ pub const Type = extern union { } // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, arena: Allocator, target: Target) !Value { - const scalar = try minIntScalar(ty.scalarType(), arena, target); - if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), arena, mod); + if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { return scalar; } } - /// Asserts that self.zigTypeTag() == .Int. 
- pub fn minIntScalar(ty: Type, arena: Allocator, target: Target) !Value { - assert(ty.zigTypeTag() == .Int); - const info = ty.intInfo(target); + /// Asserts that self.zigTypeTag(mod) == .Int. + pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value { + assert(ty.zigTypeTag(mod) == .Int); + const info = ty.intInfo(mod); if (info.bits == 0) { return Value.initTag(.the_only_possible_value); @@ -5405,9 +5478,9 @@ pub const Type = extern union { } // Works for vectors and vectors of integers. - pub fn maxInt(ty: Type, arena: Allocator, target: Target) !Value { - const scalar = try maxIntScalar(ty.scalarType(), arena, target); - if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value { + const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod); + if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { return scalar; @@ -5415,9 +5488,9 @@ pub const Type = extern union { } /// Asserts that self.zigTypeTag() == .Int. - pub fn maxIntScalar(self: Type, arena: Allocator, target: Target) !Value { - assert(self.zigTypeTag() == .Int); - const info = self.intInfo(target); + pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value { + assert(self.zigTypeTag(mod) == .Int); + const info = self.intInfo(mod); if (info.bits == 0) { return Value.initTag(.the_only_possible_value); @@ -5452,21 +5525,25 @@ pub const Type = extern union { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type, buffer: *Payload.Bits) Type { + pub fn intTagType(ty: Type) Type { switch (ty.tag()) { .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const field_count = enum_simple.fields.count(); - const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - buffer.* = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - return Type.initPayload(&buffer.base); + @panic("TODO move enum_simple to use the intern pool"); + //const enum_simple = ty.castTag(.enum_simple).?.data; + //const field_count = enum_simple.fields.count(); + //const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); + //buffer.* = .{ + // .base = .{ .tag = .int_unsigned }, + // .data = bits, + //}; + //return Type.initPayload(&buffer.base); + }, + .union_tagged => { + @panic("TODO move union_tagged to use the intern pool"); + //return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), }, - .union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), else => unreachable, } } @@ -5566,7 +5643,7 @@ pub const Type = extern union { }; const end_val = Value.initPayload(&end_payload.base); if (int_val.compareAll(.gte, end_val, int_ty, m)) return null; - return @intCast(usize, int_val.toUnsignedInt(m.getTarget())); + return @intCast(usize, int_val.toUnsignedInt(m)); } }; switch (ty.tag()) { @@ -5598,11 +5675,7 @@ pub const Type = extern union { const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); + const tag_ty = 
mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); }, .atomic_order, @@ -5675,19 +5748,19 @@ pub const Type = extern union { } } - pub fn structFieldAlign(ty: Type, index: usize, target: Target) u32 { + pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(target, struct_obj.layout); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].normalAlignment(target); + return union_obj.fields.values()[index].normalAlignment(mod); }, - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(target), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(target), + .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), else => unreachable, } } @@ -5710,7 +5783,7 @@ pub const Type = extern union { } } - pub fn structFieldValueComptime(ty: Type, index: usize) ?Value { + pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -5718,14 +5791,14 @@ pub const Type = extern union { if (field.is_comptime) { return field.default_val; } else { - return field.ty.onePossibleValue(); + return field.ty.onePossibleValue(mod); } }, .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; if (val.tag() == .unreachable_value) { - return tuple.types[index].onePossibleValue(); + return tuple.types[index].onePossibleValue(mod); } else { return val; } @@ -5734,7 +5807,7 @@ pub const Type = extern union { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; if (val.tag() == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(); + return anon_struct.types[index].onePossibleValue(mod); } else { return val; } @@ -5765,7 +5838,7 @@ pub const Type = extern union { } } - pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 { + pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -5774,9 +5847,9 @@ pub const Type = extern union { var elem_size_bits: u16 = undefined; var running_bits: u16 = 0; for (struct_obj.fields.values(), 0..) 
-            if (!f.ty.hasRuntimeBits()) continue;
+            if (!f.ty.hasRuntimeBits(mod)) continue;
 
-            const field_bits = @intCast(u16, f.ty.bitSize(target));
+            const field_bits = @intCast(u16, f.ty.bitSize(mod));
             if (i == field_index) {
                 bit_offset = running_bits;
                 elem_size_bits = field_bits;
@@ -5797,9 +5870,10 @@ pub const Type = extern union {
         offset: u64 = 0,
         big_align: u32 = 0,
         struct_obj: *Module.Struct,
-        target: Target,
+        module: *const Module,
 
         pub fn next(it: *StructOffsetIterator) ?FieldOffset {
+            const mod = it.module;
             var i = it.field;
             if (it.struct_obj.fields.count() <= i)
                 return null;
@@ -5811,35 +5885,35 @@ pub const Type = extern union {
             const field = it.struct_obj.fields.values()[i];
             it.field += 1;
 
-            if (field.is_comptime or !field.ty.hasRuntimeBits()) {
+            if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
                 return FieldOffset{ .field = i, .offset = it.offset };
             }
 
-            const field_align = field.alignment(it.target, it.struct_obj.layout);
+            const field_align = field.alignment(mod, it.struct_obj.layout);
             it.big_align = @max(it.big_align, field_align);
             const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
-            it.offset = field_offset + field.ty.abiSize(it.target);
+            it.offset = field_offset + field.ty.abiSize(mod);
             return FieldOffset{ .field = i, .offset = field_offset };
         }
     };
 
     /// Get an iterator that iterates over all the struct field, returning the field and
     /// offset of that field. Asserts that the type is a non-packed struct.
-    pub fn iterateStructOffsets(ty: Type, target: Target) StructOffsetIterator {
+    pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator {
         const struct_obj = ty.castTag(.@"struct").?.data;
         assert(struct_obj.haveLayout());
         assert(struct_obj.layout != .Packed);
-        return .{ .struct_obj = struct_obj, .target = target };
+        return .{ .struct_obj = struct_obj, .module = mod };
     }
 
     /// Supports structs and unions.
-    pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
+    pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 {
         switch (ty.tag()) {
             .@"struct" => {
                 const struct_obj = ty.castTag(.@"struct").?.data;
                 assert(struct_obj.haveLayout());
                 assert(struct_obj.layout != .Packed);
-                var it = ty.iterateStructOffsets(target);
+                var it = ty.iterateStructOffsets(mod);
                 while (it.next()) |field_offset| {
                     if (index == field_offset.field)
                         return field_offset.offset;
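For reference, the struct-offset iterator keeps the same shape; only the context it carries changed from a bare Target to the Module. A short sketch of walking all field offsets of a non-packed struct (hypothetical helper, assuming the struct's layout is already resolved):

    fn dumpFieldOffsets(mod: *const Module, struct_ty: Type) void {
        var it = struct_ty.iterateStructOffsets(mod);
        while (it.next()) |fo| {
            std.debug.print("field {d} at byte offset {d}\n", .{ fo.field, fo.offset });
        }
    }
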
@@ -5856,17 +5930,17 @@ pub const Type = extern union {
             for (tuple.types, 0..) |field_ty, i| {
                 const field_val = tuple.values[i];
-                if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
+                if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
                     // comptime field
                     if (i == index) return offset;
                     continue;
                 }
 
-                const field_align = field_ty.abiAlignment(target);
+                const field_align = field_ty.abiAlignment(mod);
                 big_align = @max(big_align, field_align);
                 offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                 if (i == index) return offset;
-                offset += field_ty.abiSize(target);
+                offset += field_ty.abiSize(mod);
             }
             offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
             return offset;
@@ -5875,7 +5949,7 @@ pub const Type = extern union {
         .@"union" => return 0,
         .union_safety_tagged, .union_tagged => {
             const union_obj = ty.cast(Payload.Union).?.data;
-            const layout = union_obj.getLayout(target, true);
+            const layout = union_obj.getLayout(mod, true);
             if (layout.tag_align >= layout.payload_align) {
                 // {Tag, Payload}
                 return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
@@ -6050,10 +6124,6 @@ pub const Type = extern union {
         manyptr_u8,
         manyptr_const_u8,
         manyptr_const_u8_sentinel_0,
-        fn_noreturn_no_args,
-        fn_void_no_args,
-        fn_naked_noreturn_no_args,
-        fn_ccc_void_no_args,
         single_const_pointer_to_comptime_int,
         const_slice_u8,
         const_slice_u8_sentinel_0,
@@ -6087,8 +6157,6 @@ pub const Type = extern union {
         c_mut_pointer,
         const_slice,
         mut_slice,
-        int_signed,
-        int_unsigned,
         function,
         optional,
         optional_single_mut_pointer,
@@ -6157,10 +6225,6 @@ pub const Type = extern union {
             .enum_literal,
             .null,
             .undefined,
-            .fn_noreturn_no_args,
-            .fn_void_no_args,
-            .fn_naked_noreturn_no_args,
-            .fn_ccc_void_no_args,
             .single_const_pointer_to_comptime_int,
             .anyerror_void_error_union,
             .const_slice_u8,
@@ -6204,10 +6268,6 @@ pub const Type = extern union {
             .anyframe_T,
             => Payload.ElemType,
 
-            .int_signed,
-            .int_unsigned,
-            => Payload.Bits,
-
             .error_set => Payload.ErrorSet,
             .error_set_inferred => Payload.ErrorSetInferred,
             .error_set_merged => Payload.ErrorSetMerged,
@@ -6232,7 +6292,10 @@ pub const Type = extern union {
 
         pub fn init(comptime t: Tag) file_struct.Type {
             comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count);
-            return .{ .tag_if_small_enough = t };
+            return file_struct.Type{
+                .ip_index = .none,
+                .legacy = .{ .tag_if_small_enough = t },
+            };
         }
 
         pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
@@ -6241,7 +6304,10 @@ pub const Type = extern union {
                 .base = .{ .tag = t },
                 .data = data,
             };
-            return file_struct.Type{ .ptr_otherwise = &p.base };
+            return file_struct.Type{
+                .ip_index = .none,
+                .legacy = .{ .ptr_otherwise = &p.base },
+            };
         }
 
         pub fn Data(comptime t: Tag) type {
@@ -6422,10 +6488,9 @@ pub const Type = extern union {
                 runtime = std.math.maxInt(u32) - 1,
                 _,
             };
-
-            pub fn alignment(data: Data, target: Target) u32 {
+            pub fn alignment(data: Data, mod: *const Module) u32 {
                 if (data.@"align" != 0) return data.@"align";
-                return abiAlignment(data.pointee_type, target);
+                return abiAlignment(data.pointee_type, mod);
             }
         };
     };
@@ -6537,12 +6602,11 @@ pub const Type = extern union {
     pub const @"anyerror" = initTag(.anyerror);
     pub const @"anyopaque" = initTag(.anyopaque);
     pub const @"null" = initTag(.null);
+    pub const @"noreturn" = initTag(.noreturn);
 
     pub const err_int = Type.u16;
 
     pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
-        const target = mod.getTarget();
-
         var d = data;
 
         if (d.size == .C) {
@@ -6554,8 +6618,8 @@ pub const Type = extern union {
         // pointee type needs to be resolved more, that needs to be done before calling
         // this ptr() function.
         if (d.@"align" != 0) canonicalize: {
-            if (!d.pointee_type.layoutIsResolved()) break :canonicalize;
-            if (d.@"align" == d.pointee_type.abiAlignment(target)) {
+            if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize;
+            if (d.@"align" == d.pointee_type.abiAlignment(mod)) {
                 d.@"align" = 0;
             }
         }
@@ -6565,7 +6629,7 @@ pub const Type = extern union {
         // needs to be resolved before calling this ptr() function.
         if (d.host_size != 0) {
             assert(d.bit_offset < d.host_size * 8);
-            if (d.host_size * 8 == d.pointee_type.bitSize(target)) {
+            if (d.host_size * 8 == d.pointee_type.bitSize(mod)) {
                 assert(d.bit_offset == 0);
                 d.host_size = 0;
             }
@@ -6676,7 +6740,7 @@ pub const Type = extern union {
         payload: Type,
         mod: *Module,
     ) Allocator.Error!Type {
-        assert(error_set.zigTypeTag() == .ErrorSet);
+        assert(error_set.zigTypeTag(mod) == .ErrorSet);
 
         if (error_set.eql(Type.anyerror, mod) and
             payload.eql(Type.void, mod))
@@ -6696,83 +6760,6 @@ pub const Type = extern union {
         return @intCast(u16, base + @boolToInt(upper < max));
     }
 
-    pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
-        const bits = smallestUnsignedBits(max);
-        return intWithBits(arena, false, bits);
-    }
-
-    pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type {
-        return if (sign) switch (bits) {
-            8 => initTag(.i8),
-            16 => initTag(.i16),
-            32 => initTag(.i32),
-            64 => initTag(.i64),
-            else => return Tag.int_signed.create(arena, bits),
-        } else switch (bits) {
-            1 => initTag(.u1),
-            8 => initTag(.u8),
-            16 => initTag(.u16),
-            32 => initTag(.u32),
-            64 => initTag(.u64),
-            else => return Tag.int_unsigned.create(arena, bits),
-        };
-    }
-
-    /// Given a value representing an integer, returns the number of bits necessary to represent
-    /// this value in an integer. If `sign` is true, returns the number of bits necessary in a
-    /// twos-complement integer; otherwise in an unsigned integer.
-    /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
-    pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 {
-        assert(!val.isUndef());
-        switch (val.tag()) {
-            .int_big_positive => {
-                const limbs = val.castTag(.int_big_positive).?.data;
-                const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
-                return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
-            },
-            .int_big_negative => {
-                const limbs = val.castTag(.int_big_negative).?.data;
-                // Zero is still a possibility, in which case unsigned is fine
-                for (limbs) |limb| {
-                    if (limb != 0) break;
-                } else return 0; // val == 0
-                assert(sign);
-                const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
-                return @intCast(u16, big.bitCountTwosComp());
-            },
-            .int_i64 => {
-                const x = val.castTag(.int_i64).?.data;
-                if (x >= 0) return smallestUnsignedBits(@intCast(u64, x));
-                assert(sign);
-                return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
-            },
-            else => {
-                const x = val.toUnsignedInt(target);
-                return smallestUnsignedBits(x) + @boolToInt(sign);
-            },
-        }
-    }
-
-    /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither
-    /// value is undef.
-    /// TODO: if #3806 is implemented, this becomes trivial
-    pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type {
-        assert(!min.isUndef());
-        assert(!max.isUndef());
-
-        if (std.debug.runtime_safety) {
-            assert(Value.order(min, max, target).compare(.lte));
-        }
-
-        const sign = min.orderAgainstZero() == .lt;
-
-        const min_val_bits = intBitsForValue(target, min, sign);
-        const max_val_bits = intBitsForValue(target, max, sign);
-        const bits = @max(min_val_bits, max_val_bits);
-
-        return intWithBits(arena, sign, bits);
-    }
-
     /// This is only used for comptime asserts. Bump this number when you make a change
     /// to packed struct layout to find out all the places in the codebase you need to edit!
     pub const packed_struct_layout_version = 2;
diff --git a/src/value.zig b/src/value.zig
index af2d7b1ca2..8c824b0720 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -11,17 +11,24 @@ const Module = @import("Module.zig");
 const Air = @import("Air.zig");
 const TypedValue = @import("TypedValue.zig");
 const Sema = @import("Sema.zig");
-
-/// This is the raw data, with no bookkeeping, no memory awareness,
-/// no de-duplication, and no type system awareness.
-/// It's important for this type to be small.
-/// This union takes advantage of the fact that the first page of memory
-/// is unmapped, giving us 4096 possible enum tags that have no payload.
-pub const Value = extern union {
-    /// If the tag value is less than Tag.no_payload_count, then no pointer
-    /// dereference is needed.
-    tag_if_small_enough: Tag,
-    ptr_otherwise: *Payload,
+const InternPool = @import("InternPool.zig");
+
+pub const Value = struct {
+    /// We are migrating towards using this for every Value object. However, many
+    /// values are still represented the legacy way. This is indicated by using
+    /// InternPool.Index.none.
+    ip_index: InternPool.Index,
+
+    /// This is the raw data, with no bookkeeping, no memory awareness,
+    /// no de-duplication, and no type system awareness.
+    /// This union takes advantage of the fact that the first page of memory
+    /// is unmapped, giving us 4096 possible enum tags that have no payload.
+    legacy: extern union {
+        /// If the tag value is less than Tag.no_payload_count, then no pointer
+        /// dereference is needed.
+        tag_if_small_enough: Tag,
+        ptr_otherwise: *Payload,
+    },
 
     // Keep in sync with tools/stage2_pretty_printers_common.py
     pub const Tag = enum(usize) {
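Every accessor below follows the invariant this struct introduces: a Value is either interned (ip_index != .none, legacy union undefined) or legacy (ip_index == .none). A sketch of the discriminating pattern, with a hypothetical helper name:

    fn describe(val: Value) []const u8 {
        if (val.ip_index != .none) {
            // Interned: the InternPool.Index is the whole representation;
            // val.legacy must not be read in this case.
            return "interned";
        }
        // Legacy: small tags are stored inline, larger ones behind a pointer,
        // discriminated by Tag.no_payload_count exactly as before.
        return "legacy";
    }
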
@@ -81,10 +88,6 @@ pub const Value = extern union {
         manyptr_u8_type,
         manyptr_const_u8_type,
         manyptr_const_u8_sentinel_0_type,
-        fn_noreturn_no_args_type,
-        fn_void_no_args_type,
-        fn_naked_noreturn_no_args_type,
-        fn_ccc_void_no_args_type,
         single_const_pointer_to_comptime_int_type,
         const_slice_u8_type,
         const_slice_u8_sentinel_0_type,
@@ -108,7 +111,6 @@ pub const Value = extern union {
 
         // After this, the tag requires a payload.
         ty,
-        int_type,
         int_u64,
         int_i64,
         int_big_positive,
@@ -232,10 +234,6 @@ pub const Value = extern union {
             .noreturn_type,
             .null_type,
             .undefined_type,
-            .fn_noreturn_no_args_type,
-            .fn_void_no_args_type,
-            .fn_naked_noreturn_no_args_type,
-            .fn_ccc_void_no_args_type,
             .single_const_pointer_to_comptime_int_type,
             .anyframe_type,
             .const_slice_u8_type,
@@ -304,7 +302,6 @@ pub const Value = extern union {
             .lazy_size,
             => Payload.Ty,
 
-            .int_type => Payload.IntType,
             .int_u64 => Payload.U64,
             .int_i64 => Payload.I64,
             .function => Payload.Function,
@@ -332,7 +329,10 @@ pub const Value = extern union {
                 .base = .{ .tag = t },
                 .data = data,
             };
-            return Value{ .ptr_otherwise = &ptr.base };
+            return Value{
+                .ip_index = .none,
+                .legacy = .{ .ptr_otherwise = &ptr.base },
+            };
         }
 
         pub fn Data(comptime t: Tag) type {
@@ -342,37 +342,47 @@ pub const Value = extern union {
 
     pub fn initTag(small_tag: Tag) Value {
         assert(@enumToInt(small_tag) < Tag.no_payload_count);
-        return .{ .tag_if_small_enough = small_tag };
+        return Value{
+            .ip_index = .none,
+            .legacy = .{ .tag_if_small_enough = small_tag },
+        };
     }
 
     pub fn initPayload(payload: *Payload) Value {
         assert(@enumToInt(payload.tag) >= Tag.no_payload_count);
-        return .{ .ptr_otherwise = payload };
+        return Value{
+            .ip_index = .none,
+            .legacy = .{ .ptr_otherwise = payload },
+        };
    }
 
     pub fn tag(self: Value) Tag {
-        if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
-            return self.tag_if_small_enough;
+        assert(self.ip_index == .none);
+        if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+            return self.legacy.tag_if_small_enough;
         } else {
-            return self.ptr_otherwise.tag;
+            return self.legacy.ptr_otherwise.tag;
         }
     }
 
     /// Prefer `castTag` to this.
     pub fn cast(self: Value, comptime T: type) ?*T {
+        if (self.ip_index != .none) {
+            return null;
+        }
         if (@hasField(T, "base_tag")) {
             return self.castTag(T.base_tag);
         }
-        if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
+        if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
             return null;
         }
         inline for (@typeInfo(Tag).Enum.fields) |field| {
             if (field.value < Tag.no_payload_count) continue;
             const t = @intToEnum(Tag, field.value);
-            if (self.ptr_otherwise.tag == t) {
+            if (self.legacy.ptr_otherwise.tag == t) {
                 if (T == t.Type()) {
-                    return @fieldParentPtr(T, "base", self.ptr_otherwise);
+                    return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
                 }
                 return null;
             }
@@ -381,11 +391,15 @@ pub const Value = extern union {
     }
 
     pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() {
-        if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
+        if (self.ip_index != .none) {
+            return null;
+        }
+
+        if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
             return null;
 
-        if (self.ptr_otherwise.tag == t)
-            return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+        if (self.legacy.ptr_otherwise.tag == t)
+            return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise);
 
         return null;
     }
 
     /// It's intentional that this function is not passed a corresponding Type, so that
     /// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types.
     pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value {
-        if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
-            return Value{ .tag_if_small_enough = self.tag_if_small_enough };
-        } else switch (self.ptr_otherwise.tag) {
+        if (self.ip_index != .none) {
+            return Value{ .ip_index = self.ip_index, .legacy = undefined };
+        }
+        if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+            return Value{
+                .ip_index = .none,
+                .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
+            };
+        } else switch (self.legacy.ptr_otherwise.tag) {
             .u1_type,
             .u8_type,
             .i8_type,
@@ -435,10 +455,6 @@ pub const Value = extern union {
             .noreturn_type,
             .null_type,
             .undefined_type,
-            .fn_noreturn_no_args_type,
-            .fn_void_no_args_type,
-            .fn_naked_noreturn_no_args_type,
-            .fn_ccc_void_no_args_type,
             .single_const_pointer_to_comptime_int_type,
             .anyframe_type,
             .const_slice_u8_type,
@@ -481,19 +497,24 @@ pub const Value = extern union {
                     .base = payload.base,
                     .data = try payload.data.copy(arena),
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
 
-            .int_type => return self.copyPayloadShallow(arena, Payload.IntType),
             .int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
             .int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
             .int_big_positive, .int_big_negative => {
                 const old_payload = self.cast(Payload.BigInt).?;
                 const new_payload = try arena.create(Payload.BigInt);
                 new_payload.* = .{
-                    .base = .{ .tag = self.ptr_otherwise.tag },
+                    .base = .{ .tag = self.legacy.ptr_otherwise.tag },
                     .data = try arena.dupe(std.math.big.Limb, old_payload.data),
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .function => return self.copyPayloadShallow(arena, Payload.Function),
             .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
@@ -512,7 +533,10 @@ pub const Value = extern union {
                         .container_ty = try payload.data.container_ty.copy(arena),
                     },
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .comptime_field_ptr => {
                 const payload = self.cast(Payload.ComptimeFieldPtr).?;
@@ -524,7 +548,10 @@ pub const Value = extern union {
                         .field_ty = try payload.data.field_ty.copy(arena),
                     },
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .elem_ptr => {
                 const payload = self.castTag(.elem_ptr).?;
@@ -537,7 +564,10 @@ pub const Value = extern union {
                         .index = payload.data.index,
                     },
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .field_ptr => {
                 const payload = self.castTag(.field_ptr).?;
@@ -550,7 +580,10 @@ pub const Value = extern union {
                         .field_index = payload.data.field_index,
                     },
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .bytes => {
                 const bytes = self.castTag(.bytes).?.data;
                 const new_payload = try arena.create(Payload.Bytes);
                 new_payload.* = .{
                     .base = .{ .tag = .bytes },
                     .data = try arena.dupe(u8, bytes),
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit),
             .repeated,
@@ -574,7 +610,10 @@ pub const Value = extern union {
                     .base = payload.base,
                     .data = try payload.data.copy(arena),
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .slice => {
                 const payload = self.castTag(.slice).?;
@@ -586,7 +625,10 @@ pub const Value = extern union {
                         .len = try payload.data.len.copy(arena),
                     },
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .float_16 => return self.copyPayloadShallow(arena, Payload.Float_16),
             .float_32 => return self.copyPayloadShallow(arena, Payload.Float_32),
@@ -600,7 +642,10 @@ pub const Value = extern union {
                     .base = payload.base,
                     .data = try arena.dupe(u8, payload.data),
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32),
             .@"error" => return self.copyPayloadShallow(arena, Payload.Error),
@@ -615,7 +660,10 @@ pub const Value = extern union {
                 for (new_payload.data, 0..) |*elem, i| {
                     elem.* = try payload.data[i].copy(arena);
                 }
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
             .@"union" => {
@@ -628,7 +676,10 @@ pub const Value = extern union {
                         .val = try tag_and_val.val.copy(arena),
                     },
                 };
-                return Value{ .ptr_otherwise = &new_payload.base };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
             },
 
             .inferred_alloc => unreachable,
@@ -640,7 +691,10 @@ pub const Value = extern union {
         const payload = self.cast(T).?;
         const new_payload = try arena.create(T);
         new_payload.* = payload.*;
-        return Value{ .ptr_otherwise = &new_payload.base };
+        return Value{
+            .ip_index = .none,
+            .legacy = .{ .ptr_otherwise = &new_payload.base },
+        };
     }
 
     pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
@@ -660,6 +714,10 @@ pub const Value = extern union {
         out_stream: anytype,
     ) !void {
         comptime assert(fmt.len == 0);
+        if (start_val.ip_index != .none) {
+            try out_stream.print("(interned {d})", .{@enumToInt(start_val.ip_index)});
+            return;
+        }
         var val = start_val;
         while (true) switch (val.tag()) {
             .u1_type => return out_stream.writeAll("u1"),
@@ -701,10 +759,6 @@ pub const Value = extern union {
             .noreturn_type => return out_stream.writeAll("noreturn"),
             .null_type => return out_stream.writeAll("@Type(.Null)"),
             .undefined_type => return out_stream.writeAll("@Type(.Undefined)"),
-            .fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
-            .fn_void_no_args_type => return out_stream.writeAll("fn() void"),
-            .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
-            .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
             .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
             .anyframe_type => return out_stream.writeAll("anyframe"),
             .const_slice_u8_type => return out_stream.writeAll("[]const u8"),
@@ -755,13 +809,6 @@ pub const Value = extern union {
                 try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
                 return try out_stream.writeAll(")");
             },
-            .int_type => {
-                const int_type = val.castTag(.int_type).?.data;
-                return out_stream.print("{s}{d}", .{
-                    if (int_type.signed) "s" else "u",
-                    int_type.bits,
-                });
-            },
             .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream),
             .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
             .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
@@ -848,7 +895,6 @@ pub const Value = extern union {
     /// Asserts that the value is representable as an array of bytes.
     /// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
     pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
-        const target = mod.getTarget();
         switch (val.tag()) {
             .bytes => {
                 const bytes = val.castTag(.bytes).?.data;
@@ -863,7 +909,7 @@ pub const Value = extern union {
             },
             .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
             .repeated => {
-                const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target));
+                const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
                 const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
                 @memset(result, byte);
                 return result;
@@ -877,7 +923,7 @@ pub const Value = extern union {
             .the_only_possible_value => return &[_]u8{},
             .slice => {
                 const slice = val.castTag(.slice).?.data;
-                return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod);
+                return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
             },
             else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod),
         }
     }
@@ -888,15 +934,19 @@ pub const Value = extern union {
         var elem_value_buf: ElemValueBuffer = undefined;
         for (result, 0..) |*elem, i| {
             const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
-            elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
+            elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
         }
         return result;
     }
 
-    pub const ToTypeBuffer = Type.Payload.Bits;
-
     /// Asserts that the value is representable as a type.
-    pub fn toType(self: Value, buffer: *ToTypeBuffer) Type {
+    pub fn toType(self: Value) Type {
+        if (self.ip_index != .none) {
+            return .{
+                .ip_index = self.ip_index,
+                .legacy = undefined,
+            };
+        }
         return switch (self.tag()) {
             .ty => self.castTag(.ty).?.data,
             .u1_type => Type.initTag(.u1),
@@ -938,10 +988,6 @@ pub const Value = extern union {
             .noreturn_type => Type.initTag(.noreturn),
             .null_type => Type.initTag(.null),
             .undefined_type => Type.initTag(.undefined),
-            .fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args),
-            .fn_void_no_args_type => Type.initTag(.fn_void_no_args),
-            .fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args),
-            .fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args),
             .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
             .anyframe_type => Type.initTag(.@"anyframe"),
             .const_slice_u8_type => Type.initTag(.const_slice_u8),
@@ -964,17 +1010,6 @@ pub const Value = extern union {
             .extern_options_type => Type.initTag(.extern_options),
             .type_info_type => Type.initTag(.type_info),
 
-            .int_type => {
-                const payload = self.castTag(.int_type).?.data;
-                buffer.* = .{
-                    .base = .{
-                        .tag = if (payload.signed) .int_signed else .int_unsigned,
-                    },
-                    .data = payload.bits,
-                };
-                return Type.initPayload(&buffer.base);
-            },
-
             else => unreachable,
         };
     }
@@ -1050,7 +1085,7 @@ pub const Value = extern union {
     }
 
     pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
-        if (ty.zigTypeTag() == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
+        if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
 
         const field_index = switch (val.tag()) {
             .enum_field_index => val.castTag(.enum_field_index).?.data,
@@ -1068,10 +1103,9 @@ pub const Value = extern union {
                 };
                 if (values.entries.len == 0) {
                     // auto-numbered enum
-                    break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget()));
+                    break :field_index @intCast(u32, val.toUnsignedInt(mod));
                 }
-                var buffer: Type.Payload.Bits = undefined;
-                const int_tag_ty = ty.intTagType(&buffer);
+                const int_tag_ty = ty.intTagType();
                 break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
             },
         };
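toType shows the payoff of interning: a type-valued Value converts by copying the index, with no caller-provided ToTypeBuffer keeping a synthesized payload alive. A before/after sketch of a hypothetical call site:

    // Before this change:
    //     var buf: Value.ToTypeBuffer = undefined;
    //     const ty = ty_val.toType(&buf);
    // After (hypothetical use):
    fn typeOfTypeValue(ty_val: Value) Type {
        return ty_val.toType();
    }
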
@@ -1086,15 +1120,15 @@ pub const Value = extern union {
     }
 
     /// Asserts the value is an integer.
-    pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst {
-        return val.toBigIntAdvanced(space, target, null) catch unreachable;
+    pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst {
+        return val.toBigIntAdvanced(space, mod, null) catch unreachable;
     }
 
     /// Asserts the value is an integer.
     pub fn toBigIntAdvanced(
         val: Value,
         space: *BigIntSpace,
-        target: Target,
+        mod: *const Module,
         opt_sema: ?*Sema,
     ) Module.CompileError!BigIntConst {
         switch (val.tag()) {
@@ -1114,7 +1148,7 @@ pub const Value = extern union {
             },
             .runtime_value => {
                 const sub_val = val.castTag(.runtime_value).?.data;
-                return sub_val.toBigIntAdvanced(space, target, opt_sema);
+                return sub_val.toBigIntAdvanced(space, mod, opt_sema);
             },
             .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
             .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
@@ -1128,7 +1162,7 @@ pub const Value = extern union {
                 if (opt_sema) |sema| {
                     try sema.resolveTypeLayout(ty);
                 }
-                const x = ty.abiAlignment(target);
+                const x = ty.abiAlignment(mod);
                 return BigIntMutable.init(&space.limbs, x).toConst();
             },
             .lazy_size => {
@@ -1136,14 +1170,14 @@ pub const Value = extern union {
                 if (opt_sema) |sema| {
                     try sema.resolveTypeLayout(ty);
                 }
-                const x = ty.abiSize(target);
+                const x = ty.abiSize(mod);
                 return BigIntMutable.init(&space.limbs, x).toConst();
             },
             .elem_ptr => {
                 const elem_ptr = val.castTag(.elem_ptr).?.data;
-                const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(target, opt_sema)).?;
-                const elem_size = elem_ptr.elem_ty.abiSize(target);
+                const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?;
+                const elem_size = elem_ptr.elem_ty.abiSize(mod);
                 const new_addr = array_addr + elem_size * elem_ptr.index;
                 return BigIntMutable.init(&space.limbs, new_addr).toConst();
             },
@@ -1154,13 +1188,13 @@ pub const Value = extern union {
 
     /// If the value fits in a u64, return it, otherwise null.
     /// Asserts not undefined.
-    pub fn getUnsignedInt(val: Value, target: Target) ?u64 {
-        return getUnsignedIntAdvanced(val, target, null) catch unreachable;
+    pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 {
+        return getUnsignedIntAdvanced(val, mod, null) catch unreachable;
     }
 
     /// If the value fits in a u64, return it, otherwise null.
     /// Asserts not undefined.
-    pub fn getUnsignedIntAdvanced(val: Value, target: Target, opt_sema: ?*Sema) !?u64 {
+    pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 {
         switch (val.tag()) {
             .zero,
             .bool_false,
@@ -1181,17 +1215,17 @@ pub const Value = extern union {
             .lazy_align => {
                 const ty = val.castTag(.lazy_align).?.data;
                 if (opt_sema) |sema| {
-                    return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar;
+                    return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
                 } else {
-                    return ty.abiAlignment(target);
+                    return ty.abiAlignment(mod);
                 }
             },
             .lazy_size => {
                 const ty = val.castTag(.lazy_size).?.data;
                 if (opt_sema) |sema| {
-                    return (try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar;
+                    return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
                 } else {
-                    return ty.abiSize(target);
+                    return ty.abiSize(mod);
                 }
             },
 
@@ -1200,12 +1234,12 @@ pub const Value = extern union {
     }
 
     /// Asserts the value is an integer and it fits in a u64
-    pub fn toUnsignedInt(val: Value, target: Target) u64 {
-        return getUnsignedInt(val, target).?;
+    pub fn toUnsignedInt(val: Value, mod: *const Module) u64 {
+        return getUnsignedInt(val, mod).?;
     }
 
     /// Asserts the value is an integer and it fits in a i64
-    pub fn toSignedInt(val: Value, target: Target) i64 {
+    pub fn toSignedInt(val: Value, mod: *const Module) i64 {
         switch (val.tag()) {
             .zero,
             .bool_false,
@@ -1223,11 +1257,11 @@ pub const Value = extern union {
 
             .lazy_align => {
                 const ty = val.castTag(.lazy_align).?.data;
-                return @intCast(i64, ty.abiAlignment(target));
+                return @intCast(i64, ty.abiAlignment(mod));
             },
             .lazy_size => {
                 const ty = val.castTag(.lazy_size).?.data;
-                return @intCast(i64, ty.abiSize(target));
+                return @intCast(i64, ty.abiSize(mod));
             },
 
             .undef => unreachable,
@@ -1276,17 +1310,17 @@ pub const Value = extern union {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef()) {
-            const size = @intCast(usize, ty.abiSize(target));
+            const size = @intCast(usize, ty.abiSize(mod));
             @memset(buffer[0..size], 0xaa);
             return;
         }
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => {},
             .Bool => {
                 buffer[0] = @boolToInt(val.toBool());
             },
             .Int, .Enum => {
-                const int_info = ty.intInfo(target);
+                const int_info = ty.intInfo(mod);
                 const bits = int_info.bits;
                 const byte_count = (bits + 7) / 8;
@@ -1307,7 +1341,7 @@ pub const Value = extern union {
                     };
                 } else {
                     var bigint_buffer: BigIntSpace = undefined;
-                    const bigint = int_val.toBigInt(&bigint_buffer, target);
+                    const bigint = int_val.toBigInt(&bigint_buffer, mod);
                     bigint.writeTwosComplement(buffer[0..byte_count], endian);
                 }
             },
@@ -1322,7 +1356,7 @@ pub const Value = extern union {
             .Array => {
                 const len = ty.arrayLen();
                 const elem_ty = ty.childType();
-                const elem_size = @intCast(usize, elem_ty.abiSize(target));
+                const elem_size = @intCast(usize, elem_ty.abiSize(mod));
                 var elem_i: usize = 0;
                 var elem_value_buf: ElemValueBuffer = undefined;
                 var buf_off: usize = 0;
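The integer accessors follow suit: anything that may hit a lazy_align/lazy_size payload needs the Module to resolve the underlying type. A sketch with a hypothetical helper name; getUnsignedInt returns null when the value does not fit in a u64:

    fn smallUnsigned(mod: *const Module, val: Value) ?u64 {
        std.debug.assert(!val.isUndef());
        return val.getUnsignedInt(mod);
    }
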
@@ -1335,7 +1369,7 @@ pub const Value = extern union {
             .Vector => {
                 // We use byte_count instead of abi_size here, so that any padding bytes
                 // follow the data bytes, on both big- and little-endian systems.
-                const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
             },
             .Struct => switch (ty.containerLayout()) {
@@ -1344,12 +1378,12 @@ pub const Value = extern union {
                     const fields = ty.structFields().values();
                     const field_vals = val.castTag(.aggregate).?.data;
                     for (fields, 0..) |field, i| {
-                        const off = @intCast(usize, ty.structFieldOffset(i, target));
+                        const off = @intCast(usize, ty.structFieldOffset(i, mod));
                         try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
                     }
                 },
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                     return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
                 },
             },
@@ -1363,7 +1397,7 @@ pub const Value = extern union {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => return error.Unimplemented,
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                     return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
                 },
             },
@@ -1373,10 +1407,10 @@ pub const Value = extern union {
                 return val.writeToMemory(Type.usize, mod, buffer);
             },
             .Optional => {
-                if (!ty.isPtrLikeOptional()) return error.IllDefinedMemoryLayout;
+                if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout;
                 var buf: Type.Payload.ElemType = undefined;
                 const child = ty.optionalChild(&buf);
-                const opt_val = val.optionalValue();
+                const opt_val = val.optionalValue(mod);
                 if (opt_val) |some| {
                     return some.writeToMemory(child, mod, buffer);
                 } else {
@@ -1395,11 +1429,11 @@ pub const Value = extern union {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef()) {
-            const bit_size = @intCast(usize, ty.bitSize(target));
+            const bit_size = @intCast(usize, ty.bitSize(mod));
             std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
             return;
         }
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => {},
             .Bool => {
                 const byte_index = switch (endian) {
@@ -1413,8 +1447,8 @@ pub const Value = extern union {
                 }
             },
             .Int, .Enum => {
-                const bits = ty.intInfo(target).bits;
-                const abi_size = @intCast(usize, ty.abiSize(target));
+                const bits = ty.intInfo(mod).bits;
+                const abi_size = @intCast(usize, ty.abiSize(mod));
 
                 var enum_buffer: Payload.U64 = undefined;
                 const int_val = val.enumToInt(ty, &enum_buffer);
@@ -1431,7 +1465,7 @@ pub const Value = extern union {
                     std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
                 } else {
                     var bigint_buffer: BigIntSpace = undefined;
-                    const bigint = int_val.toBigInt(&bigint_buffer, target);
+                    const bigint = int_val.toBigInt(&bigint_buffer, mod);
                     bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian);
                 }
             },
@@ -1445,7 +1479,7 @@ pub const Value = extern union {
             },
             .Vector => {
                 const elem_ty = ty.childType();
-                const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+                const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
                 const len = @intCast(usize, ty.arrayLen());
 
                 var bits: u16 = 0;
@@ -1467,7 +1501,7 @@ pub const Value = extern union {
                     const fields = ty.structFields().values();
                     const field_vals = val.castTag(.aggregate).?.data;
                     for (fields, 0..) |field, i| {
-                        const field_bits = @intCast(u16, field.ty.bitSize(target));
+                        const field_bits = @intCast(u16, field.ty.bitSize(mod));
                         try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
                         bits += field_bits;
                     }
@@ -1479,7 +1513,7 @@ pub const Value = extern union {
                 .Packed => {
                     const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
                     const field_type = ty.unionFields().values()[field_index.?].ty;
-                    const field_val = val.fieldValue(field_type, field_index.?);
+                    const field_val = val.fieldValue(field_type, mod, field_index.?);
 
                     return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
                 },
@@ -1490,10 +1524,10 @@ pub const Value = extern union {
                 return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
             },
             .Optional => {
-                assert(ty.isPtrLikeOptional());
+                assert(ty.isPtrLikeOptional(mod));
                 var buf: Type.Payload.ElemType = undefined;
                 const child = ty.optionalChild(&buf);
-                const opt_val = val.optionalValue();
+                const opt_val = val.optionalValue(mod);
                 if (opt_val) |some| {
                     return some.writeToPackedMemory(child, mod, buffer, bit_offset);
                 } else {
@@ -1516,7 +1550,7 @@ pub const Value = extern union {
     ) Allocator.Error!Value {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => return Value.void,
             .Bool => {
                 if (buffer[0] == 0) {
@@ -1526,7 +1560,7 @@ pub const Value = extern union {
                 }
             },
             .Int, .Enum => {
-                const int_info = ty.intInfo(target);
+                const int_info = ty.intInfo(mod);
                 const bits = int_info.bits;
                 const byte_count = (bits + 7) / 8;
                 if (bits == 0 or buffer.len == 0) return Value.zero;
@@ -1560,7 +1594,7 @@ pub const Value = extern union {
             },
             .Array => {
                 const elem_ty = ty.childType();
-                const elem_size = elem_ty.abiSize(target);
+                const elem_size = elem_ty.abiSize(mod);
                 const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
                 var offset: usize = 0;
                 for (elems) |*elem| {
@@ -1572,7 +1606,7 @@ pub const Value = extern union {
             .Vector => {
                 // We use byte_count instead of abi_size here, so that any padding bytes
                 // follow the data bytes, on both big- and little-endian systems.
-                const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
             },
             .Struct => switch (ty.containerLayout()) {
@@ -1581,14 +1615,14 @@ pub const Value = extern union {
                     const fields = ty.structFields().values();
                     const field_vals = try arena.alloc(Value, fields.len);
                     for (fields, 0..) |field, i| {
-                        const off = @intCast(usize, ty.structFieldOffset(i, target));
-                        const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
+                        const off = @intCast(usize, ty.structFieldOffset(i, mod));
+                        const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod));
                         field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
                     }
                     return Tag.aggregate.create(arena, field_vals);
                 },
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                     return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
                 },
             },
@@ -1609,7 +1643,7 @@ pub const Value = extern union {
                 return readFromMemory(Type.usize, mod, buffer, arena);
             },
             .Optional => {
-                assert(ty.isPtrLikeOptional());
+                assert(ty.isPtrLikeOptional(mod));
                 var buf: Type.Payload.ElemType = undefined;
                 const child = ty.optionalChild(&buf);
                 return readFromMemory(child, mod, buffer, arena);
@@ -1631,7 +1665,7 @@ pub const Value = extern union {
     ) Allocator.Error!Value {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => return Value.void,
             .Bool => {
                 const byte = switch (endian) {
@@ -1646,8 +1680,8 @@ pub const Value = extern union {
             },
             .Int, .Enum => {
                 if (buffer.len == 0) return Value.zero;
-                const int_info = ty.intInfo(target);
-                const abi_size = @intCast(usize, ty.abiSize(target));
+                const int_info = ty.intInfo(mod);
+                const abi_size = @intCast(usize, ty.abiSize(mod));
 
                 const bits = int_info.bits;
                 if (bits == 0) return Value.zero;
@@ -1677,7 +1711,7 @@ pub const Value = extern union {
                 const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
 
                 var bits: u16 = 0;
-                const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+                const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
                 for (elems, 0..) |_, i| {
                     // On big-endian systems, LLVM reverses the element order of vectors by default
                     const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
@@ -1694,7 +1728,7 @@ pub const Value = extern union {
                 const fields = ty.structFields().values();
                 const field_vals = try arena.alloc(Value, fields.len);
                 for (fields, 0..) |field, i| {
-                    const field_bits = @intCast(u16, field.ty.bitSize(target));
+                    const field_bits = @intCast(u16, field.ty.bitSize(mod));
                     field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
                     bits += field_bits;
                 }
@@ -1706,7 +1740,7 @@ pub const Value = extern union {
                 return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
             },
             .Optional => {
-                assert(ty.isPtrLikeOptional());
+                assert(ty.isPtrLikeOptional(mod));
                 var buf: Type.Payload.ElemType = undefined;
                 const child = ty.optionalChild(&buf);
                 return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
@@ -1764,8 +1798,8 @@ pub const Value = extern union {
         }
     }
 
-    pub fn clz(val: Value, ty: Type, target: Target) u64 {
-        const ty_bits = ty.intInfo(target).bits;
+    pub fn clz(val: Value, ty: Type, mod: *const Module) u64 {
+        const ty_bits = ty.intInfo(mod).bits;
         switch (val.tag()) {
             .zero, .bool_false => return ty_bits,
             .one, .bool_true => return ty_bits - 1,
@@ -1792,7 +1826,7 @@ pub const Value = extern union {
 
             .lazy_align, .lazy_size => {
                 var bigint_buf: BigIntSpace = undefined;
-                const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
+                const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
                 return bigint.clz(ty_bits);
             },
 
@@ -1800,8 +1834,8 @@ pub const Value = extern union {
         }
     }
 
-    pub fn ctz(val: Value, ty: Type, target: Target) u64 {
-        const ty_bits = ty.intInfo(target).bits;
+    pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 {
+        const ty_bits = ty.intInfo(mod).bits;
         switch (val.tag()) {
             .zero, .bool_false => return ty_bits,
             .one, .bool_true => return 0,
@@ -1828,7 +1862,7 @@ pub const Value = extern union {
 
             .lazy_align, .lazy_size => {
                 var bigint_buf: BigIntSpace = undefined;
-                const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
+                const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
                 return bigint.ctz();
             },
 
@@ -1836,7 +1870,7 @@ pub const Value = extern union {
         }
     }
 
-    pub fn popCount(val: Value, ty: Type, target: Target) u64 {
+    pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 {
         assert(!val.isUndef());
         switch (val.tag()) {
             .zero, .bool_false => return 0,
@@ -1845,22 +1879,22 @@ pub const Value = extern union {
             .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
 
             else => {
-                const info = ty.intInfo(target);
+                const info = ty.intInfo(mod);
 
                 var buffer: Value.BigIntSpace = undefined;
-                const int = val.toBigInt(&buffer, target);
+                const int = val.toBigInt(&buffer, mod);
                 return @intCast(u64, int.popCount(info.bits));
             },
         }
     }
 
-    pub fn bitReverse(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+    pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
         assert(!val.isUndef());
 
-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);
 
         var buffer: Value.BigIntSpace = undefined;
-        const operand_bigint = val.toBigInt(&buffer, target);
+        const operand_bigint = val.toBigInt(&buffer, mod);
 
         const limbs = try arena.alloc(
             std.math.big.Limb,
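All of these bit-level helpers (clz, ctz, popCount, bitReverse) now derive the integer width from ty.intInfo(mod) rather than from a Target. A sketch of a hypothetical use:

    fn countSetBits(mod: *const Module, val: Value, ty: Type) u64 {
        std.debug.assert(!val.isUndef());
        return val.popCount(ty, mod);
    }
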
@@ -1872,16 +1906,16 @@ pub const Value = extern union {
         return fromBigInt(arena, result_bigint.toConst());
     }
 
-    pub fn byteSwap(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+    pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
         assert(!val.isUndef());
 
-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);
 
         // Bit count must be evenly divisible by 8
         assert(info.bits % 8 == 0);
 
         var buffer: Value.BigIntSpace = undefined;
-        const operand_bigint = val.toBigInt(&buffer, target);
+        const operand_bigint = val.toBigInt(&buffer, mod);
 
         const limbs = try arena.alloc(
             std.math.big.Limb,
@@ -1895,7 +1929,8 @@ pub const Value = extern union {
 
     /// Asserts the value is an integer and not undefined.
     /// Returns the number of bits the value requires to represent stored in twos complement form.
-    pub fn intBitCountTwosComp(self: Value, target: Target) usize {
+    pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize {
+        const target = mod.getTarget();
         switch (self.tag()) {
             .zero,
             .bool_false,
@@ -1926,7 +1961,7 @@ pub const Value = extern union {
 
             else => {
                 var buffer: BigIntSpace = undefined;
-                return self.toBigInt(&buffer, target).bitCountTwosComp();
+                return self.toBigInt(&buffer, mod).bitCountTwosComp();
             },
         }
     }
@@ -1962,12 +1997,13 @@ pub const Value = extern union {
         };
     }
 
-    pub fn orderAgainstZero(lhs: Value) std.math.Order {
-        return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
+    pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order {
+        return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable;
     }
 
     pub fn orderAgainstZeroAdvanced(
         lhs: Value,
+        mod: *const Module,
         opt_sema: ?*Sema,
     ) Module.CompileError!std.math.Order {
         return switch (lhs.tag()) {
@@ -1991,7 +2027,7 @@ pub const Value = extern union {
                 // This is needed to correctly handle hashing the value.
                 // Checks in Sema should prevent direct comparisons from reaching here.
                 const val = lhs.castTag(.runtime_value).?.data;
-                return val.orderAgainstZeroAdvanced(opt_sema);
+                return val.orderAgainstZeroAdvanced(mod, opt_sema);
             },
             .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
             .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
@@ -2001,7 +2037,7 @@ pub const Value = extern union {
             .lazy_align => {
                 const ty = lhs.castTag(.lazy_align).?.data;
                 const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
-                if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+                if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
                     error.NeedLazy => unreachable,
                     else => |e| return e,
                 }) {
@@ -2013,7 +2049,7 @@ pub const Value = extern union {
             .lazy_size => {
                 const ty = lhs.castTag(.lazy_size).?.data;
                 const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
-                if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+                if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
                     error.NeedLazy => unreachable,
                     else => |e| return e,
                 }) {
@@ -2031,7 +2067,7 @@ pub const Value = extern union {
 
             .elem_ptr => {
                 const elem_ptr = lhs.castTag(.elem_ptr).?.data;
-                switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(opt_sema)) {
+                switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) {
                     .lt => unreachable,
                     .gt => return .gt,
                     .eq => {
@@ -2049,17 +2085,17 @@ pub const Value = extern union {
     }
 
     /// Asserts the value is comparable.
-    pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order {
-        return orderAdvanced(lhs, rhs, target, null) catch unreachable;
+    pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order {
+        return orderAdvanced(lhs, rhs, mod, null) catch unreachable;
     }
 
     /// Asserts the value is comparable.
     /// If opt_sema is null then this function asserts things are resolved and cannot fail.
-    pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, opt_sema: ?*Sema) !std.math.Order {
+    pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order {
         const lhs_tag = lhs.tag();
         const rhs_tag = rhs.tag();
-        const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(opt_sema);
-        const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(opt_sema);
+        const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema);
+        const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
         switch (lhs_against_zero) {
             .lt => if (rhs_against_zero != .lt) return .lt,
             .eq => return rhs_against_zero.invert(),
@@ -2093,22 +2129,22 @@ pub const Value = extern union {
 
         var lhs_bigint_space: BigIntSpace = undefined;
         var rhs_bigint_space: BigIntSpace = undefined;
-        const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, opt_sema);
-        const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, opt_sema);
+        const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema);
+        const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema);
         return lhs_bigint.order(rhs_bigint);
     }
 
     /// Asserts the value is comparable. Does not take a type parameter because it supports
     /// comparisons between heterogeneous types.
-    pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool {
-        return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable;
+    pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool {
+        return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable;
     }
 
     pub fn compareHeteroAdvanced(
         lhs: Value,
         op: std.math.CompareOperator,
         rhs: Value,
-        target: Target,
+        mod: *const Module,
         opt_sema: ?*Sema,
     ) !bool {
         if (lhs.pointerDecl()) |lhs_decl| {
@@ -2132,20 +2168,20 @@ pub const Value = extern union {
                 else => {},
             }
         }
-        return (try orderAdvanced(lhs, rhs, target, opt_sema)).compare(op);
+        return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op);
     }
 
     /// Asserts the values are comparable. Both operands have type `ty`.
     /// For vectors, returns true if comparison is true for ALL elements.
     pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
-        if (ty.zigTypeTag() == .Vector) {
+        if (ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
             while (i < ty.vectorLen()) : (i += 1) {
                 var lhs_buf: Value.ElemValueBuffer = undefined;
                 var rhs_buf: Value.ElemValueBuffer = undefined;
                 const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
                 const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) {
+                if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) {
                     return false;
                 }
             }
@@ -2165,7 +2201,7 @@ pub const Value = extern union {
         return switch (op) {
             .eq => lhs.eql(rhs, ty, mod),
             .neq => !lhs.eql(rhs, ty, mod),
-            else => compareHetero(lhs, op, rhs, mod.getTarget()),
+            else => compareHetero(lhs, op, rhs, mod),
         };
     }
@@ -2231,7 +2267,7 @@ pub const Value = extern union {
             .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq,
             else => {},
         }
-        return (try orderAgainstZeroAdvanced(lhs, opt_sema)).compare(op);
+        return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op);
     }
 
     pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@@ -2346,7 +2382,7 @@ pub const Value = extern union {
             return true;
         }
 
-        if (ty.zigTypeTag() == .Struct) {
+        if (ty.zigTypeTag(mod) == .Struct) {
             const fields = ty.structFields().values();
             assert(fields.len == a_field_vals.len);
             for (fields, 0..) |field, i| {
@@ -2406,12 +2442,10 @@ pub const Value = extern union {
             return false;
         }
 
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Type => {
-                var buf_a: ToTypeBuffer = undefined;
-                var buf_b: ToTypeBuffer = undefined;
-                const a_type = a.toType(&buf_a);
-                const b_type = b.toType(&buf_b);
+                const a_type = a.toType();
+                const b_type = b.toType();
                 return a_type.eql(b_type, mod);
             },
             .Enum => {
@@ -2419,8 +2453,7 @@ pub const Value = extern union {
                 var buf_b: Payload.U64 = undefined;
                 const a_val = a.enumToInt(ty, &buf_a);
                 const b_val = b.enumToInt(ty, &buf_b);
-                var buf_ty: Type.Payload.Bits = undefined;
-                const int_ty = ty.intTagType(&buf_ty);
+                const int_ty = ty.intTagType();
                 return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
             },
             .Array, .Vector => {
@@ -2466,11 +2499,11 @@ pub const Value = extern union {
                 // .the_one_possible_value,
                 // .aggregate,
                 // Note that we already checked above for matching tags, e.g. both .aggregate.
-                return ty.onePossibleValue() != null;
+                return ty.onePossibleValue(mod) != null;
             },
             .Union => {
                 // Here we have to check for value equality, as-if `a` has been coerced to `ty`.
-                if (ty.onePossibleValue() != null) {
+                if (ty.onePossibleValue(mod) != null) {
                     return true;
                 }
                 if (a_ty.castTag(.anon_struct)) |payload| {
@@ -2533,13 +2566,13 @@ pub const Value = extern union {
             else => {},
         }
         if (a_tag == .null_value or a_tag == .@"error") return false;
-        return (try orderAdvanced(a, b, target, opt_sema)).compare(.eq);
+        return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
     }
 
     /// This function is used by hash maps and so treats floating-point NaNs as equal
     /// to each other, and not equal to other floating-point values.
     pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
-        const zig_ty_tag = ty.zigTypeTag();
+        const zig_ty_tag = ty.zigTypeTag(mod);
         std.hash.autoHash(hasher, zig_ty_tag);
         if (val.isUndef()) return;
         // The value is runtime-known and shouldn't affect the hash.
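Comparison keeps the element-wise vector convention: compareAll is true only if the scalar comparison holds for every element, so callers needing any-element semantics must loop themselves. A one-line sketch of a hypothetical wrapper:

    fn allEqual(lhs: Value, rhs: Value, ty: Type, mod: *Module) bool {
        return lhs.compareAll(.eq, rhs, ty, mod);
    }
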
@@ -2555,8 +2588,7 @@ pub const Value = extern union {
             => {},
 
             .Type => {
-                var buf: ToTypeBuffer = undefined;
-                return val.toType(&buf).hashWithHasher(hasher, mod);
+                return val.toType().hashWithHasher(hasher, mod);
             },
             .Float => {
                 // For hash/eql purposes, we treat floats as their IEEE integer representation.
@@ -2588,7 +2620,7 @@ pub const Value = extern union {
                     hash(slice.len, Type.usize, hasher, mod);
                 },
 
-                else => return hashPtr(val, hasher, mod.getTarget()),
+                else => return hashPtr(val, hasher, mod),
             },
             .Array, .Vector => {
                 const len = ty.arrayLen();
@@ -2648,7 +2680,7 @@ pub const Value = extern union {
             .Enum => {
                 var enum_space: Payload.U64 = undefined;
                 const int_val = val.enumToInt(ty, &enum_space);
-                hashInt(int_val, hasher, mod.getTarget());
+                hashInt(int_val, hasher, mod);
             },
             .Union => {
                 const union_obj = val.cast(Payload.Union).?.data;
@@ -2691,7 +2723,7 @@ pub const Value = extern union {
         // The value is runtime-known and shouldn't affect the hash.
         if (val.tag() == .runtime_value) return;
 
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Opaque => unreachable, // Cannot hash opaque types
             .Void,
             .NoReturn,
@@ -2700,8 +2732,7 @@ pub const Value = extern union {
             .Struct, // It sure would be nice to do something clever with structs.
             => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
             .Type => {
-                var buf: ToTypeBuffer = undefined;
-                val.toType(&buf).hashWithHasher(hasher, mod);
+                val.toType().hashWithHasher(hasher, mod);
             },
             .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
             .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
@@ -2711,7 +2742,7 @@ pub const Value = extern union {
                     const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
                     slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
                 },
-                else => val.hashPtr(hasher, mod.getTarget()),
+                else => val.hashPtr(hasher, mod),
             },
             .Array, .Vector => {
                 const len = ty.arrayLen();
@@ -2821,16 +2852,16 @@ pub const Value = extern union {
         };
     }
 
-    fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+    fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
         var buffer: BigIntSpace = undefined;
-        const big = int_val.toBigInt(&buffer, target);
+        const big = int_val.toBigInt(&buffer, mod);
         std.hash.autoHash(hasher, big.positive);
         for (big.limbs) |limb| {
             std.hash.autoHash(hasher, limb);
         }
     }
 
-    fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+    fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
         switch (ptr_val.tag()) {
             .decl_ref,
             .decl_ref_mut,
@@ -2847,25 +2878,25 @@ pub const Value = extern union {
 
             .elem_ptr => {
                 const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-                hashPtr(elem_ptr.array_ptr, hasher, target);
+                hashPtr(elem_ptr.array_ptr, hasher, mod);
                 std.hash.autoHash(hasher, Value.Tag.elem_ptr);
                 std.hash.autoHash(hasher, elem_ptr.index);
             },
             .field_ptr => {
                 const field_ptr = ptr_val.castTag(.field_ptr).?.data;
                 std.hash.autoHash(hasher, Value.Tag.field_ptr);
-                hashPtr(field_ptr.container_ptr, hasher, target);
+                hashPtr(field_ptr.container_ptr, hasher, mod);
                 std.hash.autoHash(hasher, field_ptr.field_index);
             },
             .eu_payload_ptr => {
                 const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
                 std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
-                hashPtr(err_union_ptr.container_ptr, hasher, target);
+                hashPtr(err_union_ptr.container_ptr, hasher, mod);
             },
             .opt_payload_ptr => {
                 const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
                 std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
hashPtr(opt_ptr.container_ptr, hasher, target); + hashPtr(opt_ptr.container_ptr, hasher, mod); }, .zero, @@ -2880,7 +2911,7 @@ pub const Value = extern union { .the_only_possible_value, .lazy_align, .lazy_size, - => return hashInt(ptr_val, hasher, target), + => return hashInt(ptr_val, hasher, mod), else => unreachable, } @@ -2897,11 +2928,11 @@ pub const Value = extern union { pub fn sliceLen(val: Value, mod: *Module) u64 { return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()), + .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), .decl_ref => { const decl_index = val.castTag(.decl_ref).?.data; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Array) { + if (decl.ty.zigTypeTag(mod) == .Array) { return decl.ty.arrayLen(); } else { return 1; @@ -2910,7 +2941,7 @@ pub const Value = extern union { .decl_ref_mut => { const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Array) { + if (decl.ty.zigTypeTag(mod) == .Array) { return decl.ty.arrayLen(); } else { return 1; @@ -2918,7 +2949,7 @@ pub const Value = extern union { }, .comptime_field_ptr => { const payload = val.castTag(.comptime_field_ptr).?.data; - if (payload.field_ty.zigTypeTag() == .Array) { + if (payload.field_ty.zigTypeTag(mod) == .Array) { return payload.field_ty.arrayLen(); } else { return 1; @@ -3003,7 +3034,7 @@ pub const Value = extern union { if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, data.field_index); + const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValueAdvanced(mod, index, arena, buffer); } else unreachable; }, @@ -3032,10 +3063,7 @@ pub const Value = extern union { } /// Returns true if a Value is backed by a variable - pub fn isVariable( - val: Value, - mod: *Module, - ) bool { + pub fn isVariable(val: Value, mod: *Module) bool { return switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), @@ -3119,7 +3147,7 @@ pub const Value = extern union { }; } - pub fn fieldValue(val: Value, ty: Type, index: usize) Value { + pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -3131,14 +3159,14 @@ pub const Value = extern union { return payload.val; }, - .the_only_possible_value => return ty.onePossibleValue().?, + .the_only_possible_value => return ty.onePossibleValue(mod).?, .empty_struct_value => { if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); return tuple.values[index]; } - if (ty.structFieldValueComptime(index)) |some| { + if (ty.structFieldValueComptime(mod, index)) |some| { return some; } unreachable; @@ -3165,7 +3193,7 @@ pub const Value = extern union { index: usize, mod: *Module, ) Allocator.Error!Value { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); const ptr_val = switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr, else => val, @@ -3207,7 +3235,7 @@ pub const Value = extern union { switch (self.tag()) { .slice => { const payload = self.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod.getTarget()); + const len = 
payload.data.len.toUnsignedInt(mod); var elem_value_buf: ElemValueBuffer = undefined; var i: usize = 0; @@ -3233,7 +3261,7 @@ pub const Value = extern union { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(self: Value) bool { + pub fn isNull(self: Value, mod: *const Module) bool { return switch (self.tag()) { .null_value => true, .opt_payload => false, @@ -3254,7 +3282,7 @@ pub const Value = extern union { .int_i64, .int_big_positive, .int_big_negative, - => self.orderAgainstZero().compare(.eq), + => self.orderAgainstZero(mod).compare(.eq), .undef => unreachable, .unreachable_value => unreachable, @@ -3300,8 +3328,8 @@ pub const Value = extern union { } /// Value of the optional, null if optional has no payload. - pub fn optionalValue(val: Value) ?Value { - if (val.isNull()) return null; + pub fn optionalValue(val: Value, mod: *const Module) ?Value { + if (val.isNull(mod)) return null; // Valid for optional representation to be the direct value // and not use opt_payload. @@ -3333,20 +3361,20 @@ pub const Value = extern union { } pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - const target = mod.getTarget(); - if (int_ty.zigTypeTag() == .Vector) { + if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema); + scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } - return intToFloatScalar(val, arena, float_ty, target, opt_sema); + return intToFloatScalar(val, arena, float_ty, mod, opt_sema); } - pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value { + pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { + const target = mod.getTarget(); switch (val.tag()) { .undef, .zero, .one => return val, .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 @@ -3369,17 +3397,17 @@ pub const Value = extern union { .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); } else { - return intToFloatInner(ty.abiAlignment(target), arena, float_ty, target); + return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); } }, .lazy_size => { const ty = val.castTag(.lazy_size).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); } else { - return intToFloatInner(ty.abiSize(target), arena, float_ty, target); + return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); } }, else => unreachable, @@ -3446,19 +3474,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == 
.Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intAddSatScalar(lhs, rhs, ty, arena, target); + return intAddSatScalar(lhs, rhs, ty, arena, mod); } /// Supports integers only; asserts neither operand is undefined. @@ -3467,17 +3494,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3495,19 +3522,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intSubSatScalar(lhs, rhs, ty, arena, target); + return intSubSatScalar(lhs, rhs, ty, arena, mod); } /// Supports integers only; asserts neither operand is undefined. @@ -3516,17 +3542,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3543,8 +3569,7 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try arena.alloc(Value, ty.vectorLen()); const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -3552,7 +3577,7 @@ pub const Value = extern union { var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -3561,7 +3586,7 @@ pub const Value = extern union { .wrapped_result = try Value.Tag.aggregate.create(arena, result_data), }; } - return intMulWithOverflowScalar(lhs, rhs, ty, arena, target); + return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); } pub fn intMulWithOverflowScalar( @@ -3569,14 +3594,14 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -3607,14 +3632,14 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3631,7 +3656,7 @@ pub const Value = extern union { ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); } @@ -3651,19 +3676,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intMulSatScalar(lhs, rhs, ty, arena, target); + return intMulSatScalar(lhs, rhs, ty, arena, mod); } /// Supports (vectors of) integers only; asserts neither operand is undefined. 
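For reference, the saturating big-int pattern shared by these scalar helpers can be exercised with the standard library alone. A self-contained sketch; the fixed signed 8-bit width is illustrative, whereas the compiler takes signedness and bit count from `ty.intInfo(mod)`:

    const std = @import("std");

    fn addSatI8(gpa: std.mem.Allocator, a: i64, b: i64) !i64 {
        var lhs = try std.math.big.int.Managed.initSet(gpa, a);
        defer lhs.deinit();
        var rhs = try std.math.big.int.Managed.initSet(gpa, b);
        defer rhs.deinit();
        var result = try std.math.big.int.Managed.init(gpa);
        defer result.deinit();
        // Saturate into the two's-complement range of an i8, mirroring
        // what intAddSatScalar does after calcTwosCompLimbCount(info.bits).
        try result.addSat(&lhs, &rhs, .signed, 8);
        return result.to(i64);
    }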
@@ -3672,17 +3696,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.max( @@ -3702,24 +3726,24 @@ pub const Value = extern union { } /// Supports both floats and ints; handles undefined. - pub fn numberMax(lhs: Value, rhs: Value, target: Target) Value { + pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; if (lhs.isNan()) return rhs; if (rhs.isNan()) return lhs; - return switch (order(lhs, rhs, target)) { + return switch (order(lhs, rhs, mod)) { .lt => rhs, .gt, .eq => lhs, }; } /// Supports both floats and ints; handles undefined. - pub fn numberMin(lhs: Value, rhs: Value, target: Target) Value { + pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; if (lhs.isNan()) return rhs; if (rhs.isNan()) return lhs; - return switch (order(lhs, rhs, target)) { + return switch (order(lhs, rhs, mod)) { .lt => lhs, .gt, .eq => rhs, }; @@ -3727,24 +3751,23 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target); + scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return bitwiseNotScalar(val, ty, arena, target); + return bitwiseNotScalar(val, ty, arena, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value { + pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (val.isUndef()) return Value.initTag(.undef); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits == 0) { return val; @@ -3753,7 +3776,7 @@ pub const Value = extern union { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3766,31 +3789,30 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseAndScalar(lhs, rhs, allocator, target); + return bitwiseAndScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3803,14 +3825,14 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3823,41 +3845,40 @@ pub const Value = extern union { const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt()) + const all_ones = if (ty.isSignedInt(mod)) try Value.Tag.int_i64.create(arena, -1) else - try ty.maxInt(arena, mod.getTarget()); + try ty.maxInt(arena, mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseOrScalar(lhs, rhs, allocator, target); + return bitwiseOrScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. 
- pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), @@ -3869,31 +3890,30 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseXorScalar(lhs, rhs, allocator, target); + return bitwiseXorScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3905,28 +3925,27 @@ pub const Value = extern union { } pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivScalar(lhs, rhs, allocator, target); + return intDivScalar(lhs, rhs, allocator, mod); } - pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3946,28 +3965,27 @@ pub const Value = extern union { } pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivFloorScalar(lhs, rhs, allocator, target); + return intDivFloorScalar(lhs, rhs, allocator, mod); } - pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3987,28 +4005,27 @@ pub const Value = extern union { } pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intModScalar(lhs, rhs, allocator, target); + return intModScalar(lhs, rhs, allocator, mod); } - pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -4064,14 +4081,14 @@ pub const Value = extern union { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4111,14 +4128,14 @@ pub const Value = extern union { pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4157,28 +4174,27 @@ pub const Value = extern union { } pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intMulScalar(lhs, rhs, allocator, target); + return intMulScalar(lhs, rhs, allocator, mod); } - pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -4194,17 +4210,16 @@ pub const Value = extern union { } pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, bits, target); + return intTruncScalar(val, allocator, signedness, bits, mod); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -4216,26 +4231,25 @@ pub const Value = extern union { bits: Value, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); var bits_buf: Value.ElemValueBuffer = undefined; const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(target)), target); + return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } - pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value { + pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (bits == 0) return Value.zero; var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, @@ -4248,27 +4262,26 @@ pub const Value = extern union { } pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shlScalar(lhs, rhs, allocator, target); + return shlScalar(lhs, rhs, allocator, mod); } - pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4289,8 +4302,7 @@ pub const Value = extern union { allocator: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -4298,7 +4310,7 @@ pub const Value = extern union { var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -4307,7 +4319,7 @@ pub const Value = extern union { .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data), }; } - return shlWithOverflowScalar(lhs, rhs, ty, allocator, target); + return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); } pub fn shlWithOverflowScalar( @@ -4315,12 +4327,12 @@ pub const Value = extern union { rhs: Value, ty: Type, allocator: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4348,19 +4360,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return shlSatScalar(lhs, rhs, ty, arena, target); + return shlSatScalar(lhs, rhs, ty, arena, mod); } pub fn shlSatScalar( @@ -4368,15 +4379,15 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -4397,14 +4408,14 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4419,33 +4430,32 @@ pub const Value = extern union { mod: *Module, ) !Value { const shifted = try lhs.shl(rhs, ty, arena, mod); - const int_info = ty.intInfo(mod.getTarget()); + const int_info = ty.intInfo(mod); const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); return truncated; } pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shrScalar(lhs, rhs, allocator, target); + return shrScalar(lhs, rhs, allocator, mod); } - pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { @@ -4478,12 +4488,12 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4514,14 +4524,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4573,14 +4583,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4632,14 +4642,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4691,14 +4701,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4744,12 +4754,12 @@ pub const Value = extern union { pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4784,12 +4794,12 @@ pub const Value = extern union { pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4824,12 +4834,12 @@ pub const Value = extern union { pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4864,12 +4874,12 @@ pub const Value = extern union { pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4904,12 +4914,12 @@ pub const Value = extern union { pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4944,12 +4954,12 @@ pub const Value = extern union { pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4984,12 +4994,12 @@ pub const Value = extern union { pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5024,12 +5034,12 @@ pub const Value = extern union { pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5064,12 +5074,12 @@ pub const Value = extern union { pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5104,12 +5114,12 @@ pub const Value = extern union { pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5144,12 +5154,12 @@ pub const Value = extern union { pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5184,12 +5194,12 @@ pub const Value = extern union { pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5224,12 +5234,12 @@ pub const Value = extern union { pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5264,12 +5274,12 @@ pub const Value = extern union { pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5311,7 +5321,7 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var mulend1_buf: Value.ElemValueBuffer = undefined; @@ -5321,7 +5331,7 @@ pub const Value = extern union { var addend_buf: Value.ElemValueBuffer = undefined; const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf); scalar.* = try mulAddScalar( - float_type.scalarType(), + float_type.scalarType(mod), mulend1_elem, mulend2_elem, addend_elem, @@ -5380,8 +5390,7 @@ pub const Value = extern union { /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. 
pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value { - const target = mod.getTarget(); - const abi_size = std.math.cast(usize, ty.abiSize(target)) orelse return null; + const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; assert(abi_size >= 1); const byte_buffer = try mod.gpa.alloc(u8, abi_size); defer mod.gpa.free(byte_buffer); @@ -5549,16 +5558,6 @@ pub const Value = extern union { data: Type, }; - pub const IntType = struct { - pub const base_tag = Tag.int_type; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - bits: u16, - signed: bool, - }, - }; - pub const Float_16 = struct { pub const base_tag = Tag.float_16; @@ -5659,7 +5658,10 @@ pub const Value = extern union { pub const zero = initTag(.zero); pub const one = initTag(.one); - pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base }; + pub const negative_one: Value = .{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, + }; pub const undef = initTag(.undef); pub const @"void" = initTag(.void_value); pub const @"null" = initTag(.null_value); -- cgit v1.2.3 From 00f82f1c46126f1fc6655c6142ef16e8e5afbf4e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 2 May 2023 14:37:33 -0700 Subject: stage2: add `interned` AIR tag This required additionally passing the `InternPool` into some AIR methods. Also, implement `Type.isNoReturn` for interned types. --- src/Air.zig | 58 +++++---- src/InternPool.zig | 6 + src/Liveness.zig | 18 ++- src/Liveness/Verify.zig | 10 +- src/Module.zig | 3 +- src/Sema.zig | 92 ++----------- src/arch/aarch64/CodeGen.zig | 172 +++++++++++++------------ src/arch/arm/CodeGen.zig | 166 +++++++++++++----------- src/arch/riscv64/CodeGen.zig | 80 +++++++----- src/arch/sparc64/CodeGen.zig | 136 +++++++++++--------- src/arch/wasm/CodeGen.zig | 240 ++++++++++++++++++---------------- src/arch/x86_64/CodeGen.zig | 286 +++++++++++++++++++++-------------------- src/codegen/c.zig | 268 ++++++++++++++++++++------------------ src/codegen/llvm.zig | 299 ++++++++++++++++++++++--------------------- src/codegen/spirv.zig | 119 +++++++++-------- src/print_air.zig | 18 ++- src/type.zig | 40 ++++-- 17 files changed, 1050 insertions(+), 961 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index b60e8eda9d..be3ae119e4 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -11,6 +11,7 @@ const Air = @This(); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const InternPool = @import("InternPool.zig"); +const Module = @import("Module.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. @@ -401,6 +402,9 @@ pub const Inst = struct { constant, /// A comptime-known type. Uses the `ty` field. const_ty, + /// A comptime-known value via an index into the InternPool. + /// Uses the `interned` field. + interned, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. 
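The consumer side of the new tag, for code that switches on `Inst.Tag`: a hypothetical standalone helper (real backends fold this into their existing dispatch), using only the accessors introduced in this commit:

    fn airInternedType(air: Air, ip: InternPool, inst: Air.Inst.Index) Type {
        const datas = air.instructions.items(.data);
        // An `interned` instruction carries no payload index into
        // air.values; the InternPool key knows the value and its type.
        const key = ip.indexToKey(datas[inst].interned);
        return key.typeOf().toType();
    }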
@@ -928,6 +932,7 @@ pub const Inst = struct { pub const Data = union { no_op: void, un_op: Ref, + interned: InternPool.Index, bin_op: struct { lhs: Ref, @@ -1147,18 +1152,15 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { const ref_int = @enumToInt(inst); if (ref_int < InternPool.static_keys.len) { - return .{ - .ip_index = InternPool.static_keys[ref_int].typeOf(), - .legacy = undefined, - }; + return InternPool.static_keys[ref_int].typeOf().toType(); } - return air.typeOfIndex(ref_int - ref_start_index); + return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1200,7 +1202,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .div_exact_optimized, .rem_optimized, .mod_optimized, - => return air.typeOf(datas[inst].bin_op.lhs), + => return air.typeOf(datas[inst].bin_op.lhs, ip), .sqrt, .sin, @@ -1218,7 +1220,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .trunc_float, .neg, .neg_optimized, - => return air.typeOf(datas[inst].un_op), + => return air.typeOf(datas[inst].un_op, ip), .cmp_lt, .cmp_lte, @@ -1280,6 +1282,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .try_ptr, => return air.getRefType(datas[inst].ty_pl.ty), + .interned => return ip.indexToKey(datas[inst].interned).typeOf().toType(), + .not, .bitcast, .load, @@ -1371,33 +1375,33 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), .call, .call_always_tail, .call_never_tail, .call_never_inline => { - const callee_ty = air.typeOf(datas[inst].pl_op.operand); + const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); return callee_ty.fnReturnType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { - const ptr_ty = air.typeOf(datas[inst].bin_op.lhs); + const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); return ptr_ty.elemType(); }, .atomic_load => { - const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr); + const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); return ptr_ty.elemType(); }, .atomic_rmw => { - const ptr_ty = air.typeOf(datas[inst].pl_op.operand); + const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); return ptr_ty.elemType(); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(), + .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(), - .mul_add => return air.typeOf(datas[inst].pl_op.operand), + .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { const extra = air.extraData(Air.Bin, datas[inst].pl_op.payload).data; - return air.typeOf(extra.lhs); + return air.typeOf(extra.lhs, ip); }, .@"try" => { - const err_union_ty = air.typeOf(datas[inst].pl_op.operand); + const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip); return err_union_ty.errorUnionPayload(); }, @@ -1465,7 +1469,7 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } /// Returns `null` if runtime-known. 
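typeOf and typeOfIndex now take the InternPool because an `interned` instruction stores nothing but a pool index; its type lives in the pool, not in the instruction stream. A standalone sketch of that lookup split, where Pool, Inst, and the string type names are simplified stand-ins:

const std = @import("std");

const Pool = struct {
    type_names: []const []const u8,
};

const Inst = union(enum) {
    // Comptime-known value: the instruction is just a pool index.
    interned: u32,
    // Runtime op that stores its result type inline.
    add: u32,
};

// The pool must be threaded in, since `.interned` can only be
// resolved by asking the pool what that index's type is.
fn typeOf(insts: []const Inst, i: usize, pool: Pool) []const u8 {
    return switch (insts[i]) {
        .interned => |idx| pool.type_names[idx],
        .add => |ty| pool.type_names[ty],
    };
}

test "interned instructions resolve their type through the pool" {
    const pool = Pool{ .type_names = &.{ "u32", "f64" } };
    const insts = [_]Inst{ .{ .interned = 1 }, .{ .add = 0 } };
    try std.testing.expectEqualStrings("f64", typeOf(&insts, 0, pool));
    try std.testing.expectEqualStrings("u32", typeOf(&insts, 1, pool));
}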
-pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value { +pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const Module) ?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); @@ -1476,7 +1480,7 @@ pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?V switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(mod), + else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } } @@ -1489,10 +1493,11 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { return bytes[0..end :0]; } -/// Returns whether the given instruction must always be lowered, for instance because it can cause -/// side effects. If an instruction does not need to be lowered, and Liveness determines its result -/// is unused, backends should avoid lowering it. -pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { +/// Returns whether the given instruction must always be lowered, for instance +/// because it can cause side effects. If an instruction does not need to be +/// lowered, and Liveness determines its result is unused, backends should +/// avoid lowering it. +pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, @@ -1631,6 +1636,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .cmp_vector_optimized, .constant, .const_ty, + .interned, .is_null, .is_non_null, .is_null_ptr, @@ -1699,8 +1705,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtr(), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtr(), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtr(), }; } diff --git a/src/InternPool.zig b/src/InternPool.zig index cc3f0c1e4b..eadaf0da5e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -241,6 +241,11 @@ pub const Item = struct { /// When adding a tag to this enum, consider adding a corresponding entry to /// `primitives` in AstGen.zig. pub const Index = enum(u32) { + pub const first_type: Index = .u1_type; + pub const last_type: Index = .empty_struct_type; + pub const first_value: Index = .undef; + pub const last_value: Index = .empty_struct; + u1_type, u8_type, i8_type, @@ -329,6 +334,7 @@ pub const Index = enum(u32) { bool_false, /// `.{}` (untyped) empty_struct, + /// Used for generic parameters where the type and value /// is not known until generic function instantiation. 
generic_poison, diff --git a/src/Liveness.zig b/src/Liveness.zig index 45d0705008..01fbee9e36 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -131,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }; } -pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -144,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .intern_pool = intern_pool, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -322,6 +323,7 @@ pub fn categorizeOperand( .ret_ptr, .constant, .const_ty, + .interned, .trap, .breakpoint, .dbg_stmt, @@ -820,6 +822,7 @@ pub const BigTomb = struct { const Analysis = struct { gpa: Allocator, air: Air, + intern_pool: *const InternPool, tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), @@ -971,6 +974,7 @@ fn analyzeInst( .constant, .const_ty, + .interned, => unreachable, .trap, @@ -1255,6 +1259,7 @@ fn analyzeOperands( ) Allocator.Error!void { const gpa = a.gpa; const inst_tags = a.air.instructions.items(.tag); + const ip = a.intern_pool; switch (pass) { .loop_analysis => { @@ -1265,7 +1270,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty => continue, + .constant, .const_ty, .interned => continue, else => {}, } @@ -1290,7 +1295,7 @@ fn analyzeOperands( // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. - if (!immediate_death or a.air.mustLower(inst)) { + if (!immediate_death or a.air.mustLower(inst, ip.*)) { // Note that it's important we iterate over the operands backwards, so that if a dying // operand is used multiple times we mark its last use as its death. var i = operands.len; @@ -1301,7 +1306,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty => continue, + .constant, .const_ty, .interned => continue, else => {}, } @@ -1821,6 +1826,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { /// Must be called with operands in reverse order. fn feed(big: *Self, op_ref: Air.Inst.Ref) !void { + const ip = big.a.intern_pool; // Note that after this, `operands_remaining` becomes the index of the current operand big.operands_remaining -= 1; @@ -1834,14 +1840,14 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // Don't compute any liveness for constants const inst_tags = big.a.air.instructions.items(.tag); switch (inst_tags[operand]) { - .constant, .const_ty => return, + .constant, .const_ty, .interned => return, else => {}, } // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. 
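The rule restated in that comment is worth pinning down: operand uses are recorded unless the result dies immediately and the instruction is skippable. A sketch of the predicate, assuming plain booleans for the two conditions:

const std = @import("std");

/// Only record operand uses when the result is live or the
/// instruction must be lowered anyway (e.g. it has side effects);
/// this is what lets dead instructions release their operands.
fn shouldRecordOperandUses(result_unused: bool, must_lower: bool) bool {
    return !result_unused or must_lower;
}

test "unused side-effect-free instructions do not pin their operands" {
    try std.testing.expect(!shouldRecordOperandUses(true, false));
    try std.testing.expect(shouldRecordOperandUses(true, true)); // volatile load etc.
    try std.testing.expect(shouldRecordOperandUses(false, false));
}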
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index a55ebe52a6..e05f1814ce 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -5,6 +5,7 @@ air: Air, liveness: Liveness, live: LiveMap = .{}, blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, +intern_pool: *const InternPool, pub const Error = error{ LivenessInvalid, OutOfMemory }; @@ -27,10 +28,11 @@ pub fn verify(self: *Verify) Error!void { const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void); fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { + const ip = self.intern_pool; const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) { // This instruction will not be lowered and should be ignored. continue; } @@ -42,6 +44,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .ret_ptr, .constant, .const_ty, + .interned, .breakpoint, .dbg_stmt, .dbg_inline_begin, @@ -554,7 +557,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { const operand = Air.refToIndex(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty => {}, + .constant, .const_ty, .interned => {}, else => { if (dies) { if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); @@ -576,7 +579,7 @@ fn verifyInst( } const tag = self.air.instructions.items(.tag); switch (tag[inst]) { - .constant, .const_ty => unreachable, + .constant, .const_ty, .interned => unreachable, else => { if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); @@ -604,4 +607,5 @@ const log = std.log.scoped(.liveness_verify); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const Verify = @This(); diff --git a/src/Module.zig b/src/Module.zig index 5756955d3c..4187ac206b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4397,7 +4397,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_air and !dump_llvm_ir) return; log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { @@ -4414,6 +4414,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { .gpa = gpa, .air = air, .liveness = liveness, + .intern_pool = &mod.intern_pool, }; defer verify.deinit(); diff --git a/src/Sema.zig b/src/Sema.zig index 9b76fee68e..540474c84a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33007,7 +33007,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. 
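Liveness, its verifier, and the Module call sites above all store a *const InternPool rather than a copy: the Module owns the pool and keeps appending to it, so analysis passes borrow it by pointer. A toy version of that ownership shape, with InternPool and Analysis reduced to stand-ins:

const std = @import("std");

const InternPool = struct {
    items: std.ArrayListUnmanaged(u32) = .{},
};

const Analysis = struct {
    // Borrowed, not copied: the owner may keep growing the pool.
    intern_pool: *const InternPool,

    fn poolLen(a: Analysis) usize {
        return a.intern_pool.items.items.len;
    }
};

test "passes observe pool growth through the shared pointer" {
    const gpa = std.testing.allocator;
    var pool = InternPool{};
    defer pool.items.deinit(gpa);

    const pass = Analysis{ .intern_pool = &pool };
    try pool.items.append(gpa, 42);
    try std.testing.expectEqual(@as(usize, 1), pass.poolLen());
}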
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst); + return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { @@ -33019,88 +33019,14 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - switch (ty.ip_index) { - .u1_type => return .u1_type, - .u8_type => return .u8_type, - .i8_type => return .i8_type, - .u16_type => return .u16_type, - .i16_type => return .i16_type, - .u29_type => return .u29_type, - .u32_type => return .u32_type, - .i32_type => return .i32_type, - .u64_type => return .u64_type, - .i64_type => return .i64_type, - .u80_type => return .u80_type, - .u128_type => return .u128_type, - .i128_type => return .i128_type, - .usize_type => return .usize_type, - .isize_type => return .isize_type, - .c_char_type => return .c_char_type, - .c_short_type => return .c_short_type, - .c_ushort_type => return .c_ushort_type, - .c_int_type => return .c_int_type, - .c_uint_type => return .c_uint_type, - .c_long_type => return .c_long_type, - .c_ulong_type => return .c_ulong_type, - .c_longlong_type => return .c_longlong_type, - .c_ulonglong_type => return .c_ulonglong_type, - .c_longdouble_type => return .c_longdouble_type, - .f16_type => return .f16_type, - .f32_type => return .f32_type, - .f64_type => return .f64_type, - .f80_type => return .f80_type, - .f128_type => return .f128_type, - .anyopaque_type => return .anyopaque_type, - .bool_type => return .bool_type, - .void_type => return .void_type, - .type_type => return .type_type, - .anyerror_type => return .anyerror_type, - .comptime_int_type => return .comptime_int_type, - .comptime_float_type => return .comptime_float_type, - .noreturn_type => return .noreturn_type, - .anyframe_type => return .anyframe_type, - .null_type => return .null_type, - .undefined_type => return .undefined_type, - .enum_literal_type => return .enum_literal_type, - .atomic_order_type => return .atomic_order_type, - .atomic_rmw_op_type => return .atomic_rmw_op_type, - .calling_convention_type => return .calling_convention_type, - .address_space_type => return .address_space_type, - .float_mode_type => return .float_mode_type, - .reduce_op_type => return .reduce_op_type, - .call_modifier_type => return .call_modifier_type, - .prefetch_options_type => return .prefetch_options_type, - .export_options_type => return .export_options_type, - .extern_options_type => return .extern_options_type, - .type_info_type => return .type_info_type, - .manyptr_u8_type => return .manyptr_u8_type, - .manyptr_const_u8_type => return .manyptr_const_u8_type, - .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type => return .const_slice_u8_type, - .anyerror_void_error_union_type => return .anyerror_void_error_union_type, - .generic_poison_type => return .generic_poison_type, - .var_args_param_type => return .var_args_param_type, - .empty_struct_type => return .empty_struct_type, - - // values - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .one => unreachable, - .one_usize => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - _ => {}, - - .none => unreachable, + if (ty.ip_index != .none) { + if 
(@enumToInt(ty.ip_index) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index)); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .interned, + .data = .{ .interned = ty.ip_index }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } switch (ty.tag()) { .u1 => return .u1_type, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4370977272..2846633275 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -521,7 +521,7 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const abi_size = @intCast(u32, ty.abiSize(mod)); const abi_align = ty.abiAlignment(mod); @@ -653,13 +653,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -845,6 +846,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1028,7 +1030,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).elemType(); if (!elem_ty.hasRuntimeBits(mod)) { // return the stack offset 0. 
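The rewritten Sema.addType earlier in this hunk collapses the exhaustive switch because low InternPool indices are deliberately numbered to double as Air.Inst.Ref values; only an index at or above Air.ref_start_index costs a real `interned` instruction. A toy reconstruction of that dual-numbering trick, where ref_start_index, Stream, and the enum types are assumptions rather than the real layout:

const std = @import("std");

// Pool indices below `ref_start_index` double as instruction refs,
// so converting them is a plain integer cast.
const ref_start_index: u32 = 100;

const PoolIndex = enum(u32) { _ };
const Ref = enum(u32) { _ };

const Stream = struct {
    /// Instructions appended so far; each carries one pool index.
    interned: std.ArrayListUnmanaged(PoolIndex) = .{},

    fn addType(s: *Stream, gpa: std.mem.Allocator, idx: PoolIndex) !Ref {
        if (@enumToInt(idx) < ref_start_index)
            return @intToEnum(Ref, @enumToInt(idx));
        // Out of range: materialize an instruction carrying the index.
        try s.interned.append(gpa, idx);
        const inst = @intCast(u32, s.interned.items.len - 1);
        return @intToEnum(Ref, ref_start_index + inst);
    }
};

test "small indices convert without emitting an instruction" {
    var s = Stream{};
    defer s.interned.deinit(std.testing.allocator);
    const small = try s.addType(std.testing.allocator, @intToEnum(PoolIndex, 7));
    try std.testing.expectEqual(@as(u32, 7), @enumToInt(small));
    try std.testing.expectEqual(@as(usize, 0), s.interned.items.len);
}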
Stack offset 0 will be where all @@ -1067,7 +1069,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1079,14 +1081,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.compare_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .compare_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1094,7 +1096,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1126,9 +1128,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const raw_reg = try self.register_manager.allocReg(reg_owner, gp); - const ty = self.air.typeOfIndex(reg_owner); + const ty = self.typeOfIndex(reg_owner); const reg = self.registerAlias(raw_reg, ty); - try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); + try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1181,10 +1183,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const operand = ty_op.operand; const operand_mcv = try self.resolveInst(operand); - const operand_ty = self.air.typeOf(operand); + const operand_ty = self.typeOf(operand); const operand_info = operand_ty.intInfo(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); const result: MCValue = result: { @@ -1201,14 +1203,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (dest_info.bits > operand_info.bits) { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } else { if (self.reuseOperand(inst, operand, 0, truncated)) { break :result truncated; } else { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } } @@ -1303,8 +1305,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -1325,7 +1327,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -1492,8 +1494,8 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1512,9 +1514,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const 
len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -2436,8 +2438,8 @@ fn ptrArithmetic( fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2487,8 +2489,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2525,10 +2527,10 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2653,10 +2655,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2877,10 +2879,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -3013,7 +3015,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if 
(self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOf(ty_op.operand); + const optional_ty = self.typeOf(ty_op.operand); const mcv = try self.resolveInst(ty_op.operand); break :result try self.optionalPayload(inst, mcv, optional_ty); }; @@ -3132,7 +3134,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; @@ -3212,7 +3214,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; @@ -3266,12 +3268,12 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } const result: MCValue = result: { - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBits(mod)) { break :result MCValue{ .immediate = 1 }; } - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const operand_lock: ?RegisterLock = switch (operand) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -3433,7 +3435,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -3482,8 +3484,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const slice_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const slice_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; @@ -3499,7 +3501,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3516,8 +3518,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const ptr_ty = 
self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const ptr_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; @@ -3862,16 +3864,16 @@ fn genInlineMemsetCode( } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.typeOfIndex(inst); const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -3886,7 +3888,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4068,8 +4070,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -4093,7 +4095,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde return if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); + const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -4118,7 +4120,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); + const struct_ty = self.typeOf(operand); const struct_field_ty = struct_ty.structFieldType(index); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); @@ -4194,7 +4196,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); @@ -4247,7 +4249,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, 
self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -4294,7 +4296,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -4470,7 +4472,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { @@ -4512,7 +4514,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); @@ -4652,7 +4654,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -4804,7 +4806,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -4831,7 +4833,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -4936,7 +4938,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNull(.{ .mcv = operand }, operand_ty); }; @@ -4947,7 +4949,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4962,7 +4964,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNonNull(.{ .mcv = operand }, operand_ty); }; @@ -4973,7 +4975,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4988,7 +4990,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isErr(error_union_bind, error_union_ty); }; @@ -4999,7 +5001,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -5014,7 +5016,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isNonErr(error_union_bind, error_union_ty); }; @@ -5025,7 +5027,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty 
= self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -5093,7 +5095,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try self.liveness.getSwitchBr( self.gpa, @@ -5241,7 +5243,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5249,14 +5251,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .compare_flags => blk: { - const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -5322,7 +5324,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -5945,7 +5947,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest = try self.allocRegOrMem(dest_ty, true, inst); try self.setRegOrMem(dest_ty, dest, operand); break :result dest; @@ -5956,7 +5958,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); @@ -6076,7 +6078,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -6125,7 +6127,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const body = 
self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; - const error_union_ty = self.air.typeOf(pl_op.operand); + const error_union_ty = self.typeOf(pl_op.operand); const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); const error_union_align = error_union_ty.abiAlignment(mod); @@ -6159,7 +6161,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); + const inst_ty = self.typeOf(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; @@ -6428,3 +6430,13 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { }, } } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 4c7151cd47..eb8cfa9707 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -477,6 +477,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { + const mod = self.bin_file.options.module.?; const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { // push {fp, lr} @@ -518,9 +519,8 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ty.abiSize(mod)); const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); @@ -637,13 +637,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -829,6 +830,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1008,7 +1010,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. 
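The new typeOf/typeOfIndex helpers just above are the pattern each backend repeats: fetch the Module once, thread its pool into the Air query, and let every call site shrink from self.air.typeOf(x) to self.typeOf(x). A reduced standalone sketch of that wrapper shape, with Backend, Air, and Pool as placeholders:

const std = @import("std");

const Pool = struct {
    names: []const []const u8,
};

const Air = struct {
    type_of: []const u32,

    // The underlying query needs the pool on every call...
    fn typeOf(air: Air, inst: usize, pool: Pool) []const u8 {
        return pool.names[air.type_of[inst]];
    }
};

const Backend = struct {
    air: Air,
    pool: Pool,

    // ...so the backend wraps it once and call sites stay short.
    fn typeOf(self: Backend, inst: usize) []const u8 {
        return self.air.typeOf(inst, self.pool);
    }
};

test "wrapper threads the pool so call sites need not" {
    const b = Backend{
        .air = .{ .type_of = &.{ 0, 1 } },
        .pool = .{ .names = &.{ "usize", "bool" } },
    };
    try std.testing.expectEqualStrings("bool", b.typeOf(1));
}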
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).elemType(); if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, @@ -1050,7 +1052,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1064,14 +1066,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.cpsr_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1081,7 +1083,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1151,15 +1153,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); - const mod = self.bin_file.options.module.?; const operand_abi_size = operand_ty.abiSize(mod); const dest_abi_size = dest_ty.abiSize(mod); const info_a = operand_ty.intInfo(mod); @@ -1262,8 +1264,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty); @@ -1284,7 +1286,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const 
operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (try operand_bind.resolveToMcv(self)) { .dead => unreachable, .unreach => unreachable, @@ -1467,8 +1469,8 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1487,9 +1489,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const stack_offset = try self.allocMem(8, 4, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); @@ -1501,8 +1503,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1552,8 +1554,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1590,10 +1592,10 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -1703,10 +1705,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty 
= self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -1865,10 +1867,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); const mod = self.bin_file.options.module.?; const result: MCValue = result: { - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2019,10 +2021,10 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOfIndex(inst); - const mod = self.bin_file.options.module.?; + const optional_ty = self.typeOfIndex(inst); const abi_size = @intCast(u32, optional_ty.abiSize(mod)); // Optional with a zero-bit payload type is just a boolean true @@ -2105,7 +2107,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; @@ -2182,7 +2184,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; @@ -2430,7 +2432,7 @@ fn ptrElemVal( fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -2456,8 +2458,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const slice_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const slice_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, 
slice_ty, index_ty, null); break :result addr; @@ -2523,7 +2525,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst); }; @@ -2532,7 +2534,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -2549,8 +2551,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const ptr_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const ptr_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; @@ -2736,13 +2738,13 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); + const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -2755,7 +2757,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dest_mcv, ptr, self.typeOf(ty_op.operand)); break :result dest_mcv; }; @@ -2860,8 +2862,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -2885,7 +2887,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde return if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); + const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -2910,7 +2912,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); + const struct_ty = self.typeOf(operand); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); const struct_field_ty = struct_ty.structFieldType(index); @@ -4169,7 +4171,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); @@ -4222,7 +4224,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -4276,7 +4278,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -4418,7 +4420,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { @@ -4461,7 +4463,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); @@ -4600,7 +4602,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -4755,7 +4757,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
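Every hunk in this stretch of the patch applies the same mechanical rewrite, so it is worth stating once: direct type queries against the Air now go through a per-backend wrapper, because the underlying accessors need the InternPool. A before/after sketch of a typical call site, with shapes taken from these hunks:

    // Before: the call site reaches into the Air directly; the new
    // Air.typeOf signature would force a Module lookup at every site.
    const lhs_ty = self.air.typeOf(bin_op.lhs);

    // After: the wrapper (added at the bottom of each backend file in
    // this patch) fetches the Module and threads mod.intern_pool through.
    const lhs_ty = self.typeOf(bin_op.lhs);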
- try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -4782,7 +4784,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -4827,7 +4829,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_bind: ReadArg.Bind = .{ .inst = un_op }; - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNull(operand_bind, operand_ty); }; @@ -4838,7 +4840,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4853,7 +4855,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_bind: ReadArg.Bind = .{ .inst = un_op }; - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNonNull(operand_bind, operand_ty); }; @@ -4864,7 +4866,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4913,7 +4915,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isErr(error_union_bind, error_union_ty); }; @@ -4924,7 +4926,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4939,7 +4941,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; 
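The two wrappers are not interchangeable, and the hunks above use them deliberately: typeOf takes an Air.Inst.Ref and answers the type of an operand, while typeOfIndex takes an Air.Inst.Index and answers the result type of an instruction. A short sketch of the distinction as it shows up in these functions:

    const operand_ty = self.typeOf(ty_op.operand); // operand: Air.Inst.Ref
    const result_ty = self.typeOfIndex(inst);      // inst: Air.Inst.Index

Both forward to the Air accessors with mod.intern_pool as the extra argument, so call sites no longer need a Module in scope just to ask for a type.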
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isNonErr(error_union_bind, error_union_ty); }; @@ -4950,7 +4952,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -5018,7 +5020,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try self.liveness.getSwitchBr( self.gpa, @@ -5164,7 +5166,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5172,14 +5174,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .cpsr_flags => blk: { - const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -5243,7 +5245,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -5896,7 +5898,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest = try self.allocRegOrMem(dest_ty, true, inst); try self.setRegOrMem(dest_ty, dest, operand); break :result dest; @@ -5907,7 +5909,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); 
const array_len = @intCast(u32, array_ty.arrayLen()); @@ -6023,7 +6025,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -6072,7 +6074,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; - const error_union_ty = self.air.typeOf(pl_op.operand); + const error_union_ty = self.typeOf(pl_op.operand); const mod = self.bin_file.options.module.?; const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); const error_union_align = error_union_ty.abiAlignment(mod); @@ -6107,7 +6109,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); + const inst_ty = self.typeOf(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; @@ -6333,3 +6335,13 @@ fn parseRegName(name: []const u8) ?Register { } return std.meta.stringToEnum(Register, name); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 75d5a87bf2..4ab798fe9c 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -470,13 +470,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -658,6 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -804,8 +806,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).elemType(); const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; @@ -815,8 +817,8 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; @@ -845,7 +847,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void assert(reg == reg_mcv.register); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register. The register is not considered @@ -862,7 +864,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, gp); - try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); + try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -894,10 +896,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); const mod = self.bin_file.options.module.?; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const info_a = operand_ty.intInfo(mod); - const info_b = self.air.typeOfIndex(inst).intInfo(mod); + const info_b = self.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1126,8 +1128,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1138,8 +1140,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) 
.dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1333,7 +1335,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true if (optional_ty.abiSize(mod) == 1) @@ -1525,15 +1527,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); + const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { - const mod = self.bin_file.options.module.?; if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -1545,7 +1547,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1586,8 +1588,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -1647,7 +1649,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); _ = ty; const result = self.args[arg_index]; @@ -1704,7 +1706,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const fn_ty = self.air.typeOf(pl_op.operand); + const fn_ty = self.typeOf(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); @@ -1717,7 +1719,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (self.bin_file.cast(link.File.Elf)) |elf_file| { for (info.args, 0..) 
|mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -1829,9 +1831,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const mod = self.bin_file.options.module.?; - assert(ty.eql(self.air.typeOf(bin_op.rhs), mod)); + assert(ty.eql(self.typeOf(bin_op.rhs), mod)); if (ty.zigTypeTag(mod) == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); @@ -1950,7 +1952,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNull(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1977,7 +1979,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNonNull(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -2004,7 +2006,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isErr(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -2031,7 +2033,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNonErr(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -2112,13 +2114,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; const mod = self.bin_file.options.module.?; - if (self.air.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -2181,7 +2183,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -2377,7 +2379,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const dest = try self.allocRegOrMem(inst, true); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand); + try self.setRegOrMem(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none 
}); @@ -2494,7 +2496,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -2541,7 +2543,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); + const inst_ty = self.typeOf(inst); if (!inst_ty.hasRuntimeBits(mod)) return MCValue{ .none = {} }; @@ -2733,3 +2735,13 @@ fn parseRegName(name: []const u8) ?Register { } return std.meta.stringToEnum(Register, name); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 63b604857e..e79a216315 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -490,13 +490,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -678,6 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -762,8 +764,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), @@ -836,7 +838,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -871,7 +873,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: 
MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); @@ -935,7 +937,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -1008,16 +1010,16 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const arg = self.args[arg_index]; const mcv = blk: { switch (arg) { .stack_offset => |off| { - const mod = self.bin_file.options.module.?; const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; @@ -1063,8 +1065,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -1088,8 +1090,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -1115,7 +1117,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const dest = try self.allocRegOrMem(inst, true); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand); + try self.setRegOrMem(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1218,7 +1220,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { // TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A. const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO byteswap for vectors", .{}), .Int => { @@ -1294,7 +1296,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. 
extra.end + extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -1318,7 +1320,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { @@ -1428,7 +1430,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. @@ -1605,7 +1607,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -1632,7 +1634,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
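Alongside the call-site rewrites, each backend's genBody loop changes the same way in this patch: Air.mustLower now needs the InternPool, so the Module and InternPool lookups are hoisted above the loop. A sketch of the reshaped loop as it appears in the riscv64 and sparc64 hunks (the wasm version is analogous, via func.bin_file.base):

    const mod = self.bin_file.options.module.?;
    const ip = &mod.intern_pool;
    for (body) |inst| {
        // Skip unused instructions that have no mandatory lowering.
        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
            continue;
        // ... lower the instruction ...
    }

The new `.interned => unreachable` switch arm added in the same hunks mirrors the existing `.constant` and `.const_ty` arms: all three are excluded from function bodies.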
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -1755,10 +1757,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); const mod = self.bin_file.options.module.?; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const info_a = operand_ty.intInfo(mod); - const info_b = self.air.typeOfIndex(inst).intInfo(mod); + const info_b = self.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1780,7 +1782,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); break :result try self.isErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1790,7 +1792,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); break :result try self.isNonErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1815,16 +1817,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.typeOfIndex(inst); const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -1839,7 +1841,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1882,8 +1884,8 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead @@ -1897,8 +1899,8 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try 
self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); assert(lhs_ty.eql(rhs_ty, self.bin_file.options.module.?)); if (self.liveness.isUnused(inst)) @@ -2045,8 +2047,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -2108,7 +2110,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -2285,8 +2287,8 @@ fn airRem(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); // TODO add safety check @@ -2341,8 +2343,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), @@ -2429,9 +2431,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -2453,7 +2455,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_mcv = try self.resolveInst(bin_op.lhs); const index_mcv = try self.resolveInst(bin_op.rhs); - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const elem_ty = slice_ty.childType(); const mod = self.bin_file.options.module.?; const elem_size = elem_ty.abiSize(mod); @@ -2544,8 +2546,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = 
self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -2573,7 +2575,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); + const struct_ty = self.typeOf(operand); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -2659,8 +2661,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -2674,7 +2676,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { - const error_union_ty = self.air.typeOf(pl_op.operand); + const error_union_ty = self.typeOf(pl_op.operand); const error_union = try self.resolveInst(pl_op.operand); const is_err_result = try self.isErr(error_union_ty, error_union); const reloc = try self.condBr(is_err_result); @@ -2706,7 +2708,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); const mod = self.bin_file.options.module.?; @@ -2720,7 +2722,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); const mod = self.bin_file.options.module.?; if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; @@ -2753,12 +2755,12 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - const mod = self.bin_file.options.module.?; if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; @@ -2794,9 +2796,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer 
instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); - const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).elemType(); + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. Stack offset 0 will be where all @@ -2814,8 +2816,8 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; @@ -3406,7 +3408,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; const mod = self.bin_file.options.module.?; - if (self.air.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3415,13 +3417,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .register, .stack_offset, .memory => operand_mcv, .immediate => blk: { const new_mcv = try self.allocRegOrMem(block, true); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -4549,7 +4551,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; - const ty = self.air.typeOf(ref); + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. 
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; @@ -4654,7 +4656,7 @@ fn spillConditionFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4678,7 +4680,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void assert(reg == reg_mcv.register); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { @@ -4726,7 +4728,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde return if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); + const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -4885,3 +4887,13 @@ fn wantSafety(self: *Self) bool { .ReleaseSmall => false, }; } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b592ffcb2a..cd61eaf1fb 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -790,7 +790,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const val = func.air.value(ref, mod).?; - const ty = func.air.typeOf(ref); + const ty = func.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; @@ -1260,7 +1260,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // we emit an unreachable instruction to tell the stack validator that part will never be reached. if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); - const last_inst_ty = func.air.typeOfIndex(inst); + const last_inst_ty = func.typeOfIndex(inst); if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { try func.addTag(.@"unreachable"); } @@ -1541,7 +1541,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// if it is set, to ensure the stack alignment will be set correctly. 
 fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
     const mod = func.bin_file.base.options.module.?;
-    const ptr_ty = func.air.typeOfIndex(inst);
+    const ptr_ty = func.typeOfIndex(inst);
     const pointee_ty = ptr_ty.childType();
 
     if (func.initial_stack_value == .none) {
@@ -1834,6 +1834,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     return switch (air_tags[inst]) {
         .constant => unreachable,
         .const_ty => unreachable,
+        .interned => unreachable,
 
         .add => func.airBinOp(inst, .add),
         .add_sat => func.airSatBinOp(inst, .add),
@@ -2073,8 +2074,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
+    const ip = &mod.intern_pool;
+
     for (body) |inst| {
-        if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) {
+        if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) {
             continue;
         }
         const old_bookkeeping_value = func.air_bookkeeping;
@@ -2134,8 +2138,8 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const child_type = func.air.typeOfIndex(inst).childType();
     const mod = func.bin_file.base.options.module.?;
+    const child_type = func.typeOfIndex(inst).childType();
 
     var result = result: {
         if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
@@ -2157,7 +2161,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const ret_ty = func.air.typeOf(un_op).childType();
+    const ret_ty = func.typeOf(un_op).childType();
     const fn_info = func.decl.ty.fnInfo();
 
     if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2179,7 +2183,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const extra = func.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = func.air.typeOf(pl_op.operand);
+    const ty = func.typeOf(pl_op.operand);
 
     const mod = func.bin_file.base.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -2228,7 +2232,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     for (args) |arg| {
         const arg_val = try func.resolveInst(arg);
 
-        const arg_ty = func.air.typeOf(arg);
+        const arg_ty = func.typeOf(arg);
         if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
         try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
@@ -2296,7 +2300,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const ptr_info = ptr_ty.ptrInfo().data;
     const ty = ptr_ty.childType();
@@ -2449,7 +2453,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.air.getRefType(ty_op.ty);
-    const ptr_ty = func.air.typeOf(ty_op.operand);
+    const ptr_ty = func.typeOf(ty_op.operand);
     const ptr_info = ptr_ty.ptrInfo().data;
 
     if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
@@ -2522,11 +2526,11 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
 }
 
 fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const arg_index = func.arg_index;
     const arg = func.args[arg_index];
     const cc = func.decl.ty.fnInfo().cc;
-    const arg_ty = func.air.typeOfIndex(inst);
-    const mod = func.bin_file.base.options.module.?;
+    const arg_ty = func.typeOfIndex(inst);
     if (cc == .C) {
         const arg_classes = abi.classifyType(arg_ty, mod);
         for (arg_classes) |class| {
@@ -2572,8 +2576,8 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
-    const lhs_ty = func.air.typeOf(bin_op.lhs);
-    const rhs_ty = func.air.typeOf(bin_op.rhs);
+    const lhs_ty = func.typeOf(bin_op.lhs);
+    const rhs_ty = func.typeOf(bin_op.rhs);
 
     // For certain operations, such as shifting, the types are different.
     // When converting this to a WebAssembly type, they *must* match to perform
@@ -2770,7 +2774,7 @@ const FloatOp = enum {
 fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const ty = func.air.typeOf(un_op);
+    const ty = func.typeOf(un_op);
 
     const result = try (try func.floatOp(op, ty, &.{operand})).toLocal(func, ty);
     func.finishAir(inst, result, &.{un_op});
@@ -2847,8 +2851,8 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
-    const lhs_ty = func.air.typeOf(bin_op.lhs);
-    const rhs_ty = func.air.typeOf(bin_op.rhs);
+    const lhs_ty = func.typeOf(bin_op.lhs);
+    const rhs_ty = func.typeOf(bin_op.rhs);
 
     if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
@@ -3387,7 +3391,7 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
-    const operand_ty = func.air.typeOf(bin_op.lhs);
+    const operand_ty = func.typeOf(bin_op.lhs);
 
     const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits
     func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
@@ -3488,7 +3492,7 @@ fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const block = func.blocks.get(br.block_inst).?;
 
     // if operand has codegen bits we should break with a value
-    if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
+    if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
         const operand = try func.resolveInst(br.operand);
         try func.lowerToStack(operand);
@@ -3509,7 +3513,7 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const operand_ty = func.air.typeOf(ty_op.operand);
+    const operand_ty = func.typeOf(ty_op.operand);
     const mod = func.bin_file.base.options.module.?;
 
     const result = result: {
@@ -3575,8 +3579,8 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
         const operand = try func.resolveInst(ty_op.operand);
-        const wanted_ty = func.air.typeOfIndex(inst);
-        const given_ty = func.air.typeOf(ty_op.operand);
+        const wanted_ty = func.typeOfIndex(inst);
+        const given_ty = func.typeOf(ty_op.operand);
         if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) {
             const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand);
             break :result try bitcast_result.toLocal(func, wanted_ty);
@@ -3609,7 +3613,7 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.StructField, ty_pl.payload);
     const struct_ptr = try func.resolveInst(extra.data.struct_operand);
-    const struct_ty = func.air.typeOf(extra.data.struct_operand).childType();
+    const struct_ty = func.typeOf(extra.data.struct_operand).childType();
     const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index);
     func.finishAir(inst, result, &.{extra.data.struct_operand});
 }
@@ -3617,7 +3621,7 @@ fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const struct_ptr = try func.resolveInst(ty_op.operand);
-    const struct_ty = func.air.typeOf(ty_op.operand).childType();
+    const struct_ty = func.typeOf(ty_op.operand).childType();
 
     const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index);
     func.finishAir(inst, result, &.{ty_op.operand});
@@ -3632,7 +3636,7 @@ fn structFieldPtr(
     index: u32,
 ) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
-    const result_ty = func.air.typeOfIndex(inst);
+    const result_ty = func.typeOfIndex(inst);
     const offset = switch (struct_ty.containerLayout()) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => offset: {
@@ -3663,7 +3667,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
 
-    const struct_ty = func.air.typeOf(struct_field.struct_operand);
+    const struct_ty = func.typeOf(struct_field.struct_operand);
     const operand = try func.resolveInst(struct_field.struct_operand);
     const field_index = struct_field.field_index;
     const field_ty = struct_ty.structFieldType(field_index);
@@ -3762,7 +3766,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const blocktype = wasm.block_empty;
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const target = try func.resolveInst(pl_op.operand);
-    const target_ty = func.air.typeOf(pl_op.operand);
+    const target_ty = func.typeOf(pl_op.operand);
     const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload);
     const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1);
     defer func.gpa.free(liveness.deaths);
@@ -3940,7 +3944,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
     const mod = func.bin_file.base.options.module.?;
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const err_union_ty = func.air.typeOf(un_op);
+    const err_union_ty = func.typeOf(un_op);
     const pl_ty = err_union_ty.errorUnionPayload();
 
     const result = result: {
@@ -3976,7 +3980,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.air.typeOf(ty_op.operand);
+    const op_ty = func.typeOf(ty_op.operand);
     const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
     const payload_ty = err_ty.errorUnionPayload();
@@ -4004,7 +4008,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.air.typeOf(ty_op.operand);
+    const op_ty = func.typeOf(ty_op.operand);
     const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
     const payload_ty = err_ty.errorUnionPayload();
@@ -4028,9 +4032,9 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const err_ty = func.air.typeOfIndex(inst);
+    const err_ty = func.typeOfIndex(inst);
-
-    const pl_ty = func.air.typeOf(ty_op.operand);
+    const pl_ty = func.typeOf(ty_op.operand);
     const result = result: {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
@@ -4082,7 +4086,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.air.getRefType(ty_op.ty);
     const operand = try func.resolveInst(ty_op.operand);
-    const operand_ty = func.air.typeOf(ty_op.operand);
+    const operand_ty = func.typeOf(ty_op.operand);
     const mod = func.bin_file.base.options.module.?;
     if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("todo Wasm intcast for vectors", .{});
     }
@@ -4155,7 +4159,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
 
-    const op_ty = func.air.typeOf(un_op);
+    const op_ty = func.typeOf(un_op);
     const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
     const is_null = try func.isNull(operand, optional_ty, opcode);
     const result = try is_null.toLocal(func, optional_ty);
@@ -4196,8 +4200,8 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
 fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const opt_ty = func.air.typeOf(ty_op.operand);
-    const payload_ty = func.air.typeOfIndex(inst);
+    const opt_ty = func.typeOf(ty_op.operand);
+    const payload_ty = func.typeOfIndex(inst);
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return func.finishAir(inst, .none, &.{ty_op.operand});
     }
@@ -4219,7 +4223,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.air.typeOf(ty_op.operand).childType();
+    const opt_ty = func.typeOf(ty_op.operand).childType();
     const mod = func.bin_file.base.options.module.?;
 
     const result = result: {
@@ -4238,7 +4242,7 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.air.typeOf(ty_op.operand).childType();
+    const opt_ty = func.typeOf(ty_op.operand).childType();
     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = opt_ty.optionalChild(&buf);
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -4263,7 +4267,7 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
 fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const payload_ty = func.air.typeOf(ty_op.operand);
+    const payload_ty = func.typeOf(ty_op.operand);
     const mod = func.bin_file.base.options.module.?;
 
     const result = result: {
@@ -4276,7 +4280,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
 
         const operand = try func.resolveInst(ty_op.operand);
-        const op_ty = func.air.typeOfIndex(inst);
+        const op_ty = func.typeOfIndex(inst);
         if (op_ty.optionalReprIsPayload(mod)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
@@ -4304,7 +4308,7 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
-    const slice_ty = func.air.typeOfIndex(inst);
+    const slice_ty = func.typeOfIndex(inst);
 
     const slice = try func.allocStack(slice_ty);
     try func.store(slice, lhs, Type.usize, 0);
@@ -4323,7 +4327,7 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const slice_ty = func.air.typeOf(bin_op.lhs);
+    const slice_ty = func.typeOf(bin_op.lhs);
     const slice = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = slice_ty.childType();
@@ -4395,7 +4399,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const wanted_ty = func.air.getRefType(ty_op.ty);
-    const op_ty = func.air.typeOf(ty_op.operand);
+    const op_ty = func.typeOf(ty_op.operand);
 
     const result = try func.trunc(operand, wanted_ty, op_ty);
     func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand});
@@ -4432,7 +4436,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const array_ty = func.air.typeOf(ty_op.operand).childType();
+    const array_ty = func.typeOf(ty_op.operand).childType();
     const slice_ty = func.air.getRefType(ty_op.ty);
 
     // create a slice on the stack
@@ -4453,7 +4457,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const ptr_ty = func.air.typeOf(un_op);
+    const ptr_ty = func.typeOf(un_op);
 
     const result = if (ptr_ty.isSlice())
         try func.slicePtr(operand)
     else switch (operand) {
@@ -4467,7 +4471,7 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const ptr = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = ptr_ty.childType();
@@ -4505,7 +4509,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const elem_ty = func.air.getRefType(ty_pl.ty).childType();
     const mod = func.bin_file.base.options.module.?;
     const elem_size = elem_ty.abiSize(mod);
@@ -4538,7 +4542,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const ptr = try func.resolveInst(bin_op.lhs);
     const offset = try func.resolveInst(bin_op.rhs);
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const pointee_ty = switch (ptr_ty.ptrSize()) {
         .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
         else => ptr_ty.childType(),
@@ -4568,7 +4572,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const ptr = try func.resolveInst(bin_op.lhs);
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const value = try func.resolveInst(bin_op.rhs);
     const len = switch (ptr_ty.ptrSize()) {
         .Slice => try func.sliceLen(ptr),
@@ -4683,7 +4687,7 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
 fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const array_ty = func.air.typeOf(bin_op.lhs);
+    const array_ty = func.typeOf(bin_op.lhs);
     const array = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = array_ty.childType();
@@ -4750,12 +4754,12 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const dest_ty = func.air.typeOfIndex(inst);
-    const op_ty = func.air.typeOf(ty_op.operand);
-    const mod = func.bin_file.base.options.module.?;
+    const dest_ty = func.typeOfIndex(inst);
+    const op_ty = func.typeOf(ty_op.operand);
 
     if (op_ty.abiSize(mod) > 8) {
         return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
@@ -4775,12 +4779,12 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const dest_ty = func.air.typeOfIndex(inst);
-    const op_ty = func.air.typeOf(ty_op.operand);
-    const mod = func.bin_file.base.options.module.?;
+    const dest_ty = func.typeOfIndex(inst);
+    const op_ty = func.typeOf(ty_op.operand);
 
     if (op_ty.abiSize(mod) > 8) {
         return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
@@ -4804,7 +4808,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const elem_ty = ty.childType();
 
     if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
@@ -4881,7 +4885,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
-    const inst_ty = func.air.typeOfIndex(inst);
+    const inst_ty = func.typeOfIndex(inst);
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -4894,7 +4898,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const elem_size = child_ty.abiSize(mod);
 
     // TODO: One of them could be by ref; handle in loop
-    if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
+    if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
         const result = try func.allocStack(inst_ty);
 
         for (0..mask_len) |index| {
@@ -4951,11 +4955,11 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
-    const result_ty = func.air.typeOfIndex(inst);
+    const result_ty = func.typeOfIndex(inst);
     const len = @intCast(usize, result_ty.arrayLen());
     const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
-    const mod = func.bin_file.base.options.module.?;
 
     const result: WValue = result_value: {
         switch (result_ty.zigTypeTag(mod)) {
@@ -5085,7 +5089,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
     const result = result: {
-        const union_ty = func.air.typeOfIndex(inst);
+        const union_ty = func.typeOfIndex(inst);
         const layout = union_ty.unionGetLayout(mod);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
         const field = union_obj.fields.values()[extra.field_index];
@@ -5164,7 +5168,7 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
 
-    const result = try func.allocLocal(func.air.typeOfIndex(inst));
+    const result = try func.allocLocal(func.typeOfIndex(inst));
     try func.addLabel(.memory_size, pl_op.payload);
     try func.addLabel(.local_set, result.local.value);
     func.finishAir(inst, result, &.{pl_op.operand});
@@ -5174,7 +5178,7 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const operand = try func.resolveInst(pl_op.operand);
 
-    const result = try func.allocLocal(func.air.typeOfIndex(inst));
+    const result = try func.allocLocal(func.typeOfIndex(inst));
     try func.emitWValue(operand);
     try func.addLabel(.memory_grow, pl_op.payload);
     try func.addLabel(.local_set, result.local.value);
@@ -5263,8 +5267,8 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
-    const un_ty = func.air.typeOf(bin_op.lhs).childType();
-    const tag_ty = func.air.typeOf(bin_op.rhs);
+    const un_ty = func.typeOf(bin_op.lhs).childType();
+    const tag_ty = func.typeOf(bin_op.rhs);
     const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -5288,8 +5292,8 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
-    const un_ty = func.air.typeOf(ty_op.operand);
-    const tag_ty = func.air.typeOfIndex(inst);
+    const un_ty = func.typeOf(ty_op.operand);
+    const tag_ty = func.typeOfIndex(inst);
     const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
@@ -5307,9 +5311,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const dest_ty = func.air.typeOfIndex(inst);
+    const dest_ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
-    const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty);
+    const extended = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty);
     const result = try extended.toLocal(func, dest_ty);
     func.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -5352,9 +5356,9 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
 fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const dest_ty = func.air.typeOfIndex(inst);
+    const dest_ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
-    const truncated = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty);
+    const truncated = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty);
     const result = try truncated.toLocal(func, dest_ty);
     func.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -5393,7 +5397,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
-    const err_set_ty = func.air.typeOf(ty_op.operand).childType();
+    const err_set_ty = func.typeOf(ty_op.operand).childType();
     const payload_ty = err_set_ty.errorUnionPayload();
     const operand = try func.resolveInst(ty_op.operand);
@@ -5448,10 +5452,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
-    const dst_ty = func.air.typeOf(bin_op.lhs);
+    const dst_ty = func.typeOf(bin_op.lhs);
     const ptr_elem_ty = dst_ty.childType();
     const src = try func.resolveInst(bin_op.rhs);
-    const src_ty = func.air.typeOf(bin_op.rhs);
+    const src_ty = func.typeOf(bin_op.rhs);
     const len = switch (dst_ty.ptrSize()) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
@@ -5485,12 +5489,12 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.air.typeOf(ty_op.operand);
-    const result_ty = func.air.typeOfIndex(inst);
-    const mod = func.bin_file.base.options.module.?;
+    const op_ty = func.typeOf(ty_op.operand);
+    const result_ty = func.typeOfIndex(inst);
 
     if (op_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement @popCount for vectors", .{});
@@ -5585,7 +5589,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const lhs_op = try func.resolveInst(extra.lhs);
     const rhs_op = try func.resolveInst(extra.rhs);
-    const lhs_ty = func.air.typeOf(extra.lhs);
+    const lhs_ty = func.typeOf(extra.lhs);
     const mod = func.bin_file.base.options.module.?;
 
     if (lhs_ty.zigTypeTag(mod) == .Vector) {
@@ -5599,7 +5603,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     };
 
     if (wasm_bits == 128) {
-        const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op);
+        const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op);
         return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
     }
@@ -5649,7 +5653,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     var overflow_local = try overflow_bit.toLocal(func, Type.u32);
     defer overflow_local.free(func);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
     const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
@@ -5729,8 +5733,8 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
-    const lhs_ty = func.air.typeOf(extra.lhs);
-    const rhs_ty = func.air.typeOf(extra.rhs);
+    const lhs_ty = func.typeOf(extra.lhs);
+    const rhs_ty = func.typeOf(extra.rhs);
 
     if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
@@ -5771,7 +5775,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
     defer overflow_local.free(func);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
     const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
@@ -5785,7 +5789,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
-    const lhs_ty = func.air.typeOf(extra.lhs);
+    const lhs_ty = func.typeOf(extra.lhs);
     const mod = func.bin_file.base.options.module.?;
 
     if (lhs_ty.zigTypeTag(mod) == .Vector) {
@@ -5946,7 +5950,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var bin_op_local = try bin_op.toLocal(func, lhs_ty);
     defer bin_op_local.free(func);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, bin_op_local, lhs_ty, 0);
     const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
@@ -5955,10 +5959,10 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
-    const ty = func.air.typeOfIndex(inst);
-    const mod = func.bin_file.base.options.module.?;
+    const ty = func.typeOfIndex(inst);
     if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
@@ -5986,11 +5990,11 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
 }
 
 fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
-    const ty = func.air.typeOfIndex(inst);
-    const mod = func.bin_file.base.options.module.?;
+    const ty = func.typeOfIndex(inst);
     if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@mulAdd` for vectors", .{});
     }
@@ -6020,11 +6024,11 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const ty = func.air.typeOf(ty_op.operand);
-    const result_ty = func.air.typeOfIndex(inst);
-    const mod = func.bin_file.base.options.module.?;
+    const ty = func.typeOf(ty_op.operand);
+    const result_ty = func.typeOfIndex(inst);
     if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@clz` for vectors", .{});
     }
@@ -6073,12 +6077,12 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const ty = func.air.typeOf(ty_op.operand);
-    const result_ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOf(ty_op.operand);
+    const result_ty = func.typeOfIndex(inst);
-
-    const mod = func.bin_file.base.options.module.?;
     if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@ctz` for vectors", .{});
     }
@@ -6141,7 +6145,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
     if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
 
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
-    const ty = func.air.typeOf(pl_op.operand);
+    const ty = func.typeOf(pl_op.operand);
     const operand = try func.resolveInst(pl_op.operand);
 
     log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand });
@@ -6179,7 +6183,7 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const err_union = try func.resolveInst(pl_op.operand);
     const extra = func.air.extraData(Air.Try, pl_op.payload);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = func.air.typeOf(pl_op.operand);
+    const err_union_ty = func.typeOf(pl_op.operand);
     const result = try lowerTry(func, inst, err_union, body, err_union_ty, false);
     func.finishAir(inst, result, &.{pl_op.operand});
 }
@@ -6189,7 +6193,7 @@ fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = func.air.typeOf(extra.data.ptr).childType();
+    const err_union_ty = func.typeOf(extra.data.ptr).childType();
     const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
     func.finishAir(inst, result, &.{extra.data.ptr});
 }
@@ -6251,11 +6255,11 @@ fn lowerTry(
 }
 
 fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
-    const mod = func.bin_file.base.options.module.?;
 
     if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: @byteSwap for vectors", .{});
@@ -6325,7 +6329,7 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6340,7 +6344,7 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6361,7 +6365,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const mod = func.bin_file.base.options.module.?;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6512,7 +6516,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const mod = func.bin_file.base.options.module.?;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
@@ -6626,7 +6630,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const mod = func.bin_file.base.options.module.?;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 64) {
@@ -6785,11 +6789,11 @@ fn callIntrinsic(
 fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const enum_ty = func.air.typeOf(un_op);
+    const enum_ty = func.typeOf(un_op);
 
     const func_sym_index = try func.getTagNameFunction(enum_ty);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.lowerToStack(result_ptr);
     try func.emitWValue(operand);
     try func.addLabel(.call, func_sym_index);
@@ -7061,9 +7065,9 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
-    const ptr_ty = func.air.typeOf(extra.ptr);
+    const ptr_ty = func.typeOf(extra.ptr);
     const ty = ptr_ty.childType();
-    const result_ty = func.air.typeOfIndex(inst);
+    const result_ty = func.typeOfIndex(inst);
 
     const ptr_operand = try func.resolveInst(extra.ptr);
     const expected_val = try func.resolveInst(extra.expected_value);
@@ -7133,7 +7137,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const atomic_load = func.air.instructions.items(.data)[inst].atomic_load;
     const ptr = try func.resolveInst(atomic_load.ptr);
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
 
     if (func.useAtomicFeature()) {
         const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
@@ -7163,7 +7167,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ptr = try func.resolveInst(pl_op.operand);
     const operand = try func.resolveInst(extra.operand);
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const op: std.builtin.AtomicRmwOp = extra.op();
 
     if (func.useAtomicFeature()) {
@@ -7348,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ptr = try func.resolveInst(bin_op.lhs);
     const operand = try func.resolveInst(bin_op.rhs);
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const ty = ptr_ty.childType();
 
     if (func.useAtomicFeature()) {
@@ -7380,3 +7384,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result = try WValue.toLocal(.stack, func, Type.usize);
     return func.finishAir(inst, result, &.{});
 }
+
+fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
+    const mod = func.bin_file.base.options.module.?;
+    return func.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
+    const mod = func.bin_file.base.options.module.?;
+    return func.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 826bca2266..865ebe02f7 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -447,7 +447,7 @@ const InstTracking = struct {
             else => unreachable,
         }
         tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long });
-        try function.genCopy(function.air.typeOfIndex(inst), self.long, self.short);
+        try function.genCopy(function.typeOfIndex(inst), self.long, self.short);
     }
 
     fn reuseFrame(self: *InstTracking) void {
@@ -537,7 +537,7 @@ const InstTracking = struct {
         inst: Air.Inst.Index,
         target: InstTracking,
     ) !void {
-        const ty = function.air.typeOfIndex(inst);
+        const ty = function.typeOfIndex(inst);
         if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame)
             try function.genCopy(ty, target.long, self.short);
         try function.genCopy(ty, target.short, self.short);
@@ -1725,6 +1725,8 @@ fn gen(self: *Self) InnerError!void {
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
@@ -1733,7 +1735,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
         }
 
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue;
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue;
 
         wip_mir_log.debug("{}", .{self.fmtAir(inst)});
         verbose_tracking_log.debug("{}", .{self.fmtTracking()});
@@ -1919,6 +1921,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .constant => unreachable, // excluded from function bodies
             .const_ty => unreachable, // excluded from function bodies
+            .interned => unreachable, // excluded from function bodies
 
             .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(),
 
             .optional_payload => try self.airOptionalPayload(inst),
@@ -2255,7 +2258,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
     const mod = self.bin_file.options.module.?;
-    const ptr_ty = self.air.typeOfIndex(inst);
+    const ptr_ty = self.typeOfIndex(inst);
     const val_ty = ptr_ty.childType();
     return self.allocFrameIndex(FrameAlloc.init(.{
         .size = math.cast(u32, val_ty.abiSize(mod)) orelse {
@@ -2266,7 +2269,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
 }
 
 fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
-    return self.allocRegOrMemAdvanced(self.air.typeOfIndex(inst), inst, reg_ok);
+    return self.allocRegOrMemAdvanced(self.typeOfIndex(inst), inst, reg_ok);
 }
 
 fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
@@ -2485,7 +2488,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
         .load_frame => .{ .register_offset = .{
             .reg = (try self.copyToRegisterWithInstTracking(
                 inst,
-                self.air.typeOfIndex(inst),
+                self.typeOfIndex(inst),
                 self.ret_mcv.long,
             )).register,
             .off = self.ret_mcv.short.indirect.off,
@@ -2496,9 +2499,9 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_bits = dst_ty.floatBits(self.target.*);
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_bits = src_ty.floatBits(self.target.*);
 
     const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2562,9 +2565,9 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_bits = dst_ty.floatBits(self.target.*);
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_bits = src_ty.floatBits(self.target.*);
 
     const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2625,10 +2628,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const src_ty = self.typeOf(ty_op.operand);
         const src_int_info = src_ty.intInfo(mod);
 
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         const dst_int_info = dst_ty.intInfo(mod);
         const abi_size = @intCast(u32, dst_ty.abiSize(mod));
@@ -2707,9 +2710,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
 
     const result = result: {
@@ -2818,7 +2821,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     const operand = try self.resolveInst(un_op);
 
     const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand))
@@ -2834,11 +2837,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const slice_ty = self.air.typeOfIndex(inst);
+    const slice_ty = self.typeOfIndex(inst);
     const ptr = try self.resolveInst(bin_op.lhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
     const len = try self.resolveInst(bin_op.rhs);
-    const len_ty = self.air.typeOf(bin_op.rhs);
+    const len_ty = self.typeOf(bin_op.rhs);
 
     const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod));
     try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
@@ -2877,7 +2880,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
     const air_tag = self.air.instructions.items(.tag);
     const air_data = self.air.instructions.items(.data);
 
-    const dst_ty = self.air.typeOf(dst_air);
+    const dst_ty = self.typeOf(dst_air);
     const dst_info = dst_ty.intInfo(mod);
     if (Air.refToIndex(dst_air)) |inst| {
         switch (air_tag[inst]) {
@@ -2889,7 +2892,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
                     @boolToInt(src_int.positive and dst_info.signedness == .signed);
             },
             .intcast => {
-                const src_ty = self.air.typeOf(air_data[inst].ty_op.operand);
+                const src_ty = self.typeOf(air_data[inst].ty_op.operand);
                 const src_info = src_ty.intInfo(mod);
                 return @min(switch (src_info.signedness) {
                     .signed => switch (dst_info.signedness) {
@@ -2913,7 +2916,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const result = result: {
         const tag = self.air.instructions.items(.tag)[inst];
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         switch (dst_ty.zigTypeTag(mod)) {
             .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
             else => {},
@@ -2942,7 +2945,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
 fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ty = self.air.typeOf(bin_op.lhs);
+    const ty = self.typeOf(bin_op.lhs);
 
     const lhs_mcv = try self.resolveInst(bin_op.lhs);
     const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
@@ -3021,7 +3024,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
 fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ty = self.air.typeOf(bin_op.lhs);
+    const ty = self.typeOf(bin_op.lhs);
 
     const lhs_mcv = try self.resolveInst(bin_op.lhs);
     const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
@@ -3093,7 +3096,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
 fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ty = self.air.typeOf(bin_op.lhs);
+    const ty = self.typeOf(bin_op.lhs);
 
     try self.spillRegisters(&.{ .rax, .rdx });
     const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
@@ -3151,7 +3154,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const tag = self.air.instructions.items(.tag)[inst];
-        const ty = self.air.typeOf(bin_op.lhs);
+        const ty = self.typeOf(bin_op.lhs);
         switch (ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
             .Int => {
@@ -3168,7 +3171,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     .signed => .o,
                 };
 
-                const tuple_ty = self.air.typeOfIndex(inst);
+                const tuple_ty = self.typeOfIndex(inst);
                 if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
                     switch (partial_mcv) {
                         .register => |reg| {
@@ -3211,8 +3214,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
             .Int => {
@@ -3241,7 +3244,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs);
                 const cc = Condition.ne;
 
-                const tuple_ty = self.air.typeOfIndex(inst);
+                const tuple_ty = self.typeOfIndex(inst);
                 if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) {
                     switch (partial_mcv) {
                         .register => |reg| {
@@ -3349,7 +3352,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const dst_ty = self.air.typeOf(bin_op.lhs);
+    const dst_ty = self.typeOf(bin_op.lhs);
     const result: MCValue = switch (dst_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}),
         .Int => result: {
@@ -3370,7 +3373,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
             const lhs = try self.resolveInst(bin_op.lhs);
             const rhs = try self.resolveInst(bin_op.rhs);
 
-            const tuple_ty = self.air.typeOfIndex(inst);
+            const tuple_ty = self.typeOfIndex(inst);
             const extra_bits = if (dst_info.bits <= 64)
                 self.regExtraBits(dst_ty)
             else
@@ -3525,8 +3528,8 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
     try self.register_manager.getReg(.rcx, null);
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
@@ -3543,7 +3546,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
 fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const pl_ty = self.air.typeOfIndex(inst);
+        const pl_ty = self.typeOfIndex(inst);
 
         const opt_mcv = try self.resolveInst(ty_op.operand);
         if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -3568,7 +3571,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const opt_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
@@ -3582,8 +3585,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
         const opt_ty = src_ty.childType();
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -3615,7 +3618,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const err_union_ty = self.air.typeOf(ty_op.operand);
+    const err_union_ty = self.typeOf(ty_op.operand);
     const err_ty = err_union_ty.errorUnionSet();
     const payload_ty = err_union_ty.errorUnionPayload();
     const operand = try self.resolveInst(ty_op.operand);
@@ -3662,7 +3665,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const err_union_ty = self.air.typeOf(ty_op.operand);
+    const err_union_ty = self.typeOf(ty_op.operand);
     const operand = try self.resolveInst(ty_op.operand);
     const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand);
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -3720,7 +3723,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3756,7 +3759,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3765,7 +3768,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
         src_reg
     else
@@ -3791,7 +3794,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const src_ty = self.typeOf(ty_op.operand);
         const src_mcv = try self.resolveInst(ty_op.operand);
         const src_reg = switch (src_mcv) {
             .register => |reg| reg,
@@ -3816,7 +3819,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         if (self.liveness.isUnused(inst)) break :result .unreach;
 
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
             src_reg
         else
@@ -3856,10 +3859,10 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const pl_ty = self.air.typeOf(ty_op.operand);
+        const pl_ty = self.typeOf(ty_op.operand);
         if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 };
 
-        const opt_ty = self.air.typeOfIndex(inst);
+        const opt_ty = self.typeOfIndex(inst);
         const pl_mcv = try self.resolveInst(ty_op.operand);
         const same_repr = opt_ty.optionalReprIsPayload(mod);
         if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;
@@ -3952,7 +3955,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
         if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
 
         const dst_mcv = try self.allocRegOrMem(inst, true);
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         try self.genCopy(dst_ty, dst_mcv, src_mcv);
         break :result dst_mcv;
     };
@@ -3980,7 +3983,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3989,7 +3992,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
         src_reg
     else
@@ -4014,7 +4017,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const opt_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
@@ -4046,7 +4049,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
 
 fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const slice_ty = self.air.typeOf(lhs);
+    const slice_ty = self.typeOf(lhs);
     const slice_mcv = try self.resolveInst(lhs);
     const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4059,7 +4062,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
     const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
 
-    const index_ty = self.air.typeOf(rhs);
+    const index_ty = self.typeOf(rhs);
     const index_mcv = try self.resolveInst(rhs);
     const index_mcv_lock: ?RegisterLock = switch (index_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4083,7 +4086,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const slice_ty = self.air.typeOf(bin_op.lhs);
+    const slice_ty = self.typeOf(bin_op.lhs);
 
     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
     const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -4105,7 +4108,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
-    const array_ty = self.air.typeOf(bin_op.lhs);
+    const array_ty = self.typeOf(bin_op.lhs);
     const array = try self.resolveInst(bin_op.lhs);
     const array_lock: ?RegisterLock = switch (array) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4116,7 +4119,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const elem_ty = array_ty.childType();
     const elem_abi_size = elem_ty.abiSize(mod);
 
-    const index_ty = self.air.typeOf(bin_op.rhs);
+    const index_ty = self.typeOf(bin_op.rhs);
     const index = try self.resolveInst(bin_op.rhs);
     const index_lock: ?RegisterLock = switch (index) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4170,14 +4173,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
 
     // this is identical to the `airPtrElemPtr` codegen expect here an
     // additional `mov` is needed at the end to get the actual value
 
     const elem_ty = ptr_ty.elemType2(mod);
     const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
-    const index_ty = self.air.typeOf(bin_op.rhs);
+    const index_ty = self.typeOf(bin_op.rhs);
     const index_mcv = try self.resolveInst(bin_op.rhs);
     const index_lock = switch (index_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4218,7 +4221,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const ptr_ty = self.air.typeOf(extra.lhs);
+    const ptr_ty = self.typeOf(extra.lhs);
     const ptr = try self.resolveInst(extra.lhs);
     const ptr_lock: ?RegisterLock = switch (ptr) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4228,7 +4231,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     const elem_ty = ptr_ty.elemType2(mod);
     const elem_abi_size = elem_ty.abiSize(mod);
-    const index_ty = self.air.typeOf(extra.rhs);
+    const index_ty = self.typeOf(extra.rhs);
     const index = try self.resolveInst(extra.rhs);
     const index_lock: ?RegisterLock = switch (index) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4249,9 +4252,9 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_union_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_union_ty = self.typeOf(bin_op.lhs);
     const union_ty = ptr_union_ty.childType();
-    const tag_ty = self.air.typeOf(bin_op.rhs);
+    const tag_ty = self.typeOf(bin_op.rhs);
     const layout = union_ty.unionGetLayout(mod);
 
     if (layout.tag_size == 0) {
@@ -4296,8 +4299,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const tag_ty = self.air.typeOfIndex(inst);
-    const union_ty = self.air.typeOf(ty_op.operand);
+    const tag_ty = self.typeOfIndex(inst);
+    const union_ty = self.typeOf(ty_op.operand);
     const layout = union_ty.unionGetLayout(mod);
 
     if (layout.tag_size == 0) {
@@ -4350,8 +4353,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
 
         const src_mcv = try self.resolveInst(ty_op.operand);
         const mat_src_mcv = switch (src_mcv) {
@@ -4479,8 +4482,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
         const src_bits = src_ty.bitSize(mod);
 
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -4575,7 +4578,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const src_ty = self.typeOf(ty_op.operand);
         const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -4745,7 +4748,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
@@ -4766,7 +4769,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
     const src_mcv = try self.resolveInst(ty_op.operand);
@@ -4876,12 +4879,12 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const tag = self.air.instructions.items(.tag)[inst];
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
+    const ty = self.typeOf(un_op);
     const abi_size: u32 = switch (ty.abiSize(mod)) {
         1...16 => 16,
         17...32 => 32,
         else => return self.fail("TODO implement airFloatSign for {}", .{
-            ty.fmt(self.bin_file.options.module.?),
+            ty.fmt(mod),
         }),
     };
     const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);
@@ -5005,7 +5008,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
+    const ty = self.typeOf(un_op);
 
     const src_mcv = try self.resolveInst(un_op);
     const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5093,7 +5096,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
 
 fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
+    const ty = self.typeOf(un_op);
     const abi_size = @intCast(u32, ty.abiSize(mod));
 
     const src_mcv = try self.resolveInst(un_op);
@@ -5399,7 +5402,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
@@ -5407,7 +5410,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
         const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
         defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
 
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const elem_size = elem_ty.abiSize(mod);
 
         const elem_rc = regClassForType(elem_ty, mod);
@@ -5548,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     }
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr_mcv = try self.resolveInst(bin_op.lhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
     const src_mcv = try self.resolveInst(bin_op.rhs);
     if (ptr_ty.ptrInfo().data.host_size > 0) {
         try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
@@ -5573,8 +5576,8 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 
 fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const ptr_field_ty = self.air.typeOfIndex(inst);
-    const ptr_container_ty = self.air.typeOf(operand);
+    const ptr_field_ty = self.typeOfIndex(inst);
+    const ptr_container_ty = self.typeOf(operand);
     const container_ty = ptr_container_ty.childType();
 
     const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
         .Auto, .Extern => container_ty.structFieldOffset(index, mod),
@@ -5602,7 +5605,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const operand = extra.struct_operand;
         const index = extra.field_index;
 
-        const container_ty = self.air.typeOf(operand);
+        const container_ty = self.typeOf(operand);
         const container_rc = regClassForType(container_ty, mod);
         const field_ty = container_ty.structFieldType(index);
         if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
@@ -5756,7 +5759,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
-    const inst_ty = self.air.typeOfIndex(inst);
+    const inst_ty = self.typeOfIndex(inst);
     const parent_ty = inst_ty.childType();
     const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
@@ -5772,7 +5775,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const src_ty = self.air.typeOf(src_air);
+    const src_ty = self.typeOf(src_air);
     const src_mcv = try self.resolveInst(src_air);
 
     if (src_ty.zigTypeTag(mod) == .Vector) {
         return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
@@ -6358,8 +6361,8 @@ fn genBinOp(
     rhs_air: Air.Inst.Ref,
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const lhs_ty = self.air.typeOf(lhs_air);
-    const rhs_ty = self.air.typeOf(rhs_air);
+    const lhs_ty = self.typeOf(lhs_air);
+    const rhs_ty = self.typeOf(rhs_air);
     const abi_size = @intCast(u32, lhs_ty.abiSize(mod));
 
     const maybe_mask_reg = switch (air_tag) {
@@ -7918,6 +7921,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
 }
 
 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     // skip zero-bit arguments as they don't have a corresponding arg instruction
     var arg_index = self.arg_index;
     while (self.args[arg_index] == .none) arg_index += 1;
@@ -7931,9 +7935,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
         else => return self.fail("TODO implement arg for {}", .{dst_mcv}),
     }
 
-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
-    const name = self.owner.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
+    const name = self.owner.mod_fn.getParamName(mod, src_index);
     try self.genArgDbgInfo(ty, name, dst_mcv);
 
     break :result dst_mcv;
@@ -8050,7 +8054,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = self.air.typeOf(callee);
+    const ty = self.typeOf(callee);
 
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
@@ -8085,7 +8089,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
         else => unreachable,
     }
     for (args, info.args) |arg, mc_arg| {
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(arg);
         switch (mc_arg) {
             .none => {},
@@ -8112,7 +8116,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     defer if (ret_lock) |lock| self.register_manager.unlockReg(lock);
 
     for (args, info.args) |arg, mc_arg| {
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(arg);
         switch (mc_arg) {
             .none, .load_frame => {},
@@ -8241,7 +8245,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ptr = try self.resolveInst(un_op);
-    const ptr_ty = self.air.typeOf(un_op);
+    const ptr_ty = self.typeOf(un_op);
     switch (self.ret_mcv.short) {
         .none => {},
         .register => try self.load(self.ret_mcv.short, ptr_ty, ptr),
@@ -8258,7 +8262,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ty = self.air.typeOf(bin_op.lhs);
+    const ty = self.typeOf(bin_op.lhs);
 
     try self.spillEflagsIfOccupied();
     self.eflags_inst = inst;
@@ -8476,7 +8480,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
     try self.spillEflagsIfOccupied();
     self.eflags_inst = inst;
 
-    const op_ty = self.air.typeOf(un_op);
+    const op_ty = self.typeOf(un_op);
     const op_abi_size = @intCast(u32, op_ty.abiSize(mod));
     const op_mcv = try self.resolveInst(un_op);
     const
dst_reg = switch (op_mcv) { @@ -8496,7 +8500,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); + const err_union_ty = self.typeOf(pl_op.operand); const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8505,7 +8509,7 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8584,7 +8588,7 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -8626,7 +8630,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const cond_ty = self.air.typeOf(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; @@ -8871,7 +8875,7 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNull(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8879,7 +8883,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNullPtr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8887,7 +8891,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNull(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8898,7 +8902,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = 
self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNullPtr(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8909,7 +8913,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8932,7 +8936,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); const result = try self.isErr(inst, ptr_ty.childType(), operand); @@ -8943,7 +8947,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNonErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8966,7 +8970,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); const result = try self.isNonErr(inst, ptr_ty.childType(), operand); @@ -9032,7 +9036,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = try self.resolveInst(pl_op.operand); - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); var extra_index: usize = switch_br.end; var case_i: u32 = 0; @@ -9119,7 +9123,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); - const block_ty = self.air.typeOfIndex(br.block_inst); + const block_ty = self.typeOfIndex(br.block_inst); const block_unused = !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; @@ -9244,7 +9248,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(reg, self.air.typeOf(input), arg_mcv); + try self.genSetReg(reg, self.typeOf(input), arg_mcv); } { @@ -10169,7 +10173,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; @@ -10179,8 +10183,8 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const result = result: { const dst_rc = regClassForType(dst_ty, mod); @@ -10241,8 +10245,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const slice_ty = self.air.typeOfIndex(inst); - const ptr_ty = self.air.typeOf(ty_op.operand); + const slice_ty = self.typeOfIndex(inst); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); @@ -10264,11 +10268,11 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { .signed => src_bits, @@ -10318,8 +10322,8 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); - const dst_ty = self.air.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; @@ -10371,8 +10375,8 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(extra.ptr); - const val_ty = self.air.typeOf(extra.expected_value); + const ptr_ty = self.typeOf(extra.ptr); + const val_ty = self.typeOf(extra.expected_value); const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); @@ -10712,10 +10716,10 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { const unused = self.liveness.isUnused(inst); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const ptr_mcv = try self.resolveInst(pl_op.operand); - const val_ty = self.air.typeOf(extra.operand); + const val_ty = self.typeOf(extra.operand); const val_mcv = try self.resolveInst(extra.operand); const result = @@ -10726,7 +10730,7 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; - const ptr_ty = self.air.typeOf(atomic_load.ptr); + const ptr_ty = self.typeOf(atomic_load.ptr); const ptr_mcv = try self.resolveInst(atomic_load.ptr); const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -10747,10 +10751,10 @@ fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: 
std.builtin.AtomicOrder) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_mcv = try self.resolveInst(bin_op.lhs); - const val_ty = self.air.typeOf(bin_op.rhs); + const val_ty = self.typeOf(bin_op.rhs); const val_mcv = try self.resolveInst(bin_op.rhs); const result = try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order); @@ -10768,7 +10772,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10776,7 +10780,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock); const src_val = try self.resolveInst(bin_op.rhs); - const elem_ty = self.air.typeOf(bin_op.rhs); + const elem_ty = self.typeOf(bin_op.rhs); const src_val_lock: ?RegisterLock = switch (src_val) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10888,7 +10892,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10922,8 +10926,8 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const inst_ty = self.air.typeOfIndex(inst); - const enum_ty = self.air.typeOf(un_op); + const inst_ty = self.typeOfIndex(inst); + const enum_ty = self.typeOf(un_op); // We need a properly aligned and sized call frame to be able to call this function. 
{ @@ -10964,7 +10968,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const err_ty = self.air.typeOf(un_op); + const err_ty = self.typeOf(un_op); const err_mcv = try self.resolveInst(un_op); const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); @@ -11046,7 +11050,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { fn airSplat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const dst_rc = regClassForType(vector_ty, mod); const scalar_ty = vector_ty.scalarType(mod); @@ -11266,7 +11270,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -11411,10 +11415,10 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { - const union_ty = self.air.typeOfIndex(inst); + const union_ty = self.typeOfIndex(inst); const layout = union_ty.unionGetLayout(mod); - const src_ty = self.air.typeOf(extra.init); + const src_ty = self.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); if (layout.tag_size == 0) { if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; @@ -11461,7 +11465,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); if (!self.hasFeature(.fma)) return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), @@ -11609,7 +11613,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; - const ty = self.air.typeOf(ref); + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; @@ -11713,7 +11717,7 @@ fn resolveCallingConventionValues( defer self.gpa.free(param_types); fn_ty.fnParamTypes(param_types); // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.air.typeOf(arg); + for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg); var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. 
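A note on the pattern behind all of the hunks above: `Air.typeOf` and `Air.typeOfIndex` now take the `InternPool`, so the backend routes every type query through two thin wrappers that fetch the pool from the module. The next hunk adds the actual definitions; this is the shape, copied from it with comments added (the `Self`, `Air`, and `Type` declarations are the ones already in this file):

    // Thin wrapper so call sites don't repeat the module lookup.
    // `bin_file.options.module` is optional; codegen only runs once a
    // Module exists, hence the `.?` unwrap.
    fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
        const mod = self.bin_file.options.module.?;
        return self.air.typeOf(inst, mod.intern_pool);
    }

A call site therefore changes from `self.air.typeOf(ref)` to `self.typeOf(ref)` with no other edits, which is all that most hunks in this file do.
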
@@ -12023,3 +12027,13 @@ fn hasAnyFeatures(self: *Self, features: anytype) bool { fn hasAllFeatures(self: *Self, features: anytype) bool { return Target.x86.featureSetHasAll(self.target.cpu.features, features); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/codegen/c.zig b/src/codegen/c.zig index da040a6fbb..a87f37b1c9 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -288,7 +288,7 @@ pub const Function = struct { const mod = f.object.dg.module; const val = f.air.value(ref, mod).?; - const ty = f.air.typeOf(ref); + const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); @@ -355,7 +355,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; return f.object.dg.renderValue(w, ty, val, location); }, @@ -368,7 +368,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); @@ -382,7 +382,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); @@ -396,7 +396,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); @@ -486,6 +486,16 @@ pub const Function = struct { f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); } + + fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { + const mod = f.object.dg.module; + return f.air.typeOf(inst, mod.intern_pool); + } + + fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { + const mod = f.object.dg.module; + return f.air.typeOfIndex(inst, mod.intern_pool); + } }; /// This data is available when outputting .c code for a `Module`. 
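The C backend grows the same pair of helpers on `Function` in the hunk just above, and the remaining hunks in this file are the mechanical rewrite of call sites onto them. A hypothetical handler, not from this patch (`ty_op` and `allocLocal` used as elsewhere in this file), sketching how the two helpers pair up in the rewritten code:

    // Hypothetical sketch: the instruction's result type comes from
    // typeOfIndex, an operand's type from typeOf; both thread the
    // InternPool in via f.object.dg.module.
    fn airExample(f: *Function, inst: Air.Inst.Index) !CValue {
        const ty_op = f.air.instructions.items(.data)[inst].ty_op;
        const inst_ty = f.typeOfIndex(inst);
        const operand_ty = f.typeOf(ty_op.operand);
        _ = operand_ty; // a real handler would lower the operand here
        return try f.allocLocal(inst, inst_ty);
    }
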
@@ -2802,17 +2812,19 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) { + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*)) continue; - } const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies + .arg => try airArg(f, inst), .trap => try airTrap(f.object.writer()), @@ -2837,7 +2849,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod); + const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. break :blk if (lhs_scalar_ty.isInt(mod)) @@ -3088,7 +3100,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -3107,7 +3119,7 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3136,8 +3148,8 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const ptr_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = f.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -3169,7 +3181,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3198,8 +3210,8 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const slice_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const slice_ty = f.typeOf(bin_op.lhs); const elem_ty = slice_ty.elemType2(mod); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -3226,7 
+3238,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; @@ -3251,7 +3263,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const elem_type = inst_ty.elemType(); if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; @@ -3267,7 +3279,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.elemType(); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; @@ -3282,7 +3294,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_cty = try f.typeToIndex(inst_ty, .parameter); const i = f.next_arg_index; @@ -3309,7 +3321,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = f.air.typeOf(ty_op.operand); + const ptr_ty = f.typeOf(ty_op.operand); const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const src_ty = ptr_info.pointee_type; @@ -3399,7 +3411,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); const op_inst = Air.refToIndex(un_op); - const op_ty = f.air.typeOf(un_op); + const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType() else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); @@ -3453,9 +3465,9 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); @@ -3478,13 +3490,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const dest_int_info = inst_scalar_ty.intInfo(mod); const dest_bits = dest_int_info.bits; const dest_c_bits = toCIntBits(dest_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); const scalar_int_info = scalar_ty.intInfo(mod); @@ 
-3572,7 +3584,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); @@ -3587,12 +3599,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // *a = b; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = f.air.typeOf(bin_op.lhs); + const ptr_ty = f.typeOf(bin_op.lhs); const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const ptr_val = try f.resolveInst(bin_op.lhs); - const src_ty = f.air.typeOf(bin_op.rhs); + const src_ty = f.typeOf(bin_op.rhs); const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; @@ -3737,8 +3749,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); - const operand_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(mod); const w = f.object.writer(); @@ -3769,14 +3781,14 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3802,7 +3814,7 @@ fn airBinOp( ) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(mod); if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); @@ -3811,7 +3823,7 @@ fn airBinOp( const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3839,7 +3851,7 @@ fn airCmpOp( operator: std.math.CompareOperator, ) !CValue { const mod = f.object.dg.module; - const lhs_ty = f.air.typeOf(data.lhs); + const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(mod); const scalar_bits = scalar_ty.bitSize(mod); @@ -3855,12 +3867,12 @@ fn airCmpOp( if (scalar_ty.isRuntimeFloat()) return airCmpBuiltinCall(f, inst, data, operator, .operator, .none); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); - const rhs_ty = f.air.typeOf(data.rhs); + const rhs_ty = f.typeOf(data.rhs); const 
need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer(); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3891,7 +3903,7 @@ fn airEquality( const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const operand_bits = operand_ty.bitSize(mod); if (operand_ty.isInt(mod) and operand_bits > 64) return airCmpBuiltinCall( @@ -3910,7 +3922,7 @@ fn airEquality( try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); @@ -3954,7 +3966,7 @@ fn airEquality( fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -3976,7 +3988,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const elem_ty = inst_scalar_ty.elemType2(mod); @@ -4019,7 +4031,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64) @@ -4065,7 +4077,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const len = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = inst_ty.slicePtrFieldType(&buf); @@ -4110,7 +4122,7 @@ fn airCall( const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { - const arg_ty = f.air.typeOf(arg); + const arg_ty = f.typeOf(arg); const arg_cty = try f.typeToIndex(arg_ty, .parameter); if (f.indexToCType(arg_cty).tag() == .void) { resolved_arg.* = .none; @@ -4141,7 +4153,7 @@ fn airCall( for (args) |arg| try bt.feed(arg); } - const callee_ty = f.air.typeOf(pl_op.operand); + const callee_ty = f.typeOf(pl_op.operand); const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => callee_ty.childType(), @@ -4279,7 +4291,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { f.next_block_index += 1; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else @@ -4302,7 +4314,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); // noreturn blocks have no `br` instructions reaching them, so we don't want a label - if (!f.air.typeOfIndex(inst).isNoReturn()) { + if (!f.typeOfIndex(inst).isNoReturn()) { // label must be followed by an expression, include 
an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); } @@ -4314,7 +4326,7 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Try, pl_op.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(pl_op.operand); + const err_union_ty = f.typeOf(pl_op.operand); return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false); } @@ -4322,7 +4334,7 @@ fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = f.typeOf(extra.data.ptr).childType(); return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true); } @@ -4336,7 +4348,7 @@ fn lowerTry( ) !CValue { const mod = f.object.dg.module; const err_union = try f.resolveInst(operand); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(); @@ -4404,7 +4416,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { // If result is .none then the value of the block is unused. if (result != .none) { - const operand_ty = f.air.typeOf(branch.operand); + const operand_ty = f.typeOf(branch.operand); const operand = try f.resolveInst(branch.operand); try reap(f, inst, &.{branch.operand}); @@ -4421,10 +4433,10 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const dest_ty = f.air.typeOfIndex(inst); + const dest_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const bitcasted = try bitcast(f, dest_ty, operand, operand_ty); try reap(f, inst, &.{ty_op.operand}); @@ -4684,7 +4696,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); - const condition_ty = f.air.typeOf(pl_op.operand); + const condition_ty = f.typeOf(pl_op.operand); const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload); const writer = f.object.writer(); @@ -4784,7 +4796,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: { const local = try f.allocLocal(inst, inst_ty); if (f.wantSafety()) { @@ -4814,7 +4826,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { - const output_ty = if (output == .none) inst_ty else f.air.typeOf(output).childType(); + const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(); try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); @@ -4847,7 +4859,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[0] == '{'; const input_val = try f.resolveInst(input); if 
(asmInputNeedsLocal(constraint, input_val)) { - const input_ty = f.air.typeOf(input); + const input_ty = f.typeOf(input); if (is_reg) try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(input_ty, alignment); @@ -5048,7 +5060,7 @@ fn airIsNull( try f.writeCValue(writer, operand, .Other); } - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty; var payload_buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&payload_buf); @@ -5083,7 +5095,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const opt_ty = f.air.typeOf(ty_op.operand); + const opt_ty = f.typeOf(ty_op.operand); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); @@ -5092,7 +5104,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { return .none; } - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5119,9 +5131,9 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const ptr_ty = f.air.typeOf(ty_op.operand); + const ptr_ty = f.typeOf(ty_op.operand); const opt_ty = ptr_ty.childType(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; @@ -5149,11 +5161,11 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const opt_ty = operand_ty.elemType(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (opt_ty.optionalReprIsPayload(mod)) { if (f.liveness.isUnused(inst)) { @@ -5249,7 +5261,7 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { const container_ptr_val = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const container_ptr_ty = f.air.typeOf(extra.struct_operand); + const container_ptr_ty = f.typeOf(extra.struct_operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, extra.field_index); } @@ -5258,7 +5270,7 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue const container_ptr_val = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const container_ptr_ty = f.air.typeOf(ty_op.operand); + const container_ptr_ty = f.typeOf(ty_op.operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, index); } @@ -5267,10 +5279,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const container_ptr_ty = f.air.typeOfIndex(inst); + const container_ptr_ty = f.typeOfIndex(inst); const container_ty = container_ptr_ty.childType(); - const field_ptr_ty = f.air.typeOf(extra.field_ptr); + const field_ptr_ty = f.typeOf(extra.field_ptr); const field_ptr_val = try 
f.resolveInst(extra.field_ptr); try reap(f, inst, &.{extra.field_ptr}); @@ -5334,7 +5346,7 @@ fn fieldPtr( ) !CValue { const mod = f.object.dg.module; const container_ty = container_ptr_ty.elemType(); - const field_ptr_ty = f.air.typeOfIndex(inst); + const field_ptr_ty = f.typeOfIndex(inst); // Ensure complete type definition is visible before accessing fields. _ = try f.typeToIndex(container_ty, .complete); @@ -5385,7 +5397,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{extra.struct_operand}); return .none; @@ -5393,7 +5405,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const struct_byval = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const struct_ty = f.air.typeOf(extra.struct_operand); + const struct_ty = f.typeOf(extra.struct_operand); const writer = f.object.writer(); // Ensure complete type definition is visible before accessing fields. @@ -5514,9 +5526,9 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; @@ -5553,10 +5565,10 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; const writer = f.object.writer(); @@ -5589,9 +5601,9 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const repr_is_payload = inst_ty.optionalReprIsPayload(mod); - const payload_ty = f.air.typeOf(ty_op.operand); + const payload_ty = f.typeOf(ty_op.operand); const payload = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5621,7 +5633,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_ty = inst_ty.errorUnionSet(); @@ -5661,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); - const error_union_ty = f.air.typeOf(ty_op.operand).childType(); + const 
error_union_ty = f.typeOf(ty_op.operand).childType(); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); @@ -5684,7 +5696,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { // Then return the payload pointer (only if it is used) if (f.liveness.isUnused(inst)) return .none; - const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); + const local = try f.allocLocal(inst, f.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = &("); try f.writeCValueDeref(writer, operand); @@ -5711,7 +5723,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const payload = try f.resolveInst(ty_op.operand); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -5747,7 +5759,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const const writer = f.object.writer(); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, Type.bool); const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); @@ -5780,10 +5792,10 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const array_ty = f.air.typeOf(ty_op.operand).childType(); + const array_ty = f.typeOf(ty_op.operand).childType(); try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); try writer.writeAll(" = "); @@ -5812,10 +5824,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const target = f.object.dg.module.getTarget(); const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat()) if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend" @@ -5855,9 +5867,9 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); try reap(f, inst, &.{un_op}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); @@ -5885,9 +5897,9 @@ fn airUnBuiltinCall( const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const 
scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); @@ -5927,7 +5939,7 @@ fn airBinBuiltinCall( const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const operand_cty = try f.typeToCType(operand_ty, .complete); const is_big = operand_cty.tag() == .array; @@ -5935,7 +5947,7 @@ fn airBinBuiltinCall( const rhs = try f.resolveInst(bin_op.rhs); if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const scalar_ty = operand_ty.scalarType(mod); @@ -5984,9 +5996,9 @@ fn airCmpBuiltinCall( const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(data.lhs); + const operand_ty = f.typeOf(data.lhs); const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); @@ -6032,11 +6044,11 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const ptr = try f.resolveInst(extra.ptr); const expected_value = try f.resolveInst(extra.expected_value); const new_value = try f.resolveInst(extra.new_value); - const ptr_ty = f.air.typeOf(extra.ptr); + const ptr_ty = f.typeOf(extra.ptr); const ty = ptr_ty.childType(); const writer = f.object.writer(); @@ -6137,8 +6149,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const ptr_ty = f.air.typeOf(pl_op.operand); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = f.typeOf(pl_op.operand); const ty = ptr_ty.childType(); const ptr = try f.resolveInst(pl_op.operand); const operand = try f.resolveInst(extra.operand); @@ -6193,7 +6205,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const atomic_load = f.air.instructions.items(.data)[inst].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); - const ptr_ty = f.air.typeOf(atomic_load.ptr); + const ptr_ty = f.typeOf(atomic_load.ptr); const ty = ptr_ty.childType(); const repr_ty = if (ty.isRuntimeFloat()) @@ -6201,7 +6213,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { else ty; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6227,7 +6239,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = f.air.typeOf(bin_op.lhs); + const ptr_ty = f.typeOf(bin_op.lhs); const ty = ptr_ty.childType(); const ptr = try f.resolveInst(bin_op.lhs); const element = try f.resolveInst(bin_op.rhs); @@ 
-6270,10 +6282,10 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const dest_ty = f.air.typeOf(bin_op.lhs); + const dest_ty = f.typeOf(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); - const elem_ty = f.air.typeOf(bin_op.rhs); + const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); @@ -6393,8 +6405,8 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { const bin_op = f.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); - const dest_ty = f.air.typeOf(bin_op.lhs); - const src_ty = f.air.typeOf(bin_op.rhs); + const dest_ty = f.typeOf(bin_op.lhs); + const src_ty = f.typeOf(bin_op.rhs); const writer = f.object.writer(); try writer.writeAll("memcpy("); @@ -6434,7 +6446,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const union_ty = f.air.typeOf(bin_op.lhs).childType(); + const union_ty = f.typeOf(bin_op.lhs).childType(); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety().?; @@ -6455,11 +6467,11 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const union_ty = f.air.typeOf(ty_op.operand); + const union_ty = f.typeOf(ty_op.operand); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); const a = try Assignment.start(f, writer, inst_ty); @@ -6473,8 +6485,8 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; - const inst_ty = f.air.typeOfIndex(inst); - const enum_ty = f.air.typeOf(un_op); + const inst_ty = f.typeOfIndex(inst); + const enum_ty = f.typeOf(un_op); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -6494,7 +6506,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const local = try f.allocLocal(inst, inst_ty); @@ -6513,7 +6525,7 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -6539,7 +6551,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { const rhs = try f.resolveInst(extra.rhs); try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = 
f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6570,7 +6582,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6607,10 +6619,10 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const reduce = f.air.instructions.items(.data)[inst].reduce; const target = mod.getTarget(); - const scalar_ty = f.air.typeOfIndex(inst); + const scalar_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(reduce.operand); try reap(f, inst, &.{reduce.operand}); - const operand_ty = f.air.typeOf(reduce.operand); + const operand_ty = f.typeOf(reduce.operand); const writer = f.object.writer(); const use_operator = scalar_ty.bitSize(mod) <= 64; @@ -6762,7 +6774,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const len = @intCast(usize, inst_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]); const gpa = f.object.dg.gpa; @@ -6892,10 +6904,10 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; - const union_ty = f.air.typeOfIndex(inst); + const union_ty = f.typeOfIndex(inst); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_name = union_obj.fields.keys()[extra.field_index]; - const payload_ty = f.air.typeOf(extra.init); + const payload_ty = f.typeOf(extra.init); const payload = try f.resolveInst(extra.init); try reap(f, inst, &.{extra.init}); @@ -6965,7 +6977,7 @@ fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); @@ -6979,7 +6991,7 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); const local = try f.allocLocal(inst, inst_ty); @@ -6999,7 +7011,7 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); @@ -7025,7 +7037,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -7054,7 +7066,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa const 
rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -7088,7 +7100,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { const addend = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -7114,7 +7126,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { } fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete); const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len; @@ -7133,7 +7145,7 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const va_list = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -7164,7 +7176,7 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue { fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const va_list = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c3d3da0d32..2e42e8e3fc 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4497,7 +4497,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ - .ty = self.air.typeOf(inst), + .ty = self.typeOf(inst), .val = self.air.value(inst, mod).?, }); gop.value_ptr.* = llvm_val; @@ -4528,11 +4528,12 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { + const mod = self.dg.module; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) 
|inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const opt_value: ?*llvm.Value = switch (air_tags[inst]) { // zig fmt: off @@ -4751,6 +4752,8 @@ pub const FuncGen = struct { .constant => unreachable, .const_ty => unreachable, + .interned => unreachable, + .unreach => self.airUnreach(inst), .dbg_stmt => self.airDbgStmt(inst), .dbg_inline_begin => try self.airDbgInlineBegin(inst), @@ -4781,7 +4784,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const callee_ty = self.air.typeOf(pl_op.operand); + const callee_ty = self.typeOf(pl_op.operand); const mod = self.dg.module; const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, @@ -4815,7 +4818,7 @@ pub const FuncGen = struct { .no_bits => continue, .byval => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try self.dg.lowerType(param_ty); if (isByRef(param_ty, mod)) { @@ -4829,7 +4832,7 @@ pub const FuncGen = struct { }, .byref => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); if (isByRef(param_ty, mod)) { try llvm_args.append(llvm_arg); @@ -4844,7 +4847,7 @@ pub const FuncGen = struct { }, .byref_mut => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const alignment = param_ty.abiAlignment(mod); @@ -4865,7 +4868,7 @@ pub const FuncGen = struct { }, .abi_sized_int => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); const int_llvm_ty = self.context.intType(abi_size * 8); @@ -4901,7 +4904,7 @@ pub const FuncGen = struct { }, .multiple_llvm_types => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty, mod); @@ -4930,7 +4933,7 @@ pub const FuncGen = struct { }, .float_array => |count| { const arg = args[it.zig_index - 1]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); if (!isByRef(arg_ty, mod)) { const p = self.buildAlloca(llvm_arg.typeOf(), null); @@ -4950,7 +4953,7 @@ pub const FuncGen = struct { .i32_array, .i64_array => |arr_len| { const elem_size: u8 = if (lowering == .i32_array) 32 else 64; const arg = args[it.zig_index - 1]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); if (!isByRef(arg_ty, mod)) { const p = self.buildAlloca(llvm_arg.typeOf(), null); @@ -5094,7 +5097,7 @@ pub const FuncGen = struct { fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ret_ty = self.air.typeOf(un_op); + const ret_ty = self.typeOf(un_op); if (self.ret_ptr) 
|ret_ptr| { const operand = try self.resolveInst(un_op); var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -5150,7 +5153,7 @@ pub const FuncGen = struct { fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(); const fn_info = self.dg.decl.ty.fnInfo(); const mod = self.dg.module; @@ -5236,7 +5239,7 @@ pub const FuncGen = struct { fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; - const va_list_ty = self.air.typeOfIndex(inst); + const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try self.dg.lowerType(va_list_ty); const result_alignment = va_list_ty.abiAlignment(mod); @@ -5266,7 +5269,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const operand_ty = self.air.typeOf(bin_op.lhs); + const operand_ty = self.typeOf(bin_op.lhs); return self.cmp(lhs, rhs, operand_ty, op); } @@ -5279,7 +5282,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const vec_ty = self.air.typeOf(extra.lhs); + const vec_ty = self.typeOf(extra.lhs); const cmp_op = extra.compareOperator(); return self.cmp(lhs, rhs, vec_ty, cmp_op); @@ -5396,12 +5399,12 @@ pub const FuncGen = struct { } fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const parent_bb = self.context.createBasicBlock("Block"); - const mod = self.dg.module; if (inst_ty.isNoReturn()) { try self.genBody(body); @@ -5453,7 +5456,7 @@ pub const FuncGen = struct { const block = self.blocks.get(branch.block_inst).?; // Add the values to the lists only if the break provides a value. 
- const operand_ty = self.air.typeOf(branch.operand); + const operand_ty = self.typeOf(branch.operand); const mod = self.dg.module; if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) { const val = try self.resolveInst(branch.operand); @@ -5497,8 +5500,8 @@ pub const FuncGen = struct { const err_union = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOf(pl_op.operand); + const payload_ty = self.typeOfIndex(inst); const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); @@ -5509,7 +5512,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(); const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } @@ -5650,7 +5653,7 @@ pub const FuncGen = struct { // would have been emitted already. Also the main loop in genBody can // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. - if (body.len == 0 or !self.air.typeOfIndex(body[body.len - 1]).isNoReturn()) { + if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn()) { _ = self.builder.buildBr(loop_block); } return null; @@ -5659,11 +5662,11 @@ pub const FuncGen = struct { fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(); const llvm_usize = try self.dg.lowerType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(), .False); - const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); + const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); @@ -5683,10 +5686,10 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_scalar_ty = operand_ty.scalarType(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); const target = mod.getTarget(); @@ -5743,10 +5746,10 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_scalar_ty = operand_ty.scalarType(mod); - const dest_ty = 
self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); @@ -5832,7 +5835,7 @@ pub const FuncGen = struct { fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); - const slice_ptr_ty = self.air.typeOf(ty_op.operand); + const slice_ptr_ty = self.typeOf(ty_op.operand); const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType()); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); @@ -5842,7 +5845,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); @@ -5863,7 +5866,7 @@ pub const FuncGen = struct { fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); @@ -5878,7 +5881,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try self.dg.lowerType(array_ty); @@ -5920,7 +5923,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); @@ -5948,7 +5951,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); @@ -5973,7 +5976,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const struct_ptr = try self.resolveInst(struct_field.struct_operand); - const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ptr_ty = self.typeOf(struct_field.struct_operand); return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index); } @@ -5984,7 +5987,7 @@ pub const FuncGen = struct { ) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolveInst(ty_op.operand); - const struct_ptr_ty = self.air.typeOf(ty_op.operand); + const struct_ptr_ty = self.typeOf(ty_op.operand); return self.fieldPtr(inst, struct_ptr, 
struct_ptr_ty, field_index); } @@ -5993,7 +5996,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ty = self.typeOf(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); @@ -6234,7 +6237,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const di_local_var = dib.createAutoVariable( self.di_scope.?, @@ -6259,7 +6262,7 @@ pub const FuncGen = struct { const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); - const operand_ty = self.air.typeOf(pl_op.operand); + const operand_ty = self.typeOf(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); if (needDbgVarWorkaround(self.dg)) { @@ -6361,7 +6364,7 @@ pub const FuncGen = struct { llvm_ret_indirect[i] = (output != .none) and constraintAllowsMemory(constraint); if (output != .none) { const output_inst = try self.resolveInst(output); - const output_ty = self.air.typeOf(output); + const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType()); @@ -6379,7 +6382,7 @@ pub const FuncGen = struct { llvm_ret_i += 1; } } else { - const ret_ty = self.air.typeOfIndex(inst); + const ret_ty = self.typeOfIndex(inst); llvm_ret_types[llvm_ret_i] = try self.dg.lowerType(ret_ty); llvm_ret_i += 1; } @@ -6414,7 +6417,7 @@ pub const FuncGen = struct { extra_i += (constraint.len + name.len + (2 + 3)) / 4; const arg_llvm_value = try self.resolveInst(input); - const arg_ty = self.air.typeOf(input); + const arg_ty = self.typeOf(input); var llvm_elem_ty: ?*llvm.Type = null; if (isByRef(arg_ty, mod)) { llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty); @@ -6636,7 +6639,7 @@ pub const FuncGen = struct { if (output != .none) { const output_ptr = try self.resolveInst(output); - const output_ptr_ty = self.air.typeOf(output); + const output_ptr_ty = self.typeOf(output); const store_inst = self.builder.buildStore(output_value, output_ptr); store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod)); @@ -6657,7 +6660,7 @@ pub const FuncGen = struct { ) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); var buf: Type.Payload.ElemType = undefined; @@ -6706,7 +6709,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const payload_ty = 
err_union_ty.errorUnionPayload(); const err_set_ty = try self.dg.lowerType(Type.anyerror); @@ -6746,7 +6749,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand).childType(); + const optional_ty = self.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -6768,7 +6771,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand).childType(); + const optional_ty = self.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); const non_null_bit = self.context.intType(8).constInt(1, .False); @@ -6801,8 +6804,8 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOf(ty_op.operand); + const payload_ty = self.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; if (optional_ty.optionalReprIsPayload(mod)) { @@ -6824,9 +6827,9 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -6859,7 +6862,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); @@ -6893,7 +6896,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const err_union_ty = self.air.typeOf(ty_op.operand).childType(); + const err_union_ty = self.typeOf(ty_op.operand).childType(); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); @@ -6946,12 +6949,12 @@ pub const FuncGen = struct { fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); const non_null_bit = self.context.intType(8).constInt(1, .False); comptime assert(optional_layout_version == 3); if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) { return operand; } @@ -6976,9 +6979,9 @@ pub const FuncGen = struct { fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_un_ty = self.air.typeOfIndex(inst); + const err_un_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } @@ -7009,7 +7012,7 @@ pub const FuncGen = struct { fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_un_ty = self.air.typeOfIndex(inst); + const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -7069,7 +7072,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Bin, data.payload).data; const vector_ptr = try self.resolveInst(data.vector_ptr); - const vector_ptr_ty = self.air.typeOf(data.vector_ptr); + const vector_ptr_ty = self.typeOf(data.vector_ptr); const index = try self.resolveInst(extra.lhs); const operand = try self.resolveInst(extra.rhs); @@ -7090,7 +7093,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); + const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); @@ -7102,7 +7105,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); + const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); @@ -7114,7 +7117,7 @@ pub const FuncGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const llvm_slice_ty = try self.dg.lowerType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` @@ -7130,7 +7133,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); @@ -7153,7 +7156,7 @@ pub const 
FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); @@ -7169,7 +7172,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); @@ -7192,7 +7195,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); @@ -7207,7 +7210,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); @@ -7230,7 +7233,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); @@ -7244,7 +7247,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); } @@ -7256,7 +7259,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { @@ -7274,7 +7277,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { @@ -7314,7 +7317,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return 
self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); @@ -7329,7 +7332,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); @@ -7344,7 +7347,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const inst_llvm_ty = try self.dg.lowerType(inst_ty); const scalar_ty = inst_ty.scalarType(mod); @@ -7386,7 +7389,7 @@ pub const FuncGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); switch (ptr_ty.ptrSize()) { .One => { @@ -7412,7 +7415,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const negative_offset = self.builder.buildNeg(offset, ""); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); switch (ptr_ty.ptrSize()) { .One => { @@ -7447,9 +7450,9 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); + const lhs_ty = self.typeOf(extra.lhs); const scalar_ty = lhs_ty.scalarType(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; @@ -7735,7 +7738,7 @@ pub const FuncGen = struct { const mulend2 = try self.resolveInst(extra.rhs); const addend = try self.resolveInst(pl_op.operand); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend }); } @@ -7747,12 +7750,12 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const llvm_dest_ty = try self.dg.lowerType(dest_ty); const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) @@ -7821,8 +7824,8 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); @@ -7841,8 +7844,8 @@ pub const FuncGen = struct { const lhs = try 
self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_type = self.air.typeOf(bin_op.lhs); - const rhs_type = self.air.typeOf(bin_op.rhs); + const lhs_type = self.typeOf(bin_op.lhs); + const rhs_type = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_type.scalarType(mod); const rhs_scalar_ty = rhs_type.scalarType(mod); @@ -7860,8 +7863,8 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); const lhs_bits = lhs_scalar_ty.bitSize(mod); @@ -7903,8 +7906,8 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); @@ -7932,11 +7935,11 @@ pub const FuncGen = struct { fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(mod); if (operand_info.bits < dest_info.bits) { @@ -7954,7 +7957,7 @@ pub const FuncGen = struct { fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); return self.builder.buildTrunc(operand, dest_llvm_ty, ""); } @@ -7962,8 +7965,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const target = mod.getTarget(); const dest_bits = dest_ty.floatBits(target); const src_bits = operand_ty.floatBits(target); @@ -7992,8 +7995,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const target = mod.getTarget(); const dest_bits = dest_ty.floatBits(target); const src_bits = operand_ty.floatBits(target); @@ -8021,16 +8024,16 @@ pub const FuncGen = struct { fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ptr_ty 
= self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty); - const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, ""); } fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); - const inst_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); return self.bitCast(operand, operand_ty, inst_ty); } @@ -8159,17 +8162,17 @@ pub const FuncGen = struct { } fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const arg_val = self.args[self.arg_index]; self.arg_index += 1; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); if (self.dg.object.di_builder) |dib| { if (needDbgVarWorkaround(self.dg)) { return arg_val; } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const mod = self.dg.module; const func = self.dg.decl.getFunction().?; const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; @@ -8203,9 +8206,9 @@ pub const FuncGen = struct { } fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const ptr_ty = self.air.typeOfIndex(inst); - const pointee_type = ptr_ty.childType(); const mod = self.dg.module; + const ptr_ty = self.typeOfIndex(inst); + const pointee_type = ptr_ty.childType(); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.lowerType(pointee_type); @@ -8214,9 +8217,9 @@ pub const FuncGen = struct { } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const ptr_ty = self.air.typeOfIndex(inst); - const ret_ty = ptr_ty.childType(); const mod = self.dg.module; + const ptr_ty = self.typeOfIndex(inst); + const ret_ty = ptr_ty.childType(); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = try self.dg.lowerType(ret_ty); @@ -8232,7 +8235,7 @@ pub const FuncGen = struct { fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); const mod = self.dg.module; @@ -8285,7 +8288,7 @@ pub const FuncGen = struct { const mod = fg.dg.module; const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = fg.air.typeOf(ty_op.operand); + const ptr_ty = fg.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo().data; const ptr = try fg.resolveInst(ty_op.operand); @@ -8361,7 +8364,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(extra.ptr); var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); - const operand_ty = self.air.typeOf(extra.ptr).elemType(); + const operand_ty = self.typeOf(extra.ptr).elemType(); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) 
|abi_ty| { // operand needs widening and truncating @@ -8383,7 +8386,7 @@ pub const FuncGen = struct { ); result.setWeak(llvm.Bool.fromBool(is_weak)); - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); var payload = self.builder.buildExtractValue(result, 0, ""); if (opt_abi_ty != null) { @@ -8406,7 +8409,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const operand_ty = ptr_ty.elemType(); const operand = try self.resolveInst(extra.operand); const is_signed_int = operand_ty.isSignedInt(mod); @@ -8461,7 +8464,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); - const ptr_ty = self.air.typeOf(atomic_load.ptr); + const ptr_ty = self.typeOf(atomic_load.ptr); const ptr_info = ptr_ty.ptrInfo().data; const elem_ty = ptr_info.pointee_type; if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) @@ -8494,7 +8497,7 @@ pub const FuncGen = struct { ) !?*llvm.Value { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); @@ -8517,8 +8520,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const elem_ty = self.typeOf(bin_op.rhs); const module = self.dg.module; const target = module.getTarget(); const dest_ptr_align = ptr_ty.ptrAlignment(mod); @@ -8641,9 +8644,9 @@ pub const FuncGen = struct { fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); - const dest_ptr_ty = self.air.typeOf(bin_op.lhs); + const dest_ptr_ty = self.typeOf(bin_op.lhs); const src_slice = try self.resolveInst(bin_op.rhs); - const src_ptr_ty = self.air.typeOf(bin_op.rhs); + const src_ptr_ty = self.typeOf(bin_op.rhs); const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty); const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); @@ -8663,7 +8666,7 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const un_ty = self.air.typeOf(bin_op.lhs).childType(); + const un_ty = self.typeOf(bin_op.lhs).childType(); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_ptr = try self.resolveInst(bin_op.lhs); @@ -8684,7 +8687,7 @@ pub const FuncGen = struct { fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const un_ty = self.air.typeOf(ty_op.operand); + const un_ty = 
self.typeOf(ty_op.operand); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolveInst(ty_op.operand); @@ -8708,7 +8711,7 @@ pub const FuncGen = struct { fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); return self.buildFloatOp(op, operand_ty, 1, .{operand}); } @@ -8718,7 +8721,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); return self.buildFloatOp(.neg, operand_ty, 1, .{operand}); } @@ -8726,7 +8729,7 @@ pub const FuncGen = struct { fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const llvm_i1 = self.context.intType(1); @@ -8735,7 +8738,7 @@ pub const FuncGen = struct { const params = [_]*llvm.Value{ operand, llvm_i1.constNull() }; const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); const bits = operand_ty.intInfo(mod).bits; @@ -8752,7 +8755,7 @@ pub const FuncGen = struct { fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const params = [_]*llvm.Value{operand}; @@ -8760,7 +8763,7 @@ pub const FuncGen = struct { const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); const bits = operand_ty.intInfo(mod).bits; @@ -8777,7 +8780,7 @@ pub const FuncGen = struct { fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); var bits = operand_ty.intInfo(mod).bits; assert(bits % 8 == 0); @@ -8815,7 +8818,7 @@ pub const FuncGen = struct { const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { @@ -8876,7 +8879,7 @@ pub const FuncGen = struct { fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const un_op = 
self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const enum_ty = self.air.typeOf(un_op); + const enum_ty = self.typeOf(un_op); const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty); const params = [_]*llvm.Value{operand}; @@ -8954,7 +8957,7 @@ pub const FuncGen = struct { fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const enum_ty = self.air.typeOf(un_op); + const enum_ty = self.typeOf(un_op); const llvm_fn = try self.getEnumTagNameFunction(enum_ty); const params = [_]*llvm.Value{operand}; @@ -9083,7 +9086,7 @@ pub const FuncGen = struct { fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const slice_ty = self.air.typeOfIndex(inst); + const slice_ty = self.typeOfIndex(inst); const slice_llvm_ty = try self.dg.lowerType(slice_ty); const error_name_table_ptr = try self.getErrorNameTable(); @@ -9097,7 +9100,7 @@ pub const FuncGen = struct { fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const scalar = try self.resolveInst(ty_op.operand); - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(); return self.builder.buildVectorSplat(len, scalar, ""); } @@ -9120,7 +9123,7 @@ pub const FuncGen = struct { const b = try self.resolveInst(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.air.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(); // LLVM uses integers larger than the length of the first array to // index into the second array. 
This was deemed unnecessarily fragile @@ -9219,8 +9222,8 @@ pub const FuncGen = struct { const reduce = self.air.instructions.items(.data)[inst].reduce; const operand = try self.resolveInst(reduce.operand); - const operand_ty = self.air.typeOf(reduce.operand); - const scalar_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(reduce.operand); + const scalar_ty = self.typeOfIndex(inst); switch (reduce.operation) { .And => return self.builder.buildAndReduce(operand), @@ -9300,12 +9303,12 @@ pub const FuncGen = struct { } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); - const mod = self.dg.module; switch (result_ty.zigTypeTag(mod)) { .Vector => { @@ -9370,7 +9373,7 @@ pub const FuncGen = struct { const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); var field_ptr_payload: Type.Payload.Pointer = .{ .data = .{ - .pointee_type = self.air.typeOf(elem), + .pointee_type = self.typeOf(elem), .@"align" = result_ty.structFieldAlign(i, mod), .@"addrspace" = .generic, }, @@ -9440,7 +9443,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; - const union_ty = self.air.typeOfIndex(inst); + const union_ty = self.typeOfIndex(inst); const union_llvm_ty = try self.dg.lowerType(union_ty); const layout = union_ty.unionGetLayout(mod); const union_obj = union_ty.cast(Type.Payload.Union).?.data; @@ -9643,7 +9646,7 @@ pub const FuncGen = struct { fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const llvm_dest_ty = try self.dg.lowerType(inst_ty); @@ -9830,7 +9833,7 @@ pub const FuncGen = struct { switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const result_ty_info = result_ty.ptrInfo().data; if (result_ty_info.host_size != 0) { @@ -10172,6 +10175,16 @@ pub const FuncGen = struct { ); return call; } + + fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { + const mod = fg.dg.module; + return fg.air.typeOf(inst, mod.intern_pool); + } + + fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { + const mod = fg.dg.module; + return fg.air.typeOfIndex(inst, mod.intern_pool); + } }; fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void { @@ -10833,7 +10846,7 @@ const ParamTypeIterator = struct { if (it.zig_index >= args.len) { return null; } else { - return nextInner(it, fg.air.typeOf(args[it.zig_index])); + return nextInner(it, fg.typeOf(args[it.zig_index])); } } else { return nextInner(it, it.fn_info.param_types[it.zig_index]); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index b8c8466427..41abbde1a0 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -233,7 +233,7 @@ pub const DeclGen = struct { fn resolve(self: *DeclGen, 
inst: Air.Inst.Ref) !IdRef { const mod = self.module; if (self.air.value(inst, mod)) |val| { - const ty = self.air.typeOf(inst); + const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, @@ -1720,10 +1720,11 @@ pub const DeclGen = struct { } fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; + const ip = &mod.intern_pool; // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) return; - } const air_tags = self.air.instructions.items(.tag); const maybe_result_id: ?IdRef = switch (air_tags[inst]) { @@ -1847,7 +1848,7 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocId(); - const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); + const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst)); try self.func.body.emit(self.spv.gpa, opcode, .{ .id_result_type = result_type_id, .id_result = result_id, @@ -1862,7 +1863,7 @@ pub const DeclGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); - const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); + const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst)); // the shift and the base must be the same type in SPIR-V, but in Zig the shift is a smaller int. const shift_id = self.spv.allocId(); @@ -1907,15 +1908,15 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const bin_op = self.air.instructions.items(.data)[inst].bin_op; var lhs_id = try self.resolve(bin_op.lhs); var rhs_id = try self.resolve(bin_op.rhs); const result_ty_ref = try self.resolveType(ty, .direct); - assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module)); - assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module)); + assert(self.typeOf(bin_op.lhs).eql(ty, self.module)); + assert(self.typeOf(bin_op.rhs).eql(ty, self.module)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. 
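The mechanical change repeated across these backend hunks is `self.air.typeOf(...)` becoming `self.typeOf(...)`: Air's type queries now take the intern pool as a parameter, so each code generator grows a pair of thin wrappers that thread it through from the Module. A condensed restatement of that pattern using the spirv `DeclGen` names (the same wrappers are added verbatim at the end of this file's diff, and the LLVM backend's `FuncGen` equivalents appear in the hunk above):

    fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
        const mod = self.module;
        return self.air.typeOf(inst, mod.intern_pool);
    }

    fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
        const mod = self.module;
        return self.air.typeOfIndex(inst, mod.intern_pool);
    }

Call sites then shrink from `self.air.typeOf(op)` to `self.typeOf(op)` without every AIR handler having to name the Module, which is what the bulk of this diff does.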
@@ -1971,8 +1972,8 @@ pub const DeclGen = struct { const lhs = try self.resolve(extra.lhs); const rhs = try self.resolve(extra.rhs); - const operand_ty = self.air.typeOf(extra.lhs); - const result_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(extra.lhs); + const result_ty = self.typeOfIndex(inst); const info = try self.arithmeticTypeInfo(operand_ty); switch (info.class) { @@ -2064,14 +2065,14 @@ pub const DeclGen = struct { fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const mod = self.module; if (self.liveness.isUnused(inst)) return null; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); const b = try self.resolve(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.air.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); @@ -2162,8 +2163,8 @@ pub const DeclGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); const offset_id = try self.resolve(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const result_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOf(bin_op.lhs); + const result_ty = self.typeOfIndex(inst); return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id); } @@ -2173,11 +2174,11 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const offset_id = try self.resolve(bin_op.rhs); - const offset_ty = self.air.typeOf(bin_op.rhs); + const offset_ty = self.typeOf(bin_op.rhs); const offset_ty_ref = try self.resolveType(offset_ty, .direct); - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const negative_offset_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpSNegate, .{ @@ -2298,8 +2299,8 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const bool_ty_id = try self.resolveTypeId(Type.bool); - const ty = self.air.typeOf(bin_op.lhs); - assert(ty.eql(self.air.typeOf(bin_op.rhs), self.module)); + const ty = self.typeOf(bin_op.lhs); + assert(ty.eql(self.typeOf(bin_op.rhs), self.module)); return try self.cmp(op, bool_ty_id, ty, lhs_id, rhs_id); } @@ -2337,8 +2338,8 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const result_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const result_ty = self.typeOfIndex(inst); return try self.bitCast(result_ty, operand_ty, operand_id); } @@ -2347,7 +2348,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); const mod = self.module; @@ -2391,10 
+2392,10 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_id = try self.resolve(ty_op.operand); const operand_info = try self.arithmeticTypeInfo(operand_ty); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); const result_id = self.spv.allocId(); @@ -2418,7 +2419,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = try self.arithmeticTypeInfo(dest_ty); const dest_ty_id = try self.resolveTypeId(dest_ty); @@ -2455,20 +2456,20 @@ pub const DeclGen = struct { fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const field_ty = self.air.typeOfIndex(inst); + const field_ty = self.typeOfIndex(inst); const operand_id = try self.resolve(ty_op.operand); return try self.extractField(field_ty, operand_id, field); } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); @@ -2477,7 +2478,7 @@ pub const DeclGen = struct { fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); @@ -2514,7 +2515,7 @@ pub const DeclGen = struct { const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); // TODO: Make this return a null ptr or something if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2526,7 +2527,7 @@ pub const DeclGen = struct { fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2544,7 +2545,7 @@ pub const DeclGen = struct { fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const un_ty = self.air.typeOf(ty_op.operand); + const un_ty = self.typeOf(ty_op.operand); const mod = self.module; const layout = un_ty.unionGetLayout(mod); @@ -2565,7 +2566,7 @@ pub const DeclGen = struct { const ty_pl = 
self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); @@ -2604,8 +2605,8 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolve(ty_op.operand); - const struct_ptr_ty = self.air.typeOf(ty_op.operand); - const result_ptr_ty = self.air.typeOfIndex(inst); + const struct_ptr_ty = self.typeOf(ty_op.operand); + const result_ptr_ty = self.typeOfIndex(inst); return try self.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index); } @@ -2661,7 +2662,7 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); assert(ptr_ty.ptrAddressSpace() == .generic); const child_ty = ptr_ty.childType(); const child_ty_ref = try self.resolveType(child_ty, .indirect); @@ -2694,7 +2695,7 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.gpa); } - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -2727,7 +2728,7 @@ pub const DeclGen = struct { fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; - const operand_ty = self.air.typeOf(br.operand); + const operand_ty = self.typeOf(br.operand); const mod = self.module; if (operand_ty.hasRuntimeBits(mod)) { @@ -2777,7 +2778,7 @@ pub const DeclGen = struct { fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const operand = try self.resolve(ty_op.operand); if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; @@ -2787,7 +2788,7 @@ pub const DeclGen = struct { fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); @@ -2819,7 +2820,7 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; - const operand_ty = self.air.typeOf(operand); + const operand_ty = self.typeOf(operand); const mod = self.module; if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(operand); @@ -2832,7 +2833,7 @@ pub const DeclGen = struct { fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(); 
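    // `ptr_ty` above came through the new typeOf wrapper; the zero-bit
    // check on `ret_ty` below passes `mod` because deciding whether an
    // interned type has runtime bits requires a lookup in mod.intern_pool.
    // The same Module-threading is why nearly every Type call in these
    // hunks gained a `mod` argument.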
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2853,8 +2854,8 @@ pub const DeclGen = struct { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOf(pl_op.operand); + const payload_ty = self.typeOfIndex(inst); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); const bool_ty_ref = try self.resolveType(Type.bool, .direct); @@ -2911,7 +2912,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { @@ -2934,7 +2935,7 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_union_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOfIndex(inst); const payload_ty = err_union_ty.errorUnionPayload(); const operand_id = try self.resolve(ty_op.operand); const eu_layout = self.errorUnionLayout(payload_ty); @@ -2966,7 +2967,7 @@ pub const DeclGen = struct { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_id = try self.resolve(un_op); - const optional_ty = self.air.typeOf(un_op); + const optional_ty = self.typeOf(un_op); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); @@ -3030,8 +3031,8 @@ pub const DeclGen = struct { const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOf(ty_op.operand); + const payload_ty = self.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -3047,14 +3048,14 @@ pub const DeclGen = struct { const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try self.constBool(true, .direct); } const operand_id = try self.resolve(ty_op.operand); - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3068,7 +3069,7 @@ pub const DeclGen = struct { const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolve(pl_op.operand); - const cond_ty = self.air.typeOf(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) { @@ -3317,7 +3318,7 @@ pub const DeclGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const callee_ty = self.air.typeOf(pl_op.operand); + const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch 
(callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => return self.fail("cannot call function pointers", .{}), @@ -3339,7 +3340,7 @@ pub const DeclGen = struct { // before starting to emit OpFunctionCall instructions. Hence the // temporary params buffer. const arg_id = try self.resolve(arg); - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; params[n_params] = arg_id; @@ -3363,4 +3364,14 @@ pub const DeclGen = struct { return result_id; } + + fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { + const mod = self.module; + return self.air.typeOf(inst, mod.intern_pool); + } + + fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { + const mod = self.module; + return self.air.typeOfIndex(inst, mod.intern_pool); + } }; diff --git a/src/print_air.zig b/src/print_air.zig index e8875ff018..39a244e11f 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -306,6 +306,7 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), .constant => try w.writeConstant(s, inst), + .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), @@ -515,7 +516,7 @@ const Writer = struct { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; - const elem_ty = w.air.typeOfIndex(inst).childType(); + const elem_ty = w.typeOfIndex(inst).childType(); try w.writeType(s, elem_ty); try s.writeAll(", "); try w.writeOperand(s, inst, 0, pl_op.operand); @@ -614,6 +615,14 @@ const Writer = struct { try s.print(", {}", .{val.fmtValue(ty, w.module)}); } + fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; + const ip_index = w.air.instructions.items(.data)[inst].interned; + const ty = ip_index.toType(); + try w.writeType(s, ty); + try s.print(", {}", .{ip_index.toValue().fmtValue(ty, mod)}); + } + fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const extra = w.air.extraData(Air.Asm, ty_pl.payload); @@ -622,7 +631,7 @@ const Writer = struct { var extra_i: usize = extra.end; var op_index: usize = 0; - const ret_ty = w.air.typeOfIndex(inst); + const ret_ty = w.typeOfIndex(inst); try w.writeType(s, ret_ty); if (is_volatile) { @@ -985,4 +994,9 @@ const Writer = struct { try s.print("%{d}", .{inst}); if (dies) try s.writeByte('!'); } + + fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { + const mod = w.module; + return w.air.typeOfIndex(inst, mod.intern_pool); + } }; diff --git a/src/type.zig b/src/type.zig index 259079a26c..94fd4c2eaf 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2827,21 +2827,35 @@ pub const Type = struct { }; } - /// TODO add enums with no fields here pub fn isNoReturn(ty: Type) bool { - switch (ty.tag()) { - .noreturn => return true, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - const names = err_set_obj.names.keys(); - return names.len == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - const names = name_map.keys(); - return names.len == 0; - }, + switch (@enumToInt(ty.ip_index)) { + @enumToInt(InternPool.Index.first_type)...@enumToInt(InternPool.Index.noreturn_type) - 1 => return false, + + @enumToInt(InternPool.Index.noreturn_type) => return true, + + 
@enumToInt(InternPool.Index.noreturn_type) + 1...@enumToInt(InternPool.Index.last_type) => return false, + + @enumToInt(InternPool.Index.first_value)...@enumToInt(InternPool.Index.last_value) => unreachable, + @enumToInt(InternPool.Index.generic_poison) => unreachable, + + // TODO add empty error sets here + // TODO add enums with no fields here else => return false, + + @enumToInt(InternPool.Index.none) => switch (ty.tag()) { + .noreturn => return true, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + return names.len == 0; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + return names.len == 0; + }, + else => return false, + }, } } -- cgit v1.2.3 From bcd4bb8afbea84d86fd8758b581b141e7086b16b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 16:33:16 -0700 Subject: stage2: move named int types to InternPool --- src/Sema.zig | 51 +------- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/arm/CodeGen.zig | 10 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/codegen.zig | 2 +- src/codegen/c/type.zig | 24 ++-- src/codegen/spirv.zig | 2 +- src/type.zig | 301 +++++++++---------------------------------- src/value.zig | 64 ++++----- 10 files changed, 121 insertions(+), 345 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Sema.zig b/src/Sema.zig index f93ceb19e6..d03460385e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6448,7 +6448,7 @@ fn checkCallArgumentCount( .Optional => { var buf: Type.Payload.ElemType = undefined; const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and opt_child.childType().zigTypeTag(mod) == .Fn)) { const msg = msg: { @@ -31421,6 +31421,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -31443,17 +31445,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -31798,6 +31789,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, + .var_args_param_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return sema.getBuiltinType("ExternOptions"), @@ -32977,17 +32969,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .type, .anyerror, @@ -33213,17 +33194,6 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i64 => return .i64_type, .u128 => return .u128_type, .i128 => return .i128_type, - .usize => return .usize_type, - .isize => return .isize_type, - .c_char => return .c_char_type, - .c_short => return .c_short_type, - .c_ushort => return .c_ushort_type, - .c_int => return .c_int_type, - .c_uint => return .c_uint_type, - .c_long => return .c_long_type, - .c_ulong => return .c_ulong_type, - .c_longlong => return 
.c_longlong_type, - .c_ulonglong => return .c_ulonglong_type, .anyopaque => return .anyopaque_type, .bool => return .bool_type, .void => return .void_type, @@ -33664,6 +33634,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -33686,17 +33658,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 2846633275..4671866197 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4326,7 +4326,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; @@ -4353,7 +4353,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = fn_got_addr }); } else unreachable; _ = try self.addInst(.{ @@ -5968,7 +5968,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index eb8cfa9707..ca4a3826aa 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4308,7 +4308,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |_| { unreachable; // unsupported architecture for MachO } else { @@ -4326,7 +4326,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); - try self.genSetReg(Type.initTag(.usize), .lr, mcv); + try self.genSetReg(Type.usize, .lr, mcv); } // TODO: add Instruction.supportedOn @@ -5694,7 +5694,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (extra_offset) { const offset = if (off <= 
math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off })); _ = try self.addInst(.{ .tag = tag, @@ -5710,7 +5710,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } else { const offset = if (off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none); _ = try self.addInst(.{ .tag = tag, @@ -5916,7 +5916,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 4ab798fe9c..488b937141 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1749,7 +1749,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, .data = .{ .i_type = .{ diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index e79a216315..343cc2f90e 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -883,7 +883,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1352,7 +1352,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); } else unreachable; - try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jmpl, diff --git a/src/codegen.zig b/src/codegen.zig index 6846bebe6b..295409781e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -371,7 +371,7 @@ pub fn generateSymbol( // generate length switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.initTag(.usize), + .ty = Type.usize, .val = slice.len, }, code, debug_output, reloc_info)) { .ok => {}, diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index b964d16bd9..d248753670 100644 --- a/src/codegen/c/type.zig +++ 
b/src/codegen/c/type.zig @@ -1359,18 +1359,18 @@ pub const CType = extern union { self.* = undefined; if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) self.init(.void) - else if (ty.isAbiInt(mod)) switch (ty.tag()) { - .usize => self.init(.uintptr_t), - .isize => self.init(.intptr_t), - .c_char => self.init(.char), - .c_short => self.init(.short), - .c_ushort => self.init(.@"unsigned short"), - .c_int => self.init(.int), - .c_uint => self.init(.@"unsigned int"), - .c_long => self.init(.long), - .c_ulong => self.init(.@"unsigned long"), - .c_longlong => self.init(.@"long long"), - .c_ulonglong => self.init(.@"unsigned long long"), + else if (ty.isAbiInt(mod)) switch (ty.ip_index) { + .usize_type => self.init(.uintptr_t), + .isize_type => self.init(.intptr_t), + .c_char_type => self.init(.char), + .c_short_type => self.init(.short), + .c_ushort_type => self.init(.@"unsigned short"), + .c_int_type => self.init(.int), + .c_uint_type => self.init(.@"unsigned int"), + .c_long_type => self.init(.long), + .c_ulong_type => self.init(.@"unsigned long"), + .c_longlong_type => self.init(.@"long long"), + .c_ulonglong_type => self.init(.@"unsigned long long"), else => switch (tagFromIntInfo(ty.intInfo(mod))) { .void => unreachable, else => |t| self.init(t), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 41abbde1a0..90c2d93458 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2499,7 +2499,7 @@ pub const DeclGen = struct { const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. const elem_ty_ref = try self.resolveType(elem_ty, .direct); const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); - if (ptr_ty.isSinglePointer()) { + if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. return try self.accessChain(elem_ptr_ty_ref, ptr_id, &.{index_id}); diff --git a/src/type.zig b/src/type.zig index 205a732710..4e8d0b9e20 100644 --- a/src/type.zig +++ b/src/type.zig @@ -121,17 +121,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, => return .Int, .error_set, @@ -621,19 +610,6 @@ pub const Type = struct { switch (a.tag()) { .generic_poison => unreachable, - // Detect that e.g. u64 != usize, even if the bits match on a particular target. 
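The special-case comment being deleted here (keeping `u64 != usize` even when the bit widths match) is preserved by construction in the new encoding: each named integer type gets its own fixed InternPool index, distinct from the plain `u64`/`i64` entries, so `eql` no longer needs per-tag cases for them. A minimal sketch of why, assuming a `mod: *Module` in scope and the `intType` helper shown later in this series:

    // usize and u64 intern to different indices even on a 64-bit target,
    // so telling them apart is a plain index comparison.
    const a: Type = .{ .ip_index = .usize_type, .legacy = undefined };
    const b = try mod.intType(.unsigned, 64); // interns (or finds) the u64 entry
    std.debug.assert(a.ip_index != b.ip_index);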
- .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .bool, .void, .type, @@ -1013,22 +989,6 @@ pub const Type = struct { switch (ty.tag()) { .generic_poison => unreachable, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Int); - std.hash.autoHash(hasher, ty_tag); - }, - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), @@ -1345,17 +1305,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -1631,17 +1580,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -2020,17 +1958,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -2322,17 +2249,6 @@ pub const Type = struct { .i32 => return Value.initTag(.i32_type), .u64 => return Value.initTag(.u64_type), .i64 => return Value.initTag(.i64_type), - .usize => return Value.initTag(.usize_type), - .isize => return Value.initTag(.isize_type), - .c_char => return Value.initTag(.c_char_type), - .c_short => return Value.initTag(.c_short_type), - .c_ushort => return Value.initTag(.c_ushort_type), - .c_int => return Value.initTag(.c_int_type), - .c_uint => return Value.initTag(.c_uint_type), - .c_long => return Value.initTag(.c_long_type), - .c_ulong => return Value.initTag(.c_ulong_type), - .c_longlong => return Value.initTag(.c_longlong_type), - .c_ulonglong => return Value.initTag(.c_ulonglong_type), .anyopaque => return Value.initTag(.anyopaque_type), .bool => return Value.initTag(.bool_type), .void => return Value.initTag(.void_type), @@ -2462,17 +2378,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .anyerror, .const_slice_u8, @@ -2713,6 +2618,8 @@ pub const Type = struct { .type_info, .generic_poison, => false, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -2734,17 +2641,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .void, .manyptr_u8, @@ -3040,7 +2936,7 @@ pub const Type = struct { .export_options, .extern_options, .@"anyframe", - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, @@ -3089,6 +2985,7 @@ pub const Type = struct { .noreturn => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3130,8 +3027,6 @@ pub 
const Type = struct { return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; }, - .isize, - .usize, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -3153,16 +3048,6 @@ pub const Type = struct { .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, - .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, - .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, - .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, - .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, - .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, - .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, - .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, - .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, @@ -3491,7 +3376,7 @@ pub const Type = struct { .usize, .isize, .@"anyframe", - => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, @@ -3524,6 +3409,7 @@ pub const Type = struct { .type_info => unreachable, .noreturn => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3666,8 +3552,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = result }; }, - .isize, - .usize, .@"anyframe", .anyframe_T, .optional_single_const_pointer, @@ -3694,16 +3578,6 @@ pub const Type = struct { else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, }, - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, @@ -3856,7 +3730,7 @@ pub const Type = struct { .usize, .isize, .@"anyframe", - => return target.cpu.arch.ptrBitWidth(), + => return target.ptrBitWidth(), .c_char => return target.c_type_bit_size(.char), .c_short => return target.c_type_bit_size(.short), @@ -3896,6 +3770,7 @@ pub const Type = struct { .export_options => unreachable, // missing call to resolveTypeFields .extern_options => unreachable, // missing call to resolveTypeFields .type_info => unreachable, // missing call to 
resolveTypeFields + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -4000,8 +3875,6 @@ pub const Type = struct { return payload.len * 8 * elem_size + elem_bit_size; }, - .isize, - .usize, .@"anyframe", .anyframe_T, => return target.ptrBitWidth(), @@ -4040,16 +3913,6 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => return target.ptrBitWidth(), - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .error_set, .error_set_single, .anyerror_void_error_union, @@ -4876,12 +4739,6 @@ pub const Type = struct { }; return switch (ty.tag()) { .i8, - .isize, - .c_char, - .c_short, - .c_int, - .c_long, - .c_longlong, .i16, .i32, .i64, @@ -4903,11 +4760,6 @@ pub const Type = struct { else => return false, }; return switch (ty.tag()) { - .usize, - .c_ushort, - .c_uint, - .c_ulong, - .c_ulonglong, .u1, .u8, .u16, @@ -4938,13 +4790,26 @@ pub const Type = struct { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => unreachable, + .array_type => unreachable, .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), - .struct_type => unreachable, + .optional_type => unreachable, + .error_union_type => unreachable, + .simple_type => |t| switch (t) { + .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, + .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => unreachable, + }, + .struct_type => @panic("TODO"), .union_type => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4965,17 +4830,6 @@ pub const Type = struct { .i64 => return .{ .signedness = .signed, .bits = 64 }, .u128 => return .{ .signedness = .unsigned, .bits = 128 }, .i128 => return .{ .signedness = .signed, .bits = 128 }, - .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, - .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort 
=> return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, @@ -5003,19 +4857,19 @@ pub const Type = struct { }; } - pub fn isNamedInt(self: Type) bool { - return switch (self.tag()) { - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, + pub fn isNamedInt(ty: Type) bool { + return switch (ty.ip_index) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, => true, else => false, @@ -5180,17 +5034,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, => true, else => false, @@ -5284,17 +5127,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .type, .anyerror, @@ -5502,6 +5334,8 @@ pub const Type = struct { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -5524,17 +5358,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -6372,17 +6195,6 @@ pub const Type = struct { i64, u128, i128, - usize, - isize, - c_char, - c_short, - c_ushort, - c_int, - c_uint, - c_long, - c_ulong, - c_longlong, - c_ulonglong, anyopaque, bool, void, @@ -6480,17 +6292,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -6859,9 +6660,13 @@ pub const Type = struct { pub const @"u29" = initTag(.u29); pub const @"u32" = initTag(.u32); pub const @"u64" = initTag(.u64); + pub const @"u128" = initTag(.u128); + pub const @"i8" = initTag(.i8); + pub const @"i16" = initTag(.i16); pub const @"i32" = initTag(.i32); pub const @"i64" = initTag(.i64); + pub const @"i128" = initTag(.i128); pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; @@ -6870,8 +6675,8 @@ pub const Type = struct { pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; pub const @"bool" = initTag(.bool); - pub const @"usize" = initTag(.usize); - pub const @"isize" = initTag(.isize); + pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined }; + pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined }; pub const @"comptime_int": Type = .{ 
.ip_index = .comptime_int_type, .legacy = undefined }; pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; pub const @"void" = initTag(.void); @@ -6879,8 +6684,18 @@ pub const Type = struct { pub const @"anyerror" = initTag(.anyerror); pub const @"anyopaque" = initTag(.anyopaque); pub const @"null" = initTag(.null); + pub const @"undefined" = initTag(.undefined); pub const @"noreturn" = initTag(.noreturn); + pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined }; + pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined }; + pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type, .legacy = undefined }; + pub const @"c_int": Type = .{ .ip_index = .c_int_type, .legacy = undefined }; + pub const @"c_uint": Type = .{ .ip_index = .c_uint_type, .legacy = undefined }; + pub const @"c_long": Type = .{ .ip_index = .c_long_type, .legacy = undefined }; + pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type, .legacy = undefined }; + pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type, .legacy = undefined }; + pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; pub const err_int = Type.u16; diff --git a/src/value.zig b/src/value.zig index 8912209d5e..b0484dfc76 100644 --- a/src/value.zig +++ b/src/value.zig @@ -951,45 +951,45 @@ pub const Value = struct { } return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .u1_type => Type.initTag(.u1), - .u8_type => Type.initTag(.u8), - .i8_type => Type.initTag(.i8), - .u16_type => Type.initTag(.u16), - .i16_type => Type.initTag(.i16), - .u29_type => Type.initTag(.u29), - .u32_type => Type.initTag(.u32), - .i32_type => Type.initTag(.i32), - .u64_type => Type.initTag(.u64), - .i64_type => Type.initTag(.i64), - .u128_type => Type.initTag(.u128), - .i128_type => Type.initTag(.i128), - .usize_type => Type.initTag(.usize), - .isize_type => Type.initTag(.isize), - .c_char_type => Type.initTag(.c_char), - .c_short_type => Type.initTag(.c_short), - .c_ushort_type => Type.initTag(.c_ushort), - .c_int_type => Type.initTag(.c_int), - .c_uint_type => Type.initTag(.c_uint), - .c_long_type => Type.initTag(.c_long), - .c_ulong_type => Type.initTag(.c_ulong), - .c_longlong_type => Type.initTag(.c_longlong), - .c_ulonglong_type => Type.initTag(.c_ulonglong), + .u1_type => Type.u1, + .u8_type => Type.u8, + .i8_type => Type.i8, + .u16_type => Type.u16, + .i16_type => Type.i16, + .u29_type => Type.u29, + .u32_type => Type.u32, + .i32_type => Type.i32, + .u64_type => Type.u64, + .i64_type => Type.i64, + .u128_type => Type.u128, + .i128_type => Type.i128, + .usize_type => Type.usize, + .isize_type => Type.isize, + .c_char_type => Type.c_char, + .c_short_type => Type.c_short, + .c_ushort_type => Type.c_ushort, + .c_int_type => Type.c_int, + .c_uint_type => Type.c_uint, + .c_long_type => Type.c_long, + .c_ulong_type => Type.c_ulong, + .c_longlong_type => Type.c_longlong, + .c_ulonglong_type => Type.c_ulonglong, .c_longdouble_type => Type.c_longdouble, .f16_type => Type.f16, .f32_type => Type.f32, .f64_type => Type.f64, .f80_type => Type.f80, .f128_type => Type.f128, - .anyopaque_type => Type.initTag(.anyopaque), - .bool_type => Type.initTag(.bool), - .void_type => Type.initTag(.void), - .type_type => Type.initTag(.type), - .anyerror_type => Type.initTag(.anyerror), - .comptime_int_type => Type.initTag(.comptime_int), + .anyopaque_type => 
Type.anyopaque, + .bool_type => Type.bool, + .void_type => Type.void, + .type_type => Type.type, + .anyerror_type => Type.anyerror, + .comptime_int_type => Type.comptime_int, .comptime_float_type => Type.comptime_float, - .noreturn_type => Type.initTag(.noreturn), - .null_type => Type.initTag(.null), - .undefined_type => Type.initTag(.undefined), + .noreturn_type => Type.noreturn, + .null_type => Type.null, + .undefined_type => Type.undefined, .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), .anyframe_type => Type.initTag(.@"anyframe"), .const_slice_u8_type => Type.initTag(.const_slice_u8), -- cgit v1.2.3 From 5e636643d2a36c777a607b65cfd1abbb1822ad1e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 May 2023 20:30:25 -0700 Subject: stage2: move many Type encodings to InternPool Notably, `vector`. Additionally, all alternate encodings of `pointer`, `optional`, and `array`. --- src/Air.zig | 13 +- src/InternPool.zig | 92 ++- src/Liveness.zig | 8 +- src/Liveness/Verify.zig | 2 +- src/Module.zig | 41 +- src/Sema.zig | 1075 +++++++++++++------------ src/TypedValue.zig | 32 +- src/arch/aarch64/CodeGen.zig | 80 +- src/arch/arm/CodeGen.zig | 83 +- src/arch/riscv64/CodeGen.zig | 14 +- src/arch/sparc64/CodeGen.zig | 37 +- src/arch/wasm/CodeGen.zig | 168 ++-- src/arch/x86_64/CodeGen.zig | 314 ++++---- src/arch/x86_64/abi.zig | 6 +- src/codegen.zig | 36 +- src/codegen/c.zig | 203 ++--- src/codegen/c/type.zig | 9 +- src/codegen/llvm.zig | 326 ++++---- src/codegen/spirv.zig | 64 +- src/codegen/spirv/Module.zig | 3 +- src/link/Dwarf.zig | 11 +- src/link/Wasm.zig | 4 +- src/print_air.zig | 6 +- src/type.zig | 1770 +++++++++++------------------------------- src/value.zig | 208 ++--- 25 files changed, 1834 insertions(+), 2771 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index 4124788605..64212d3b9a 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1375,7 +1375,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), + .tag_name, .error_name => return Type.const_slice_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); @@ -1384,18 +1384,21 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .slice_elem_val, .ptr_elem_val, .array_elem_val => { const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, .atomic_load => { const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, .atomic_rmw => { const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(), + .reduce, .reduce_optimized => { + const operand_ty = air.typeOf(datas[inst].reduce.operand, ip); + return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType(); + }, .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { diff --git a/src/InternPool.zig b/src/InternPool.zig index 3ecc18c426..295a694e2a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -31,28 +31,10 @@ const KeyAdapter = struct { pub const Key = union(enum) { int_type: IntType, - ptr_type: struct { - elem_type: Index, - sentinel: 
Index = .none, - alignment: u16 = 0, - size: std.builtin.Type.Pointer.Size, - is_const: bool = false, - is_volatile: bool = false, - is_allowzero: bool = false, - address_space: std.builtin.AddressSpace = .generic, - }, - array_type: struct { - len: u64, - child: Index, - sentinel: Index, - }, - vector_type: struct { - len: u32, - child: Index, - }, - optional_type: struct { - payload_type: Index, - }, + ptr_type: PtrType, + array_type: ArrayType, + vector_type: VectorType, + opt_type: Index, error_union_type: struct { error_set_type: Index, payload_type: Index, @@ -87,6 +69,47 @@ pub const Key = union(enum) { pub const IntType = std.builtin.Type.Int; + pub const PtrType = struct { + elem_type: Index, + sentinel: Index = .none, + /// If zero use pointee_type.abiAlignment() + /// When creating pointer types, if alignment is equal to pointee type + /// abi alignment, this value should be set to 0 instead. + alignment: u16 = 0, + /// If this is non-zero it means the pointer points to a sub-byte + /// range of data, which is backed by a "host integer" with this + /// number of bytes. + /// When host_size=pointee_abi_size and bit_offset=0, this must be + /// represented with host_size=0 instead. + host_size: u16 = 0, + bit_offset: u16 = 0, + vector_index: VectorIndex = .none, + size: std.builtin.Type.Pointer.Size = .One, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + /// See src/target.zig defaultAddressSpace function for how to obtain + /// an appropriate value for this field. + address_space: std.builtin.AddressSpace = .generic, + + pub const VectorIndex = enum(u32) { + none = std.math.maxInt(u32), + runtime = std.math.maxInt(u32) - 1, + _, + }; + }; + + pub const ArrayType = struct { + len: u64, + child: Index, + sentinel: Index, + }; + + pub const VectorType = struct { + len: u32, + child: Index, + }; + pub fn hash32(key: Key) u32 { return @truncate(u32, key.hash64()); } @@ -106,7 +129,7 @@ pub const Key = union(enum) { .ptr_type, .array_type, .vector_type, - .optional_type, + .opt_type, .error_union_type, .simple_type, .simple_value, @@ -159,8 +182,8 @@ pub const Key = union(enum) { const b_info = b.vector_type; return std.meta.eql(a_info, b_info); }, - .optional_type => |a_info| { - const b_info = b.optional_type; + .opt_type => |a_info| { + const b_info = b.opt_type; return std.meta.eql(a_info, b_info); }, .error_union_type => |a_info| { @@ -220,7 +243,7 @@ pub const Key = union(enum) { .ptr_type, .array_type, .vector_type, - .optional_type, + .opt_type, .error_union_type, .simple_type, .struct_type, @@ -630,6 +653,7 @@ pub const Tag = enum(u8) { /// data is payload to Vector. type_vector, /// A fully explicitly specified pointer type. + /// TODO actually this is missing some stuff like bit_offset /// data is payload to Pointer. type_pointer, /// An optional type. 
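With `PtrType`, `ArrayType`, and `VectorType` promoted to named keys, constructing these types now routes through the `Module` helpers added further down in this commit (`ptrType`, `arrayType`, `vectorType`, `optionalType`, plus the single-pointer conveniences). A usage sketch under those signatures, assuming a `mod: *Module` in scope:

    // get()/intern() return the same Index for structurally equal keys,
    // so building the same type twice deduplicates and type equality
    // stays an index comparison.
    const vec_ty = try mod.vectorType(.{ .len = 4, .child = .f32_type });
    const ptr_ty = try mod.singleConstPtrType(vec_ty);
    const opt_ty = try mod.optionalType(ptr_ty.ip_index);
    _ = opt_ty;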
@@ -893,7 +917,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, - .type_optional => .{ .optional_type = .{ .payload_type = @intToEnum(Index, data) } }, + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), @@ -971,10 +995,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }); }, - .optional_type => |optional_type| { + .opt_type => |opt_type| { ip.items.appendAssumeCapacity(.{ .tag = .type_optional, - .data = @enumToInt(optional_type.payload_type), + .data = @enumToInt(opt_type), }); }, .error_union_type => |error_union_type| { @@ -1192,3 +1216,13 @@ test "basic usage" { } }); try std.testing.expect(another_array_i32 == array_i32); } + +pub fn childType(ip: InternPool, i: Index) Index { + return switch (ip.indexToKey(i)) { + .ptr_type => |ptr_type| ptr_type.elem_type, + .vector_type => |vector_type| vector_type.child, + .array_type => |array_type| array_type.child, + .opt_type => |child| child, + else => unreachable, + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index 01fbee9e36..19659940af 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -225,6 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, + ip: InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -534,7 +535,7 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -625,7 +626,7 @@ pub fn categorizeOperand( var operand_live: bool = true; for (air.extra[cond_extra.end..][0..2]) |cond_inst| { - if (l.categorizeOperand(air, cond_inst, operand) == .tomb) + if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) operand_live = false; switch (air_tags[cond_inst]) { @@ -872,6 +873,7 @@ fn analyzeInst( data: *LivenessPassData(pass), inst: Air.Inst.Index, ) Allocator.Error!void { + const ip = a.intern_pool; const inst_tags = a.air.instructions.items(.tag); const inst_datas = a.air.instructions.items(.data); @@ -1140,7 +1142,7 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index e05f1814ce..7059fec507 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); diff --git a/src/Module.zig b/src/Module.zig index 5c84b123c1..67ca91266c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5805,7 +5805,7 @@ pub fn analyzeFnBody(mod: *Module, 
func: *Fn, arena: Allocator) SemaError!Air { // is unused so it just has to be a no-op. sema.air_instructions.set(ptr_inst.*, .{ .tag = .alloc, - .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) }, + .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, }); } } @@ -6545,7 +6545,7 @@ pub fn populateTestFunctions( } const decl = mod.declPtr(decl_index); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); + const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).childType(mod); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions @@ -6575,7 +6575,7 @@ pub fn populateTestFunctions( errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len), + .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod), .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), }); try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); @@ -6609,7 +6609,12 @@ pub fn populateTestFunctions( { // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. - const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); + const new_ty = try Type.ptr(arena, mod, .{ + .size = .Slice, + .pointee_type = try tmp_test_fn_ty.copy(arena), + .mutable = false, + .@"addrspace" = .generic, + }); const new_var = try gpa.create(Var); errdefer gpa.destroy(new_var); new_var.* = decl.val.castTag(.variable).?.data.*; @@ -6819,6 +6824,34 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo return i.toType(); } +pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { + const i = try intern(mod, .{ .array_type = info }); + return i.toType(); +} + +pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type { + const i = try intern(mod, .{ .vector_type = info }); + return i.toType(); +} + +pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type { + const i = try intern(mod, .{ .opt_type = child_type }); + return i.toType(); +} + +pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { + const i = try intern(mod, .{ .ptr_type = info }); + return i.toType(); +} + +pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.ip_index }); +} + +pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } diff --git a/src/Sema.zig b/src/Sema.zig index 7389719301..87df2f23e1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -585,13 +585,18 @@ pub const Block = struct { } fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref { + const sema = block.sema; + const mod = sema.mod; return block.addInst(.{ .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector, .data = .{ .ty_pl = .{ - .ty = try block.sema.addType( - try 
Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool), + .ty = try sema.addType( + try mod.vectorType(.{ + .len = sema.typeOf(lhs).vectorLen(mod), + .child = .bool_type, + }), ), - .payload = try block.sema.addExtra(Air.VectorCmp{ + .payload = try sema.addExtra(Air.VectorCmp{ .lhs = lhs, .rhs = rhs, .op = Air.VectorCmp.encodeOp(cmp_op), @@ -1760,7 +1765,7 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.initTag(.const_slice_u8); + const wanted_type = Type.const_slice_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); @@ -1788,7 +1793,8 @@ fn analyzeAsType( } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); @@ -1798,13 +1804,13 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) // var addrs: [err_return_trace_addr_count]usize = undefined; const err_return_trace_addr_count = 32; - const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, sema.mod); - const addrs_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, addr_arr_ty)); + const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod); + const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const st_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty)); + const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); @@ -2101,11 +2107,10 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { const mod = sema.mod; - const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType() else object_ty; + const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; if (inner_ty.zigTypeTag(mod) == .Optional) opt: { - var buf: Type.Payload.ElemType = undefined; - const child_ty = inner_ty.optionalChild(&buf); + const child_ty = inner_ty.optionalChild(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); @@ -2132,7 +2137,7 @@ fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) switch (ty.zigTypeTag(mod)) { .Array => return mem.eql(u8, field_name, "len"), .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); } 
else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { @@ -2504,6 +2509,7 @@ fn coerceResultPtr( dummy_operand: Air.Inst.Ref, trash_block: *Block, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); const pointee_ty = sema.typeOf(dummy_operand); @@ -2547,7 +2553,7 @@ fn coerceResultPtr( return sema.addConstant(ptr_ty, ptr_val); } if (pointee_ty.eql(Type.null, sema.mod)) { - const opt_ty = sema.typeOf(new_ptr).childType(); + const opt_ty = sema.typeOf(new_ptr).childType(mod); const null_inst = try sema.addConstant(opt_ty, Value.null); _ = try block.addBinOp(.store, new_ptr, null_inst); return Air.Inst.Ref.void_value; @@ -3394,7 +3400,7 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) - operand_ty.childType() + operand_ty.childType(mod) else operand_ty; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; @@ -3430,7 +3436,7 @@ fn indexablePtrLen( const mod = sema.mod; const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(mod); - const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; + const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); return sema.fieldVal(block, src, object, "len", src); } @@ -3441,9 +3447,10 @@ fn indexablePtrLenOrNone( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); - if (operand_ty.ptrSize() == .Many) return .none; + if (operand_ty.ptrSize(mod) == .Many) return .none; return sema.fieldVal(block, src, operand, "len", src); } @@ -3529,11 +3536,12 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; // Detect if all stores to an `.alloc` were comptime-known. 
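The recurring edit throughout Sema here is mechanical: type queries such as `childType`, `ptrInfo`, `arrayLen`, and `vectorLen` now take the Module so they can decode interned type indexes rather than legacy tag payloads. A sketch of the new lookup path, assuming Module exposes the pool as an `intern_pool` field; the `InternPool.childType` helper and `Index.toType` appear earlier in this patch:

    // Hypothetical mirror of Type.childType(mod):
    fn childType(ty: Type, mod: *Module) Type {
        // ty.ip_index keys into the InternPool; InternPool.childType
        // unwraps ptr/array/vector/optional keys to the child Index.
        return mod.intern_pool.childType(ty.ip_index).toType();
    }
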
@@ -3589,9 +3597,10 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); ptr_info.mutable = false; const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); @@ -3947,13 +3956,13 @@ fn zirArrayBasePtr( const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); + const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, .Struct => if (elem_ty.isTuple()) { @@ -3962,7 +3971,7 @@ fn zirArrayBasePtr( }, else => {}, } - return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirFieldBasePtr( @@ -3976,18 +3985,18 @@ fn zirFieldBasePtr( const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); + const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } - return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -4129,7 +4138,7 @@ fn validateArrayInitTy( switch (ty.zigTypeTag(mod)) { .Array => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} array elements; found {d}", .{ array_len, extra.init_count, @@ -4138,7 +4147,7 @@ fn validateArrayInitTy( return; }, .Vector => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} vector elements; found {d}", .{ array_len, extra.init_count, @@ -4148,7 +4157,7 @@ fn validateArrayInitTy( }, .Struct => if (ty.isTuple()) { _ = try sema.resolveTypeFields(ty); - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ array_len, extra.init_count, @@ -4194,7 +4203,7 @@ fn zirValidateStructInit( const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try 
sema.resolveInst(field_ptr_extra.lhs); - const agg_ty = sema.typeOf(object_ptr).childType(); + const agg_ty = sema.typeOf(object_ptr).childType(mod); switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, @@ -4350,6 +4359,7 @@ fn validateStructInit( init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. @@ -4425,14 +4435,13 @@ fn validateStructInit( try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } if (root_msg) |msg| { if (struct_ty.castTag(.@"struct")) |struct_obj| { - const mod = sema.mod; const fqn = try struct_obj.data.getFullyQualifiedName(mod); defer gpa.free(fqn); try mod.errNoteNonLazy( @@ -4605,7 +4614,7 @@ fn validateStructInit( try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, field_values[i]); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -4624,8 +4633,8 @@ fn zirValidateArrayInit( const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data; const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr); - const array_ty = sema.typeOf(array_ptr).childType(); - const array_len = array_ty.arrayLen(); + const array_ty = sema.typeOf(array_ptr).childType(mod); + const array_len = array_ty.arrayLen(mod); if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { @@ -4670,10 +4679,10 @@ fn zirValidateArrayInit( // at comptime so we have almost nothing to do here. However, in case of a // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. - if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); - const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val); + const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); } return; @@ -4685,7 +4694,7 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. 
- const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel()); + const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); const element_vals = try sema.arena.alloc(Value, array_len_s); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); @@ -4784,7 +4793,7 @@ fn zirValidateArrayInit( // Our task is to delete all the `elem_ptr` and `store` instructions, and insert // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. - if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { element_vals[instrs.len] = sentinel_val; } @@ -4806,13 +4815,13 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr if (operand_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); - } else switch (operand_ty.ptrSize()) { + } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}), .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}), } - if ((try sema.typeHasOnePossibleValue(operand_ty.childType())) != null) { + if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { // No need to validate the actual pointer value, we don't need it! return; } @@ -5132,7 +5141,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), gop.key_ptr.len), + try Type.array(anon_decl.arena(), gop.key_ptr.len, Value.zero, Type.u8, mod), try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), 0, // default alignment ); @@ -6003,10 +6012,11 @@ fn addDbgVar( air_tag: Air.Inst.Tag, name: []const u8, ) CompileError!void { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (air_tag) { .dbg_var_ptr => { - if (!(try sema.typeHasRuntimeBits(operand_ty.childType()))) return; + if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return; }, .dbg_var_val => { if (!(try sema.typeHasRuntimeBits(operand_ty))) return; @@ -6238,7 +6248,7 @@ fn popErrorReturnTrace( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); @@ -6263,7 +6273,7 @@ fn popErrorReturnTrace( // If non-error, then pop the error return trace by restoring the index. 
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); @@ -6456,16 +6466,15 @@ fn checkCallArgumentCount( switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; + const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const opt_child = callee_ty.optionalChild(&buf); + const opt_child = callee_ty.optionalChild(mod); if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and - opt_child.childType().zigTypeTag(mod) == .Fn)) + opt_child.childType(mod).zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ @@ -6529,7 +6538,7 @@ fn callBuiltin( switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; + const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } @@ -7929,7 +7938,7 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } else if (child_type.zigTypeTag(mod) == .Null) { return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); } - const opt_type = try Type.optional(sema.arena, child_type); + const opt_type = try Type.optional(sema.arena, child_type, mod); return sema.addType(opt_type); } @@ -7949,16 +7958,17 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"); + const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); - const vector_type = try Type.Tag.vector.create(sema.arena, .{ - .len = @intCast(u32, len), - .elem_type = elem_type, + const vector_type = try mod.vectorType(.{ + .len = len, + .child = elem_type.ip_index, }); return sema.addType(vector_type); } @@ -8377,16 +8387,16 @@ fn analyzeOptionalPayloadPtr( const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); - const opt_type = optional_ptr_ty.elemType(); + const opt_type = 
optional_ptr_ty.childType(mod); if (opt_type.zigTypeTag(mod) != .Optional) { return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); } - const child_type = try opt_type.optionalChildAlloc(sema.arena); + const child_type = opt_type.optionalChild(mod); const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(), - .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), + .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { @@ -8401,7 +8411,7 @@ fn analyzeOptionalPayloadPtr( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), + .container_ty = optional_ptr_ty.childType(mod), }), ); } @@ -8414,7 +8424,7 @@ fn analyzeOptionalPayloadPtr( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), + .container_ty = optional_ptr_ty.childType(mod), }), ); } @@ -8448,14 +8458,14 @@ fn zirOptionalPayload( const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const result_ty = switch (operand_ty.zigTypeTag(mod)) { - .Optional => try operand_ty.optionalChildAlloc(sema.arena), + .Optional => operand_ty.optionalChild(mod), .Pointer => t: { - if (operand_ty.ptrSize() != .C) { + if (operand_ty.ptrSize(mod) != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); break :t try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = try ptr_info.pointee_type.copy(sema.arena), .@"align" = ptr_info.@"align", @@ -8569,18 +8579,18 @@ fn analyzeErrUnionPayloadPtr( const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(sema.mod), }); } - const err_union_ty = operand_ty.elemType(); + const err_union_ty = operand_ty.childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), - .@"addrspace" = operand_ty.ptrAddressSpace(), + .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { @@ -8596,7 +8606,7 @@ fn analyzeErrUnionPayloadPtr( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), + .container_ty = operand_ty.childType(mod), }), ); } @@ -8609,7 +8619,7 @@ fn analyzeErrUnionPayloadPtr( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), + .container_ty = operand_ty.childType(mod), }), ); } @@ -8674,13 +8684,13 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { + if 
(operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(sema.mod), }); } - const result_ty = operand_ty.elemType().errorUnionSet(); + const result_ty = operand_ty.childType(mod).errorUnionSet(); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { @@ -10119,7 +10129,7 @@ fn zirSwitchCapture( const operand_is_ref = cond_tag == .switch_cond_ref; const operand_ptr = try sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; @@ -10131,9 +10141,9 @@ fn zirSwitchCapture( if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant( ptr_field_ty, @@ -10150,9 +10160,9 @@ fn zirSwitchCapture( if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); } else { @@ -10235,7 +10245,7 @@ fn zirSwitchCapture( const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { @@ -10311,7 +10321,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node; const operand_ptr = try sema.resolveInst(cond_data.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { @@ -10448,7 +10458,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const cond_index = Zir.refToIndex(extra.data.operand).?; const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable; const target_ty = sema.typeOf(raw_operand); - break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; + break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty; }; const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; @@ -12132,7 +12142,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. 
// - When a Decl is destroyed, it can free the `*Module.EmbedFile`. embed_file.owner_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len), + try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12200,7 +12210,7 @@ fn zirShl( const bit_value = Value.initPayload(&bits_payload.base); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { @@ -12220,7 +12230,7 @@ fn zirShl( } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { @@ -12388,7 +12398,7 @@ fn zirShr( const bit_value = Value.initPayload(&bits_payload.base); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { @@ -12408,7 +12418,7 @@ fn zirShr( } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { @@ -12571,7 +12581,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (val.isUndef()) { return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { - const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen()); + const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { @@ -12768,8 +12778,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); const mod = sema.mod; const ptr_addrspace = p: { - if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(); - if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(); + if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod); + if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod); break :p null; }; @@ -12883,9 +12893,9 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { - .Array => return operand_ty.arrayInfo(), + .Array => return operand_ty.arrayInfo(mod), .Pointer => { - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); switch (ptr_info.size) { // TODO: in the Many case here this should only work if the type // has a sentinel, and this code should compute the length based @@ -12900,7 +12910,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins }, .One => { if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { - return ptr_info.pointee_type.arrayInfo(); + return ptr_info.pointee_type.arrayInfo(mod); } }, .C => {}, @@ -12912,7 +12922,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins return .{ .elem_type = peer_ty.elemType2(mod), .sentinel = null, - .len = operand_ty.arrayLen(), + .len = operand_ty.arrayLen(mod), }; } }, @@ -13035,7 +13045,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); - const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null; + const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { @@ -14022,7 +14032,7 @@ fn intRem( ) CompileError!Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -14484,7 +14494,10 @@ fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { const mod = sema.mod; - const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1; + const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .u1_type, + }) else Type.u1; const types = try sema.arena.alloc(Type, 2); const values = try sema.arena.alloc(Value, 2); @@ -14520,7 +14533,7 @@ fn analyzeArithmetic( const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { + if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) { .One, .Slice => {}, .Many, .C => { const air_tag: Air.Inst.Tag = switch (zir_tag) { @@ -14993,9 +15006,9 @@ fn analyzePtrArithmetic( const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array) - ptr_info.pointee_type.childType() + ptr_info.pointee_type.childType(mod) else ptr_info.pointee_type; @@ -15466,7 +15479,10 @@ fn cmpSelf( if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); if (resolved_type.zigTypeTag(mod) == .Vector) { - const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = resolved_type.vectorLen(mod), + .child = .bool_type, + }); const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return sema.addConstant(result_ty, cmp_val); } @@ -15767,6 +15783,7 @@ fn zirBuiltinSrc( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); @@ -15778,7 +15795,7 @@ fn zirBuiltinSrc( const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1), + try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); @@ -15791,7 +15808,7 @@ fn zirBuiltinSrc( // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len), + try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. 
name.len + 1]), 0, // default alignment ); @@ -16024,7 +16041,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) try Value.Tag.int_u64.create(sema.arena, info.@"align") else @@ -16059,7 +16076,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Array => { - const info = ty.arrayInfo(); + const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 3); // len: comptime_int, field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); @@ -16077,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Vector => { - const info = ty.arrayInfo(); + const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 2); // len: comptime_int, field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); @@ -16095,7 +16112,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Optional => { const field_values = try sema.arena.alloc(Value, 1); // child: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena)); + field_values[0] = try Value.Tag.ty.create(sema.arena, ty.optionalChild(mod)); return sema.addConstant( type_info_ty, @@ -16141,7 +16158,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16250,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16338,7 +16355,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16448,7 +16465,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16490,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16666,14 +16683,15 @@ fn typeInfoNamespaceDecls( decl_vals: *std.ArrayList(Value), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { + const mod = sema.mod; const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; const decls = namespace.decls.keys(); for (decls) |decl_index| { - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; - try sema.mod.ensureDeclAnalyzed(decl_index); + try mod.ensureDeclAnalyzed(decl_index); const new_ns = decl.val.toType().getNamespace().?; try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); continue; @@ -16684,7 +16702,7 @@ fn typeInfoNamespaceDecls( defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16770,9 +16788,9 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi .Vector => { const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); - return Type.Tag.vector.create(sema.arena, .{ - .len = operand.vectorLen(), - .elem_type = log2_elem_ty, + return mod.vectorType(.{ + .len = operand.vectorLen(mod), + .child = log2_elem_ty.ip_index, }); }, else => {}, @@ -17207,7 +17225,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr _ = try sema.analyzeBodyInner(&sub_block, body); const operand_ty = sema.typeOf(operand); - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); const res_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = err_union_ty.errorUnionPayload(), .@"addrspace" = ptr_info.@"addrspace", @@ -17398,6 +17416,7 @@ fn retWithErrTracing( ret_tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const need_check = switch (is_non_err) { .bool_true => { _ = try block.addUnOp(ret_tag, operand); @@ -17409,7 +17428,7 @@ fn retWithErrTracing( const gpa = sema.gpa; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const return_err_fn = try sema.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; @@ -17755,7 +17774,7 @@ fn structInitEmpty( fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { const mod = sema.mod; - 
const arr_len = obj_ty.arrayLen(); + const arr_len = obj_ty.arrayLen(mod); if (arr_len != 0) { if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); @@ -17763,7 +17782,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel()) |sentinel| { + if (obj_ty.sentinel(mod)) |sentinel| { const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); return sema.addConstant(obj_ty, val); } else { @@ -18199,6 +18218,7 @@ fn zirArrayInit( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -18208,8 +18228,7 @@ fn zirArrayInit( assert(args.len >= 2); // array_ty + at least one element const array_ty = try sema.resolveType(block, src, args[0]); - const sentinel_val = array_ty.sentinel(); - const mod = sema.mod; + const sentinel_val = array_ty.sentinel(mod); const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); @@ -18489,14 +18508,16 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { } fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { + const mod = sema.mod; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const opt_ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod); if (sema.owner_func != null and sema.owner_func.?.calls_or_awaits_errorable_fn and - sema.mod.comp.bin_file.options.error_return_tracing and - sema.mod.backendSupportsFeature(.error_return_trace)) + mod.comp.bin_file.options.error_return_tracing and + mod.backendSupportsFeature(.error_return_trace)) { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } @@ -18585,8 +18606,11 @@ fn zirUnaryMath( switch (operand_ty.zigTypeTag(mod)) { .Vector => { const scalar_ty = operand_ty.scalarType(mod); - const vec_len = operand_ty.vectorLen(); - const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); + const vec_len = operand_ty.vectorLen(mod); + const result_ty = try mod.vectorType(.{ + .len = vec_len, + .child = scalar_ty.ip_index, + }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); @@ -18730,12 +18754,15 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const len_val = struct_val[0]; const child_val = struct_val[1]; - const len = len_val.toUnsignedInt(mod); + const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); - const ty = try Type.vector(sema.arena, len, try child_ty.copy(sema.arena)); + const ty = try mod.vectorType(.{ + .len = len, + .child = child_ty.ip_index, + }); return sema.addType(ty); }, .Float => { @@ -18872,7 +18899,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const child_ty = try child_val.toType().copy(sema.arena); - const ty = try Type.optional(sema.arena, child_ty); + const 
ty = try Type.optional(sema.arena, child_ty, mod); return sema.addType(ty); }, .ErrorUnion => { @@ -18912,7 +18939,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod); + const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod); const kv = try mod.getErrorValue(name_str); const gop = names.getOrPutAssumeCapacity(kv.key); @@ -19038,7 +19065,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const value_val = field_struct_val[1]; const field_name = try name_val.toAllocatedBytes( - Type.initTag(.const_slice_u8), + Type.const_slice_u8, new_decl_arena_allocator, sema.mod, ); @@ -19215,7 +19242,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const alignment_val = field_struct_val[2]; const field_name = try name_val.toAllocatedBytes( - Type.initTag(.const_slice_u8), + Type.const_slice_u8, new_decl_arena_allocator, sema.mod, ); @@ -19482,7 +19509,7 @@ fn reifyStruct( } const field_name = try name_val.toAllocatedBytes( - Type.initTag(.const_slice_u8), + Type.const_slice_u8, new_decl_arena_allocator, mod, ); @@ -19626,7 +19653,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst try sema.checkPtrOperand(block, ptr_src, ptr_ty); - var ptr_info = ptr_ty.ptrInfo().data; + var ptr_info = ptr_ty.ptrInfo(mod); const src_addrspace = ptr_info.@"addrspace"; if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) { const msg = msg: { @@ -19641,7 +19668,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst ptr_info.@"addrspace" = dest_addrspace; const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) - try Type.optional(sema.arena, dest_ptr_ty) + try Type.optional(sema.arena, dest_ptr_ty, mod) else dest_ptr_ty; @@ -19731,6 +19758,7 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) } fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, ty_src, inst_data.operand); @@ -19738,10 +19766,10 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod); + const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -19842,7 +19870,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const elem_ty = ptr_ty.elemType2(mod); const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { const msg = msg: { const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19987,8 +20015,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air try sema.checkPtrType(block, dest_ty_src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); - const operand_info = operand_ty.ptrInfo().data; - const dest_info = dest_ty.ptrInfo().data; + const operand_info = operand_ty.ptrInfo(mod); + const dest_info = dest_ty.ptrInfo(mod); if (!operand_info.mutable and dest_info.mutable) { const msg = msg: { const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{}); @@ -20042,12 +20070,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result if (dest_ty.zigTypeTag(mod) == .Optional) { - var buf: Type.Payload.ElemType = undefined; - var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data; + var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info)); + break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod); } else { - var dest_ptr_info = dest_ty.ptrInfo().data; + var dest_ptr_info = dest_ty.ptrInfo(mod); dest_ptr_info.@"align" = operand_align; break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info); } @@ -20110,6 +20137,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -20117,7 +20145,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData const operand_ty = sema.typeOf(operand); try sema.checkPtrOperand(block, operand_src, operand_ty); - var ptr_info = operand_ty.ptrInfo().data; + var ptr_info = operand_ty.ptrInfo(mod); ptr_info.mutable = true; const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); @@ -20130,6 +20158,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData } fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -20137,7 +20166,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const operand_ty = sema.typeOf(operand); try sema.checkPtrOperand(block, operand_src, operand_ty); - var ptr_info = operand_ty.ptrInfo().data; + var ptr_info = operand_ty.ptrInfo(mod); ptr_info.@"volatile" = false; 
     const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
@@ -20163,7 +20192,10 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
     const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
     const dest_ty = if (is_vector)
-        try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
+        try mod.vectorType(.{
+            .len = operand_ty.vectorLen(mod),
+            .child = dest_scalar_ty.ip_index,
+        })
     else
         dest_scalar_ty;
@@ -20218,7 +20250,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         }
         var elem_buf: Value.ElemValueBuffer = undefined;
-        const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
+        const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod));
         for (elems, 0..) |*elem, i| {
             const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
             elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
@@ -20245,7 +20277,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     try sema.checkPtrOperand(block, ptr_src, ptr_ty);
-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
     ptr_info.@"align" = dest_align;
     var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
     if (ptr_ty.zigTypeTag(mod) == .Optional) {
@@ -20314,8 +20346,11 @@ fn zirBitCount(
     const result_scalar_ty = try mod.smallestUnsignedInt(bits);
     switch (operand_ty.zigTypeTag(mod)) {
         .Vector => {
-            const vec_len = operand_ty.vectorLen();
-            const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty);
+            const vec_len = operand_ty.vectorLen(mod);
+            const result_ty = try mod.vectorType(.{
+                .len = vec_len,
+                .child = result_scalar_ty.ip_index,
+            });
             if (try sema.resolveMaybeUndefVal(operand)) |val| {
                 if (val.isUndef()) return sema.addConstUndef(result_ty);
@@ -20388,7 +20423,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (val.isUndef()) return sema.addConstUndef(operand_ty);
-            const vec_len = operand_ty.vectorLen();
+            const vec_len = operand_ty.vectorLen(mod);
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
@@ -20437,7 +20472,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             if (val.isUndef()) return sema.addConstUndef(operand_ty);
-            const vec_len = operand_ty.vectorLen();
+            const vec_len = operand_ty.vectorLen(mod);
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
@@ -20546,7 +20581,7 @@ fn checkInvalidPtrArithmetic(
 ) CompileError!void {
     const mod = sema.mod;
     switch (try ty.zigTypeTagOrPoison(mod)) {
-        .Pointer => switch (ty.ptrSize()) {
+        .Pointer => switch (ty.ptrSize(mod)) {
             .One, .Slice => return,
             .Many, .C => return sema.fail(
                 block,
@@ -20676,7 +20711,7 @@ fn checkNumericType(
     const mod = sema.mod;
     switch (ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
-        .Vector => switch (ty.childType().zigTypeTag(mod)) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
            .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
            else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
        },
@@ -20726,7 +20761,7 @@ fn checkAtomicPtrOperand(
     const ptr_ty = sema.typeOf(ptr);
     const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
-        .Pointer => ptr_ty.ptrInfo().data,
+        .Pointer => ptr_ty.ptrInfo(mod),
         else => {
             const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
@@ -20797,7 +20832,7 @@ fn checkIntOrVector(
     switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType();
+            const elem_ty = operand_ty.childType(mod);
             switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
@@ -20821,7 +20856,7 @@ fn checkIntOrVectorAllowComptime(
     switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int, .ComptimeInt => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType();
+            const elem_ty = operand_ty.childType(mod);
             switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
@@ -20870,7 +20905,7 @@ fn checkSimdBinOp(
     const rhs_ty = sema.typeOf(uncasted_rhs);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null;
+    var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
     const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
@@ -20912,8 +20947,8 @@ fn checkVectorizableBinaryOperands(
     };
     if (lhs_is_vector and rhs_is_vector) {
-        const lhs_len = lhs_ty.arrayLen();
-        const rhs_len = rhs_ty.arrayLen();
+        const lhs_len = lhs_ty.arrayLen(mod);
+        const rhs_len = rhs_ty.arrayLen(mod);
         if (lhs_len != rhs_len) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
@@ -20966,7 +21001,7 @@ fn resolveExportOptions(
     const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
     const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
-    const name_ty = Type.initTag(.const_slice_u8);
+    const name_ty = Type.const_slice_u8;
     const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
     const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
@@ -20975,7 +21010,7 @@ fn resolveExportOptions(
     const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
     const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
-    const section_ty = Type.initTag(.const_slice_u8);
+    const section_ty = Type.const_slice_u8;
     const section = if (section_opt_val.optionalValue(mod)) |section_val|
         try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
     else
@@ -21087,7 +21122,7 @@ fn zirCmpxchg(
         return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
     }
-    const result_ty = try Type.optional(sema.arena, elem_ty);
+    const result_ty = try Type.optional(sema.arena, elem_ty, mod);
     // special case zero bit types
     if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
@@ -21133,6 +21168,7 @@ fn zirCmpxchg(
 }
 
 fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -21141,9 +21177,9 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     const scalar = try sema.resolveInst(extra.rhs);
     const scalar_ty = sema.typeOf(scalar);
     try sema.checkVectorElemType(block, scalar_src, scalar_ty);
-    const vector_ty = try Type.Tag.vector.create(sema.arena, .{
+    const vector_ty = try mod.vectorType(.{
         .len = len,
-        .elem_type = scalar_ty,
+        .child = scalar_ty.ip_index,
     });
     if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
         if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty);
@@ -21172,7 +21208,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
     }
-    const scalar_ty = operand_ty.childType();
+    const scalar_ty = operand_ty.childType(mod);
     // Type-check depending on operation.
     switch (operation) {
@@ -21190,7 +21226,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         },
     }
-    const vec_len = operand_ty.vectorLen();
+    const vec_len = operand_ty.vectorLen(mod);
     if (vec_len == 0) {
         // TODO re-evaluate if we should introduce a "neutral value" for some operations,
         // e.g. zero for add and one for mul.
@@ -21243,12 +21279,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var mask_ty = sema.typeOf(mask);
     const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(mask).arrayLen(),
+        .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
         else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
     };
-    mask_ty = try Type.Tag.vector.create(sema.arena, .{
-        .len = mask_len,
-        .elem_type = Type.i32,
+    mask_ty = try mod.vectorType(.{
+        .len = @intCast(u32, mask_len),
+        .child = .i32_type,
     });
     mask = try sema.coerce(block, mask_ty, mask, mask_src);
     const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
@@ -21272,13 +21308,13 @@ fn analyzeShuffle(
     var a = a_arg;
     var b = b_arg;
-    const res_ty = try Type.Tag.vector.create(sema.arena, .{
+    const res_ty = try mod.vectorType(.{
         .len = mask_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.ip_index,
     });
     var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(a).arrayLen(),
+        .Array, .Vector => sema.typeOf(a).arrayLen(mod),
         .Undefined => null,
         else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
@@ -21286,7 +21322,7 @@ fn analyzeShuffle(
         }),
     };
     var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(b).arrayLen(),
+        .Array, .Vector => sema.typeOf(b).arrayLen(mod),
         .Undefined => null,
         else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
@@ -21296,16 +21332,16 @@ fn analyzeShuffle(
     if (maybe_a_len == null and maybe_b_len == null) {
         return sema.addConstUndef(res_ty);
     }
-    const a_len = maybe_a_len orelse maybe_b_len.?;
-    const b_len = maybe_b_len orelse a_len;
+    const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?);
+    const b_len = @intCast(u32, maybe_b_len orelse a_len);
-    const a_ty = try Type.Tag.vector.create(sema.arena, .{
+    const a_ty = try mod.vectorType(.{
         .len = a_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.ip_index,
     });
-    const b_ty = try Type.Tag.vector.create(sema.arena, .{
+    const b_ty = try mod.vectorType(.{
         .len = b_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.ip_index,
     });
     if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
@@ -21437,15 +21473,21 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const pred_ty = sema.typeOf(pred_uncoerced);
     const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
-        .Vector, .Array => pred_ty.arrayLen(),
+        .Vector, .Array => pred_ty.arrayLen(mod),
         else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
     };
-    const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64);
+    const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
-    const bool_vec_ty = try Type.vector(sema.arena, vec_len, Type.bool);
+    const bool_vec_ty = try mod.vectorType(.{
+        .len = vec_len,
+        .child = .bool_type,
+    });
     const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);
-    const vec_ty = try Type.vector(sema.arena, vec_len, elem_ty);
+    const vec_ty = try mod.vectorType(.{
+        .len = vec_len,
+        .child = elem_ty.ip_index,
+    });
     const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
     const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);
@@ -21854,7 +21896,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     }
     try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
-    const field_ptr_ty_info = field_ptr_ty.ptrInfo().data;
+    const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);
     var ptr_ty_data: Type.Payload.Pointer.Data = .{
         .pointee_type = parent_ty.structFieldType(field_index),
@@ -22052,8 +22094,8 @@ fn analyzeMinMax(
     }
     const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
-        const elem_ty = orig_ty.childType();
-        const len = orig_ty.vectorLen();
+        const elem_ty = orig_ty.childType(mod);
+        const len = orig_ty.vectorLen(mod);
         if (len == 0) break :blk orig_ty;
         if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
@@ -22068,7 +22110,10 @@ fn analyzeMinMax(
         }
         const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
-        break :blk try Type.vector(sema.arena, len, refined_elem_ty);
+        break :blk try mod.vectorType(.{
+            .len = len,
+            .child = refined_elem_ty.ip_index,
+        });
     } else blk: {
         if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
         if (val.isUndef()) break :blk orig_ty; // can't refine undef
@@ -22129,8 +22174,8 @@ fn analyzeMinMax(
         if (known_undef) break :refine; // can't refine undef
         const unrefined_ty = sema.typeOf(cur_minmax.?);
         const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;
-        const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty;
-        const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty;
+        const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty;
+        const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty;
         if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats
@@ -22150,7 +22195,10 @@ fn analyzeMinMax(
         const final_elem_ty = try mod.intFittingRange(min_val, max_val);
         const final_ty = if (is_vector)
-            try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty)
+            try mod.vectorType(.{
+                .len = unrefined_ty.vectorLen(mod),
+                .child = final_elem_ty.ip_index,
+            })
         else
             final_elem_ty;
@@ -22165,7 +22213,7 @@ fn analyzeMinMax(
 fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
     const mod = sema.mod;
-    const info = sema.typeOf(ptr).ptrInfo().data;
+    const info = sema.typeOf(ptr).ptrInfo(mod);
     if (info.size == .One) {
         // Already an array pointer.
         return ptr;
@@ -22659,7 +22707,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const body = sema.code.extra[extra_index..][0..body_len];
         extra_index += body.len;
-        const ty = Type.initTag(.const_slice_u8);
+        const ty = Type.const_slice_u8;
         const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
         if (val.isGenericPoison()) {
             break :blk FuncLinkSection{ .generic = {} };
@@ -22943,7 +22991,7 @@ fn resolveExternOptions(
     const name_ref = try sema.fieldVal(block, src, options, "name", name_src);
     const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known");
-    const name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
+    const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
     const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src);
     const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known");
@@ -22957,7 +23005,7 @@ fn resolveExternOptions(
     const library_name = if (!library_name_val.isNull(mod)) blk: {
         const payload = library_name_val.castTag(.opt_payload).?.data;
-        const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
+        const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
         if (library_name.len == 0) {
             return sema.fail(block, library_src, "library name cannot be empty", .{});
         }
@@ -22994,7 +23042,7 @@ fn zirBuiltinExtern(
     if (!ty.isPtrAtRuntime(mod)) {
         return sema.fail(block, ty_src, "expected (optional) pointer", .{});
     }
-    if (!try sema.validateExternType(ty.childType(), .other)) {
+    if (!try sema.validateExternType(ty.childType(mod), .other)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
            errdefer msg.destroy(sema.gpa);
@@ -23014,7 +23062,7 @@ fn zirBuiltinExtern(
     };
     if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
-        ty = try Type.optional(sema.arena, ty);
+        ty = try Type.optional(sema.arena, ty, mod);
     }
     // TODO check duplicate extern
@@ -23194,7 +23242,7 @@ fn validateRunTimeType(
         => return false,
         .Pointer => {
-            const elem_ty = ty.childType();
+            const elem_ty = ty.childType(mod);
             switch (elem_ty.zigTypeTag(mod)) {
                 .Opaque => return true,
                 .Fn => return elem_ty.isFnOrHasRuntimeBits(mod),
@@ -23204,11 +23252,10 @@ fn validateRunTimeType(
         .Opaque => return is_extern,
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            const child_ty = ty.optionalChild(&buf);
+            const child_ty = ty.optionalChild(mod);
             return sema.validateRunTimeType(child_ty, is_extern);
         },
-        .Array, .Vector => ty = ty.elemType(),
+        .Array, .Vector => ty = ty.childType(mod),
         .ErrorUnion => ty = ty.errorUnionPayload(),
@@ -23277,7 +23324,7 @@ fn explainWhyTypeIsComptimeInner(
         },
         .Array, .Vector => {
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
         },
         .Pointer => {
             const elem_ty = ty.elemType2(mod);
@@ -23295,12 +23342,11 @@ fn explainWhyTypeIsComptimeInner(
                 }
                 return;
             }
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
         },
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(&buf), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
         },
         .ErrorUnion => {
             try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set);
@@ -23451,7 +23497,7 @@ fn explainWhyTypeIsNotExtern(
             if (ty.isSlice(mod)) {
                 try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
             } else {
-                const pointee_ty = ty.childType();
+                const pointee_ty = ty.childType(mod);
                 try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
                 try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty);
             }
@@ -23698,7 +23744,7 @@ fn panicWithMsg(
         .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
     });
     const null_stack_trace = try sema.addConstant(
-        try Type.optional(arena, ptr_stack_trace_ty),
+        try Type.optional(arena, ptr_stack_trace_ty, mod),
         Value.null,
     );
     const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value };
@@ -23927,7 +23973,7 @@ fn fieldVal(
     const is_pointer_to = object_ty.isSinglePointer(mod);
     const inner_ty = if (is_pointer_to)
-        object_ty.childType()
+        object_ty.childType(mod)
     else
         object_ty;
@@ -23936,12 +23982,12 @@ fn fieldVal(
             if (mem.eql(u8, field_name, "len")) {
                 return sema.addConstant(
                     Type.usize,
-                    try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
+                    try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)),
                 );
             } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) {
-                const ptr_info = object_ty.ptrInfo().data;
+                const ptr_info = object_ty.ptrInfo(mod);
                 const result_ty = try Type.ptr(sema.arena, sema.mod, .{
-                    .pointee_type = ptr_info.pointee_type.childType(),
+                    .pointee_type = ptr_info.pointee_type.childType(mod),
                     .sentinel = ptr_info.sentinel,
                     .@"align" = ptr_info.@"align",
                     .@"addrspace" = ptr_info.@"addrspace",
@@ -23964,7 +24010,7 @@ fn fieldVal(
             }
         },
         .Pointer => {
-            const ptr_info = inner_ty.ptrInfo().data;
+            const ptr_info = inner_ty.ptrInfo(mod);
             if (ptr_info.size == .Slice) {
                 if (mem.eql(u8, field_name, "ptr")) {
                     const slice = if (is_pointer_to)
@@ -24107,7 +24153,7 @@ fn fieldPtr(
     const object_ptr_src = src; // TODO better source location
     const object_ptr_ty = sema.typeOf(object_ptr);
     const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => object_ptr_ty.elemType(),
+        .Pointer => object_ptr_ty.childType(mod),
        else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}),
     };
@@ -24117,7 +24163,7 @@ fn fieldPtr(
     const is_pointer_to = object_ty.isSinglePointer(mod);
     const inner_ty = if (is_pointer_to)
-        object_ty.childType()
+        object_ty.childType(mod)
     else
         object_ty;
@@ -24128,7 +24174,7 @@ fn fieldPtr(
                 defer anon_decl.deinit();
                 return sema.analyzeDeclRef(try anon_decl.finish(
                     Type.usize,
-                    try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
+                    try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)),
                     0, // default alignment
                 ));
             } else {
@@ -24154,9 +24200,9 @@ fn fieldPtr(
                 const result_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .pointee_type = slice_ptr_ty,
-                    .mutable = attr_ptr_ty.ptrIsMutable(),
+                    .mutable = attr_ptr_ty.ptrIsMutable(mod),
                     .@"volatile" = attr_ptr_ty.isVolatilePtr(),
-                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(),
+                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
                 });
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
@@ -24175,9 +24221,9 @@ fn fieldPtr(
             } else if (mem.eql(u8, field_name, "len")) {
                 const result_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .pointee_type = Type.usize,
-                    .mutable = attr_ptr_ty.ptrIsMutable(),
+                    .mutable = attr_ptr_ty.ptrIsMutable(mod),
                     .@"volatile" = attr_ptr_ty.isVolatilePtr(),
-                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(),
+                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
                 });
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
@@ -24329,14 +24375,14 @@ fn fieldCallBind(
     const mod = sema.mod;
     const raw_ptr_src = src; // TODO better source location
     const raw_ptr_ty = sema.typeOf(raw_ptr);
-    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
-        raw_ptr_ty.childType()
+    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
+        raw_ptr_ty.childType(mod)
     else
         return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)});
     // Optionally dereference a second pointer to get the concrete type.
-    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One;
-    const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
+    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
+    const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty;
     const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
     const object_ptr = if (is_double_ptr)
         try sema.analyzeLoad(block, src, raw_ptr, src)
@@ -24404,9 +24450,9 @@ fn fieldCallBind(
                     // zig fmt: off
                     if (first_param_type.isGenericPoison() or (
                         first_param_type.zigTypeTag(mod) == .Pointer and
-                        (first_param_type.ptrSize() == .One or
-                        first_param_type.ptrSize() == .C) and
-                        first_param_type.childType().eql(concrete_ty, sema.mod)))
+                        (first_param_type.ptrSize(mod) == .One or
+                        first_param_type.ptrSize(mod) == .C) and
+                        first_param_type.childType(mod).eql(concrete_ty, sema.mod)))
                     {
                     // zig fmt: on
                         // Note that if the param type is generic poison, we know that it must
@@ -24425,8 +24471,7 @@ fn fieldCallBind(
                             .arg0_inst = deref,
                         } };
                     } else if (first_param_type.zigTypeTag(mod) == .Optional) {
-                        var opt_buf: Type.Payload.ElemType = undefined;
-                        const child = first_param_type.optionalChild(&opt_buf);
+                        const child = first_param_type.optionalChild(mod);
                         if (child.eql(concrete_ty, sema.mod)) {
                             const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                             return .{ .method = .{
@@ -24434,8 +24479,8 @@ fn fieldCallBind(
                                 .arg0_inst = deref,
                             } };
                         } else if (child.zigTypeTag(mod) == .Pointer and
-                            child.ptrSize() == .One and
-                            child.childType().eql(concrete_ty, sema.mod))
+                            child.ptrSize(mod) == .One and
+                            child.childType(mod).eql(concrete_ty, sema.mod))
                         {
                             return .{ .method = .{
                                 .func_inst = decl_val,
@@ -24482,15 +24527,15 @@ fn finishFieldCallBind(
     field_index: u32,
     object_ptr: Air.Inst.Ref,
 ) CompileError!ResolvedFieldCallee {
+    const mod = sema.mod;
     const arena = sema.arena;
     const ptr_field_ty = try Type.ptr(arena, sema.mod, .{
         .pointee_type = field_ty,
-        .mutable = ptr_ty.ptrIsMutable(),
-        .@"addrspace" = ptr_ty.ptrAddressSpace(),
+        .mutable = ptr_ty.ptrIsMutable(mod),
+        .@"addrspace" = ptr_ty.ptrAddressSpace(mod),
     });
-    const mod = sema.mod;
-    const container_ty = ptr_ty.childType();
+    const container_ty = ptr_ty.childType(mod);
     if (container_ty.zigTypeTag(mod) == .Struct) {
         if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
             return .{ .direct = try sema.addConstant(field_ty, default_val) };
@@ -24618,7 +24663,7 @@ fn structFieldPtrByIndex(
     const struct_obj = struct_ty.castTag(.@"struct").?.data;
     const field = struct_obj.fields.values()[field_index];
     const struct_ptr_ty = sema.typeOf(struct_ptr);
-    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data;
+    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
     var ptr_ty_data: Type.Payload.Pointer.Data = .{
         .pointee_type = field.ty,
@@ -24696,7 +24741,7 @@ fn structFieldPtrByIndex(
             ptr_field_ty,
             try Value.Tag.field_ptr.create(sema.arena, .{
                 .container_ptr = struct_ptr_val,
-                .container_ty = struct_ptr_ty.childType(),
+                .container_ty = struct_ptr_ty.childType(mod),
                 .field_index = field_index,
             }),
         );
@@ -24846,9 +24891,9 @@ fn unionFieldPtr(
     const field = union_obj.fields.values()[field_index];
     const ptr_field_ty = try Type.ptr(arena, sema.mod, .{
         .pointee_type = field.ty,
-        .mutable = union_ptr_ty.ptrIsMutable(),
+        .mutable = union_ptr_ty.ptrIsMutable(mod),
         .@"volatile" = union_ptr_ty.isVolatilePtr(),
-        .@"addrspace" = union_ptr_ty.ptrAddressSpace(),
+        .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod),
     });
     const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
@@ -25009,7 +25054,7 @@ fn elemPtr(
     const indexable_ptr_ty = sema.typeOf(indexable_ptr);
     const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => indexable_ptr_ty.elemType(),
+        .Pointer => indexable_ptr_ty.childType(mod),
         else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
     };
     try checkIndexable(sema, block, src, indexable_ty);
@@ -25046,7 +25091,7 @@ fn elemPtrOneLayerOnly(
     try checkIndexable(sema, block, src, indexable_ty);
-    switch (indexable_ty.ptrSize()) {
+    switch (indexable_ty.ptrSize(mod)) {
         .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
         .Many, .C => {
             const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -25065,7 +25110,7 @@ fn elemPtrOneLayerOnly(
             return block.addPtrElemPtr(indexable, elem_index, result_ty);
         },
         .One => {
-            assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
+            assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
            return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
        },
    }
@@ -25091,7 +25136,7 @@ fn elemVal(
     const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
     switch (indexable_ty.zigTypeTag(mod)) {
-        .Pointer => switch (indexable_ty.ptrSize()) {
+        .Pointer => switch (indexable_ty.ptrSize(mod)) {
             .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
             .Many, .C => {
                 const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -25112,7 +25157,7 @@ fn elemVal(
                 return block.addBinOp(.ptr_elem_val, indexable, elem_index);
             },
             .One => {
-                assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
+                assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
                 const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
                 return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
             },
@@ -25171,7 +25216,7 @@ fn tupleFieldPtr(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const tuple_ptr_ty = sema.typeOf(tuple_ptr);
-    const tuple_ty = tuple_ptr_ty.childType();
+    const tuple_ty = tuple_ptr_ty.childType(mod);
     _ = try sema.resolveTypeFields(tuple_ty);
     const field_count = tuple_ty.structFieldCount();
@@ -25188,9 +25233,9 @@ fn tupleFieldPtr(
     const field_ty = tuple_ty.structFieldType(field_index);
     const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = field_ty,
-        .mutable = tuple_ptr_ty.ptrIsMutable(),
+        .mutable = tuple_ptr_ty.ptrIsMutable(mod),
         .@"volatile" = tuple_ptr_ty.isVolatilePtr(),
-        .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(),
+        .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod),
     });
     if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
@@ -25271,10 +25316,10 @@ fn elemValArray(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const array_ty = sema.typeOf(array);
-    const array_sent = array_ty.sentinel();
-    const array_len = array_ty.arrayLen();
+    const array_sent = array_ty.sentinel(mod);
+    const array_len = array_ty.arrayLen(mod);
     const array_len_s = array_len + @boolToInt(array_sent != null);
-    const elem_ty = array_ty.childType();
+    const elem_ty = array_ty.childType(mod);
     if (array_len_s == 0) {
         return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
@@ -25335,9 +25380,9 @@ fn elemPtrArray(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const array_ptr_ty = sema.typeOf(array_ptr);
-    const array_ty = array_ptr_ty.childType();
-    const array_sent = array_ty.sentinel() != null;
-    const array_len = array_ty.arrayLen();
+    const array_ty = array_ptr_ty.childType(mod);
+    const array_sent = array_ty.sentinel(mod) != null;
+    const array_len = array_ty.arrayLen(mod);
     const array_len_s = array_len + @boolToInt(array_sent);
     if (array_len_s == 0) {
@@ -25396,7 +25441,7 @@ fn elemValSlice(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel() != null;
+    const slice_sent = slice_ty.sentinel(mod) != null;
     const elem_ty = slice_ty.elemType2(mod);
     var runtime_src = slice_src;
@@ -25453,7 +25498,7 @@ fn elemPtrSlice(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel() != null;
+    const slice_sent = slice_ty.sentinel(mod) != null;
     const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
     // The index must not be undefined since it can be out of bounds.
@@ -25614,7 +25659,7 @@ fn coerceExtra(
             }
             // T to ?T
-            const child_type = try dest_ty.optionalChildAlloc(sema.arena);
+            const child_type = dest_ty.optionalChild(mod);
             const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                 error.NotCoercible => {
                     if (in_memory_result == .no_match) {
@@ -25628,7 +25673,7 @@ fn coerceExtra(
             return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
         },
        .Pointer => pointer: {
-            const dest_info = dest_ty.ptrInfo().data;
+            const dest_info = dest_ty.ptrInfo(mod);
             // Function body to function pointer.
             if (inst_ty.zigTypeTag(mod) == .Fn) {
@@ -25643,11 +25688,11 @@ fn coerceExtra(
                 if (dest_info.size != .One) break :single_item;
                 if (!inst_ty.isSinglePointer(mod)) break :single_item;
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const ptr_elem_ty = inst_ty.childType();
+                const ptr_elem_ty = inst_ty.childType(mod);
                 const array_ty = dest_info.pointee_type;
                 if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
-                const array_elem_ty = array_ty.childType();
-                if (array_ty.arrayLen() != 1) break :single_item;
+                const array_elem_ty = array_ty.childType(mod);
+                if (array_ty.arrayLen(mod) != 1) break :single_item;
                 const dest_is_mut = dest_info.mutable;
                 switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                     .ok => {},
@@ -25660,9 +25705,9 @@ fn coerceExtra(
             src_array_ptr: {
                 if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const array_ty = inst_ty.childType();
+                const array_ty = inst_ty.childType(mod);
                 if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
-                const array_elem_type = array_ty.childType();
+                const array_elem_type = array_ty.childType(mod);
                 const dest_is_mut = dest_info.mutable;
                 const dst_elem_type = dest_info.pointee_type;
@@ -25680,7 +25725,7 @@ fn coerceExtra(
                 }
                 if (dest_info.sentinel) |dest_sent| {
-                    if (array_ty.sentinel()) |inst_sent| {
+                    if (array_ty.sentinel(mod)) |inst_sent| {
                         if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) {
                             in_memory_result = .{ .ptr_sentinel = .{
                                 .actual = inst_sent,
@@ -25721,7 +25766,7 @@ fn coerceExtra(
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
                 // In this case we must add a safety check because the C pointer
                 // could be null.
-                const src_elem_ty = inst_ty.childType();
+                const src_elem_ty = inst_ty.childType(mod);
                 const dest_is_mut = dest_info.mutable;
                 const dst_elem_type = dest_info.pointee_type;
                 switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
@@ -25784,7 +25829,7 @@ fn coerceExtra(
                 },
                 .Pointer => p: {
                     if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
-                    const inst_info = inst_ty.ptrInfo().data;
+                    const inst_info = inst_ty.ptrInfo(mod);
                     switch (try sema.coerceInMemoryAllowed(
                         block,
                         dest_info.pointee_type,
@@ -25814,7 +25859,7 @@ fn coerceExtra(
                 .Union => {
                     // pointer to anonymous struct to pointer to union
                     if (inst_ty.isSinglePointer(mod) and
-                        inst_ty.childType().isAnonStruct() and
+                        inst_ty.childType(mod).isAnonStruct() and
                         sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                     {
                         return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25823,7 +25868,7 @@ fn coerceExtra(
                 .Struct => {
                     // pointer to anonymous struct to pointer to struct
                     if (inst_ty.isSinglePointer(mod) and
-                        inst_ty.childType().isAnonStruct() and
+                        inst_ty.childType(mod).isAnonStruct() and
                         sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                     {
                         return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
@@ -25835,7 +25880,7 @@ fn coerceExtra(
                 .Array => {
                     // pointer to tuple to pointer to array
                     if (inst_ty.isSinglePointer(mod) and
-                        inst_ty.childType().isTuple() and
+                        inst_ty.childType(mod).isTuple() and
                         sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                     {
                         return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25854,7 +25899,7 @@ fn coerceExtra(
                     }
                     if (!inst_ty.isSinglePointer(mod)) break :to_slice;
-                    const inst_child_ty = inst_ty.childType();
+                    const inst_child_ty = inst_ty.childType(mod);
                     if (!inst_child_ty.isTuple()) break :to_slice;
                     // empty tuple to zero-length slice
@@ -25887,7 +25932,7 @@ fn coerceExtra(
                 .Many => p: {
                     if (!inst_ty.isSlice(mod)) break :p;
                     if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
-                    const inst_info = inst_ty.ptrInfo().data;
+                    const inst_info = inst_ty.ptrInfo(mod);
                     switch (try sema.coerceInMemoryAllowed(
                         block,
@@ -26196,9 +26241,8 @@ fn coerceExtra(
     }
     // ?T to T
-    var buf: Type.Payload.ElemType = undefined;
     if (inst_ty.zigTypeTag(mod) == .Optional and
-        (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
+        (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
     {
         try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
         try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
@@ -26399,10 +26443,8 @@ const InMemoryCoercionResult = union(enum) {
                 cur = pair.child;
             },
             .optional_shape => |pair| {
-                var buf_actual: Type.Payload.ElemType = undefined;
-                var buf_wanted: Type.Payload.ElemType = undefined;
                 try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
-                    pair.actual.optionalChild(&buf_actual).fmt(sema.mod), pair.wanted.optionalChild(&buf_wanted).fmt(sema.mod),
+                    pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod),
                 });
                 break;
             },
@@ -26640,10 +26682,8 @@ fn coerceInMemoryAllowed(
     }
     // Pointers / Pointer-like Optionals
-    var dest_buf: Type.Payload.ElemType = undefined;
-    var src_buf: Type.Payload.ElemType = undefined;
-    const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty, &dest_buf);
-    const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty, &src_buf);
+    const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty);
+    const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty);
     if (maybe_dest_ptr_ty) |dest_ptr_ty| {
         if (maybe_src_ptr_ty) |src_ptr_ty| {
             return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
@@ -26685,8 +26725,8 @@ fn coerceInMemoryAllowed(
     // Arrays
     if (dest_tag == .Array and src_tag == .Array) {
-        const dest_info = dest_ty.arrayInfo();
-        const src_info = src_ty.arrayInfo();
+        const dest_info = dest_ty.arrayInfo(mod);
+        const src_info = src_ty.arrayInfo(mod);
         if (dest_info.len != src_info.len) {
             return InMemoryCoercionResult{ .array_len = .{
                 .actual = src_info.len,
@@ -26717,8 +26757,8 @@ fn coerceInMemoryAllowed(
     // Vectors
     if (dest_tag == .Vector and src_tag == .Vector) {
-        const dest_len = dest_ty.vectorLen();
-        const src_len = src_ty.vectorLen();
+        const dest_len = dest_ty.vectorLen(mod);
+        const src_len = src_ty.vectorLen(mod);
         if (dest_len != src_len) {
             return InMemoryCoercionResult{ .vector_len = .{
                 .actual = src_len,
@@ -26748,8 +26788,8 @@ fn coerceInMemoryAllowed(
                 .wanted = dest_ty,
             } };
         }
-        const dest_child_type = dest_ty.optionalChild(&dest_buf);
-        const src_child_type = src_ty.optionalChild(&src_buf);
+        const dest_child_type = dest_ty.optionalChild(mod);
+        const src_child_type = src_ty.optionalChild(mod);
         const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src);
         if (child != .ok) {
@@ -27019,8 +27059,8 @@ fn coerceInMemoryAllowedPtrs(
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
     const mod = sema.mod;
-    const dest_info = dest_ptr_ty.ptrInfo().data;
-    const src_info = src_ptr_ty.ptrInfo().data;
+    const dest_info = dest_ptr_ty.ptrInfo(mod);
+    const src_info = src_ptr_ty.ptrInfo(mod);
     const ok_ptr_size = src_info.size == dest_info.size or
         src_info.size == .C or dest_info.size == .C;
@@ -27206,11 +27246,12 @@ fn storePtr2(
     operand_src: LazySrcLoc,
     air_tag: Air.Inst.Tag,
 ) CompileError!void {
+    const mod = sema.mod;
     const ptr_ty = sema.typeOf(ptr);
     if (ptr_ty.isConstPtr())
         return sema.fail(block, ptr_src, "cannot assign to constant", .{});
-    const elem_ty = ptr_ty.childType();
+    const elem_ty = ptr_ty.childType(mod);
     // To generate better code for tuples, we detect a tuple operand here, and
     // analyze field loads and stores directly. This avoids an extra allocation + memcpy
@@ -27221,7 +27262,6 @@ fn storePtr2(
     // this code does not handle tuple-to-struct coercion which requires dealing with missing
     // fields.
     const operand_ty = sema.typeOf(uncasted_operand);
-    const mod = sema.mod;
     if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) {
         const field_count = operand_ty.structFieldCount();
         var i: u32 = 0;
@@ -27247,7 +27287,7 @@ fn storePtr2(
     // as well as working around an LLVM bug:
     // https://github.com/ziglang/zig/issues/11154
     if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
-        const vector_ty = sema.typeOf(vector_ptr).childType();
+        const vector_ty = sema.typeOf(vector_ptr).childType(mod);
         const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
             error.NotCoercible => unreachable,
             else => |e| return e,
@@ -27288,7 +27328,7 @@ fn storePtr2(
     try sema.requireRuntimeBlock(block, src, runtime_src);
     try sema.queueFullTypeResolution(elem_ty);
-    if (ptr_ty.ptrInfo().data.vector_index == .runtime) {
+    if (ptr_ty.ptrInfo(mod).vector_index == .runtime) {
         const ptr_inst = Air.refToIndex(ptr).?;
         const air_tags = sema.air_instructions.items(.tag);
         if (air_tags[ptr_inst] == .ptr_elem_ptr) {
@@ -27322,8 +27362,8 @@ fn storePtr2(
 /// pointer. Only if the final element type matches the vector element type, and the
 /// lengths match.
 fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
-    const array_ty = sema.typeOf(ptr).childType();
     const mod = sema.mod;
+    const array_ty = sema.typeOf(ptr).childType(mod);
     if (array_ty.zigTypeTag(mod) != .Array) return null;
     var ptr_inst = Air.refToIndex(ptr) orelse return null;
     const air_datas = sema.air_instructions.items(.data);
@@ -27332,7 +27372,6 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
         const prev_ptr = air_datas[ptr_inst].ty_op.operand;
         const prev_ptr_ty = sema.typeOf(prev_ptr);
         const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) {
-            .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data,
             .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type,
             else => return null,
         };
@@ -27342,9 +27381,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
     // We have a pointer-to-array and a pointer-to-vector. If the elements and
     // lengths match, return the result.
-    const vector_ty = sema.typeOf(prev_ptr).childType();
-    if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and
-        array_ty.arrayLen() == vector_ty.vectorLen())
+    const vector_ty = sema.typeOf(prev_ptr).childType(mod);
+    if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and
+        array_ty.arrayLen(mod) == vector_ty.vectorLen(mod))
     {
         return prev_ptr;
     } else {
@@ -27476,14 +27515,14 @@ fn beginComptimePtrMutation(
     switch (parent.pointee) {
         .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
             .Array, .Vector => {
-                const check_len = parent.ty.arrayLenIncludingSentinel();
+                const check_len = parent.ty.arrayLenIncludingSentinel(mod);
                 if (elem_ptr.index >= check_len) {
                     // TODO have the parent include the decl so we can say "declared here"
                     return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
                         elem_ptr.index, check_len,
                     });
                 }
-                const elem_ty = parent.ty.childType();
+                const elem_ty = parent.ty.childType(mod);
                 // We might have a pointer to multiple elements of the array (e.g. a pointer
                 // to a sub-array). In this case, we just have to reinterpret the relevant
@@ -27510,7 +27549,7 @@ fn beginComptimePtrMutation(
                         defer parent.finishArena(sema.mod);
                         const array_len_including_sentinel =
-                            try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
+                            try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
                         const elems = try arena.alloc(Value, array_len_including_sentinel);
                         @memset(elems, Value.undef);
@@ -27536,7 +27575,7 @@ fn beginComptimePtrMutation(
                         defer parent.finishArena(sema.mod);
                         const bytes = val_ptr.castTag(.bytes).?.data;
-                        const dest_len = parent.ty.arrayLenIncludingSentinel();
+                        const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
                         // bytes.len may be one greater than dest_len because of the case when
                         // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
                         assert(bytes.len >= dest_len);
@@ -27567,13 +27606,13 @@ fn beginComptimePtrMutation(
                         defer parent.finishArena(sema.mod);
                         const str_lit = val_ptr.castTag(.str_lit).?.data;
-                        const dest_len = parent.ty.arrayLenIncludingSentinel();
+                        const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
                         const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
                         const elems = try arena.alloc(Value, @intCast(usize, dest_len));
                         for (bytes, 0..) |byte, i| {
                             elems[i] = try Value.Tag.int_u64.create(arena, byte);
                         }
-                        if (parent.ty.sentinel()) |sent_val| {
+                        if (parent.ty.sentinel(mod)) |sent_val| {
                             assert(elems.len == bytes.len + 1);
                             elems[bytes.len] = sent_val;
                         }
@@ -27603,7 +27642,7 @@ fn beginComptimePtrMutation(
                         const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
                         const array_len_including_sentinel =
-                            try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
+                            try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
                         const elems = try arena.alloc(Value, array_len_including_sentinel);
                         if (elems.len > 0) elems[0] = repeated_val;
                         for (elems[1..]) |*elem| {
@@ -27906,12 +27945,12 @@ fn beginComptimePtrMutation(
         },
         .opt_payload_ptr => {
            const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else {
-                return sema.beginComptimePtrMutation(block, src, ptr_val, try ptr_elem_ty.optionalChildAlloc(sema.arena));
+                return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod));
            };
            var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty);
            switch (parent.pointee) {
                .direct => |val_ptr| {
-                    const payload_ty = try parent.ty.optionalChildAlloc(sema.arena);
+                    const payload_ty = parent.ty.optionalChild(mod);
                    switch (val_ptr.tag()) {
                        .undef, .null_value => {
                            // An optional has been initialized to undefined at comptime and now we
@@ -27984,7 +28023,7 @@ fn beginComptimePtrMutationInner(
     // Handle the case that the decl is an array and we're actually trying to point to an element.
     if (decl_ty.isArrayOrVector(mod)) {
-        const decl_elem_ty = decl_ty.childType();
+        const decl_elem_ty = decl_ty.childType(mod);
         if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
             return ComptimePtrMutationKit{
                 .decl_ref_mut = decl_ref_mut,
@@ -28105,7 +28144,7 @@ fn beginComptimePtrLoad(
             // If we're loading an elem_ptr that was derived from a different type
             // than the true type of the underlying decl, we cannot deref directly
             const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
-                const deref_elem_ty = deref.pointee.?.ty.childType();
+                const deref_elem_ty = deref.pointee.?.ty.childType(mod);
                 break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
                     (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
             } else false;
@@ -28115,12 +28154,12 @@ fn beginComptimePtrLoad(
             }
             var array_tv = deref.pointee.?;
-            const check_len = array_tv.ty.arrayLenIncludingSentinel();
+            const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
             if (maybe_array_ty) |load_ty| {
                 // It's possible that we're loading a [N]T, in which case we'd like to slice
                 // the pointee array directly from our parent array.
-                if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) {
-                    const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel());
+                if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) {
+                    const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
                     deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
                         .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
                         .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
@@ -28134,7 +28173,7 @@ fn beginComptimePtrLoad(
                 break :blk deref;
             }
             if (elem_ptr.index == check_len - 1) {
-                if (array_tv.ty.sentinel()) |sent| {
+                if (array_tv.ty.sentinel(mod)) |sent| {
                     deref.pointee = TypedValue{
                         .ty = elem_ty,
                         .val = sent,
@@ -28226,7 +28265,7 @@ fn beginComptimePtrLoad(
             const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
             const payload_ty = switch (ptr_val.tag()) {
                 .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(),
-                .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena),
+                .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
                 else => unreachable,
             };
             var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty);
@@ -28357,12 +28396,13 @@ fn coerceArrayPtrToSlice(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     if (try sema.resolveMaybeUndefVal(inst)) |val| {
         const ptr_array_ty = sema.typeOf(inst);
-        const array_ty = ptr_array_ty.childType();
+        const array_ty = ptr_array_ty.childType(mod);
         const slice_val = try Value.Tag.slice.create(sema.arena, .{
             .ptr = val,
-            .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
+            .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)),
         });
         return sema.addConstant(dest_ty, slice_val);
     }
@@ -28371,11 +28411,11 @@ fn coerceArrayPtrToSlice(
 }
 
 fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
-    const dest_info = dest_ty.ptrInfo().data;
-    const inst_info = inst_ty.ptrInfo().data;
     const mod = sema.mod;
-    const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or
-        (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
+    const dest_info = dest_ty.ptrInfo(mod);
+    const inst_info = inst_ty.ptrInfo(mod);
+    const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or
+        (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
         (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0);
     const ok_cv_qualifiers =
@@ -28647,7 +28687,8 @@ fn coerceAnonStructToUnionPtrs(
     ptr_anon_struct: Air.Inst.Ref,
     anon_struct_src: LazySrcLoc,
 ) !Air.Inst.Ref {
-    const union_ty = ptr_union_ty.childType();
+    const mod = sema.mod;
+    const union_ty = ptr_union_ty.childType(mod);
     const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
     const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src);
     return sema.analyzeRef(block, union_ty_src, union_inst);
@@ -28661,7 +28702,8 @@ fn coerceAnonStructToStructPtrs(
     ptr_anon_struct: Air.Inst.Ref,
     anon_struct_src: LazySrcLoc,
 ) !Air.Inst.Ref {
-    const struct_ty = ptr_struct_ty.childType();
+    const mod = sema.mod;
+    const struct_ty = ptr_struct_ty.childType(mod);
     const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
     const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src);
     return sema.analyzeRef(block, struct_ty_src, struct_inst);
@@ -28676,15 +28718,16 @@ fn coerceArrayLike(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_ty = sema.typeOf(inst);
-    const inst_len = inst_ty.arrayLen();
-    const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen());
-    const target = sema.mod.getTarget();
+    const inst_len = inst_ty.arrayLen(mod);
+    const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod));
+    const target = mod.getTarget();
     if (dest_len != inst_len) {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
-                dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
+                dest_ty.fmt(mod), inst_ty.fmt(mod),
             });
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
@@ -28694,8 +28737,8 @@ fn coerceArrayLike(
         return sema.failWithOwnedErrorMsg(msg);
     }
-    const dest_elem_ty = dest_ty.childType();
-    const inst_elem_ty = inst_ty.childType();
+    const dest_elem_ty = dest_ty.childType(mod);
+    const inst_elem_ty = inst_ty.childType(mod);
     const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src);
     if (in_memory_result == .ok) {
         if (try sema.resolveMaybeUndefVal(inst)) |inst_val| {
@@ -28749,9 +28792,10 @@ fn coerceTupleToArray(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_ty = sema.typeOf(inst);
-    const inst_len = inst_ty.arrayLen();
-    const dest_len = dest_ty.arrayLen();
+    const inst_len = inst_ty.arrayLen(mod);
+    const dest_len = dest_ty.arrayLen(mod);
     if (dest_len != inst_len) {
         const msg = msg: {
@@ -28766,16 +28810,16 @@ fn coerceTupleToArray(
         return sema.failWithOwnedErrorMsg(msg);
     }
-    const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel());
+    const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod));
     const element_vals = try sema.arena.alloc(Value, dest_elems);
     const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
-    const dest_elem_ty = dest_ty.childType();
+    const dest_elem_ty = dest_ty.childType(mod);
     var runtime_src: ?LazySrcLoc = null;
     for (element_vals, 0..) |*elem, i_usize| {
         const i = @intCast(u32, i_usize);
         if (i_usize == inst_len) {
-            elem.* = dest_ty.sentinel().?;
+            elem.* = dest_ty.sentinel(mod).?;
             element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*);
             break;
         }
@@ -28812,9 +28856,10 @@ fn coerceTupleToSlicePtrs(
     ptr_tuple: Air.Inst.Ref,
     tuple_src: LazySrcLoc,
 ) !Air.Inst.Ref {
-    const tuple_ty = sema.typeOf(ptr_tuple).childType();
+    const mod = sema.mod;
+    const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
     const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
-    const slice_info = slice_ty.ptrInfo().data;
+    const slice_info = slice_ty.ptrInfo(mod);
     const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod);
     const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
     if (slice_info.@"align" != 0) {
@@ -28833,8 +28878,9 @@ fn coerceTupleToArrayPtrs(
     ptr_tuple: Air.Inst.Ref,
     tuple_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
     const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
-    const ptr_info = ptr_array_ty.ptrInfo().data;
+    const ptr_info = ptr_array_ty.ptrInfo(mod);
     const array_ty = ptr_info.pointee_type;
     const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
     if (ptr_info.@"align" != 0) {
@@ -29231,7 +29277,7 @@ fn analyzeLoad(
     const mod = sema.mod;
     const ptr_ty = sema.typeOf(ptr);
     const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
-        .Pointer => ptr_ty.childType(),
+        .Pointer => ptr_ty.childType(mod),
         else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
     };
@@ -29245,7 +29291,7 @@ fn analyzeLoad(
         }
     }
-    if (ptr_ty.ptrInfo().data.vector_index == .runtime) {
+    if (ptr_ty.ptrInfo(mod).vector_index == .runtime) {
         const ptr_inst = Air.refToIndex(ptr).?;
         const air_tags = sema.air_instructions.items(.tag);
         if (air_tags[ptr_inst] == .ptr_elem_ptr) {
@@ -29318,8 +29364,7 @@ fn analyzeIsNull(
     const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
     const operand_ty = sema.typeOf(operand);
-    var buf: Type.Payload.ElemType = undefined;
-    if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) {
+    if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) {
         return inverted_non_null_res;
     }
     if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
@@ -29339,7 +29384,7 @@ fn analyzePtrIsNonErrComptimeOnly(
     const mod = sema.mod;
     const ptr_ty = sema.typeOf(operand);
     assert(ptr_ty.zigTypeTag(mod) == .Pointer);
-    const child_ty = ptr_ty.childType();
+    const child_ty = ptr_ty.childType(mod);
     const child_tag = child_ty.zigTypeTag(mod);
     if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
@@ -29495,7 +29540,7 @@ fn analyzeSlice(
     // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
     const ptr_ptr_ty = sema.typeOf(ptr_ptr);
     const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => ptr_ptr_ty.elemType(),
+        .Pointer => ptr_ptr_ty.childType(mod),
         else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}),
     };
@@ -29506,30 +29551,30 @@ fn analyzeSlice(
     var ptr_sentinel: ?Value = null;
     switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
         .Array => {
-            ptr_sentinel = ptr_ptr_child_ty.sentinel();
-            elem_ty = ptr_ptr_child_ty.childType();
+            ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
+            elem_ty = ptr_ptr_child_ty.childType(mod);
         },
-        .Pointer => switch (ptr_ptr_child_ty.ptrSize()) {
+        .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) {
             .One => {
-                const double_child_ty = ptr_ptr_child_ty.childType();
+                const double_child_ty = ptr_ptr_child_ty.childType(mod);
                 if (double_child_ty.zigTypeTag(mod) == .Array) {
-                    ptr_sentinel = double_child_ty.sentinel();
+                    ptr_sentinel = double_child_ty.sentinel(mod);
                     ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                     slice_ty = ptr_ptr_child_ty;
                     array_ty = double_child_ty;
-                    elem_ty = double_child_ty.childType();
+                    elem_ty = double_child_ty.childType(mod);
                 } else {
                     return sema.fail(block, src, "slice of single-item pointer", .{});
                 }
             },
             .Many, .C => {
-                ptr_sentinel = ptr_ptr_child_ty.sentinel();
+                ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
                 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                 slice_ty = ptr_ptr_child_ty;
                 array_ty = ptr_ptr_child_ty;
-                elem_ty = ptr_ptr_child_ty.childType();
+                elem_ty = ptr_ptr_child_ty.childType(mod);
-                if (ptr_ptr_child_ty.ptrSize() == .C) {
+                if (ptr_ptr_child_ty.ptrSize(mod) == .C) {
                     if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
                         if (ptr_val.isNull(mod)) {
                             return sema.fail(block, src, "slice of null pointer", .{});
@@ -29538,11 +29583,11 @@ fn analyzeSlice(
                 }
             },
             .Slice => {
-                ptr_sentinel = ptr_ptr_child_ty.sentinel();
+                ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
                 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
                 slice_ty = ptr_ptr_child_ty;
                 array_ty = ptr_ptr_child_ty;
-                elem_ty = ptr_ptr_child_ty.childType();
+                elem_ty = ptr_ptr_child_ty.childType(mod);
             },
         },
         else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
@@ -29563,7 +29608,7 @@ fn analyzeSlice(
     var end_is_len = uncasted_end_opt == .none;
     const end = e: {
         if (array_ty.zigTypeTag(mod) == .Array) {
-            const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen());
+            const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod));
             if (!end_is_len) {
                 const end = if (by_length) end: {
@@ -29574,10 +29619,10 @@ fn analyzeSlice(
                 if (try sema.resolveMaybeUndefVal(end)) |end_val| {
                     const len_s_val = try Value.Tag.int_u64.create(
                         sema.arena,
-                        array_ty.arrayLenIncludingSentinel(),
+                        array_ty.arrayLenIncludingSentinel(mod),
                     );
                     if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
-                        const sentinel_label: []const u8 = if (array_ty.sentinel() != null)
+                        const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null)
                             " +1 (sentinel)"
                         else
                             "";
@@ -29617,7 +29662,7 @@ fn analyzeSlice(
             if (slice_val.isUndef()) {
                 return sema.fail(block, src, "slice of undefined", .{});
             }
-            const has_sentinel = slice_ty.sentinel() != null;
+            const has_sentinel = slice_ty.sentinel(mod) != null;
             var int_payload: Value.Payload.U64 = .{
                 .base = .{ .tag = .int_u64 },
                 .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel),
@@ -29751,8 +29796,8 @@ fn analyzeSlice(
analyzeSlice( try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); - const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; - const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; + const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod); + const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { const new_len_int = new_len_val.toUnsignedInt(mod); @@ -29780,7 +29825,7 @@ fn analyzeSlice( if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - const actual_len = if (slice_ty.sentinel() == null) + const actual_len = if (slice_ty.sentinel(mod) == null) slice_len_inst else try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29839,7 +29884,7 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) - try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) + try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the @@ -29848,7 +29893,7 @@ fn analyzeSlice( } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - if (slice_ty.sentinel() == null) break :blk slice_len_inst; + if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -30284,7 +30329,10 @@ fn cmpVector( const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); - const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = lhs_ty.vectorLen(mod), + .child = .bool_type, + }); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { @@ -30484,12 +30532,12 @@ fn resolvePeerTypes( } continue; }, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .ComptimeInt => switch (chosen_ty_tag) { .Int, .Float, .ComptimeFloat => continue, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .Float => switch (chosen_ty_tag) { @@ -30654,10 +30702,10 @@ fn resolvePeerTypes( }, }, .Pointer => { - const cand_info = candidate_ty.ptrInfo().data; + const cand_info = candidate_ty.ptrInfo(mod); switch (chosen_ty_tag) { .Pointer => { - const chosen_info = chosen_ty.ptrInfo().data; + const chosen_info = chosen_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30690,8 +30738,8 @@ fn resolvePeerTypes( chosen_info.pointee_type.zigTypeTag(mod) == .Array and cand_info.pointee_type.zigTypeTag(mod) == .Array) { - const chosen_elem_ty = chosen_info.pointee_type.childType(); - const cand_elem_ty = cand_info.pointee_type.childType(); + const chosen_elem_ty = chosen_info.pointee_type.childType(mod); + const cand_elem_ty = cand_info.pointee_type.childType(mod); const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, 
chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src); if (chosen_ok) { @@ -30757,10 +30805,9 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); + const chosen_ptr_ty = chosen_ty.optionalChild(mod); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30777,7 +30824,7 @@ fn resolvePeerTypes( .ErrorUnion => { const chosen_ptr_ty = chosen_ty.errorUnionPayload(); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30802,8 +30849,7 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); + const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { seen_const = seen_const or opt_child_ty.isConstPtr(); any_are_null = true; @@ -30818,13 +30864,13 @@ fn resolvePeerTypes( }, .Vector => switch (chosen_ty_tag) { .Vector => { - const chosen_len = chosen_ty.vectorLen(); - const candidate_len = candidate_ty.vectorLen(); + const chosen_len = chosen_ty.vectorLen(mod); + const candidate_len = candidate_ty.vectorLen(mod); if (chosen_len != candidate_len) continue; - const chosen_child_ty = chosen_ty.childType(); - const candidate_child_ty = candidate_ty.childType(); + const chosen_child_ty = chosen_ty.childType(mod); + const candidate_child_ty = candidate_ty.childType(mod); if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { const chosen_info = chosen_child_ty.intInfo(mod); const candidate_info = candidate_child_ty.intInfo(mod); @@ -30853,8 +30899,8 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { - if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } }, @@ -30874,8 +30920,7 @@ fn resolvePeerTypes( continue; }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); + const opt_child_ty = chosen_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30949,16 +30994,16 @@ fn resolvePeerTypes( if (convert_to_slice) { // turn *[N]T => []T - const chosen_child_ty = chosen_ty.childType(); - var info = chosen_ty.ptrInfo(); - info.data.sentinel = chosen_child_ty.sentinel(); - info.data.size = .Slice; - info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(mod); - - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + const chosen_child_ty = chosen_ty.childType(mod); + var info = chosen_ty.ptrInfo(mod); + info.sentinel = 
chosen_child_ty.sentinel(mod); + info.size = .Slice; + info.mutable = !(seen_const or chosen_child_ty.isConstPtr()); + info.pointee_type = chosen_child_ty.elemType2(mod); + + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; @@ -30970,22 +31015,22 @@ fn resolvePeerTypes( switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { const ptr_ty = chosen_ty.errorUnionPayload(); - var info = ptr_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + var info = ptr_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, .Pointer => { - var info = chosen_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + var info = chosen_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; @@ -30998,7 +31043,7 @@ fn resolvePeerTypes( if (any_are_null) { const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, - else => try Type.optional(sema.arena, chosen_ty), + else => try Type.optional(sema.arena, chosen_ty, mod), }; const set_ty = err_set_ty orelse return opt_ty; return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); @@ -31077,13 +31122,12 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { .Struct => return sema.resolveStructLayout(ty), .Union => return sema.resolveUnionLayout(ty), .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return; - const elem_ty = ty.childType(); + if (ty.arrayLenIncludingSentinel(mod) == 0) return; + const elem_ty = ty.childType(mod); return sema.resolveTypeLayout(elem_ty); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); // In case of querying the ABI alignment of this optional, we will ask // for hasRuntimeBits() of the payload type, so we need "requires comptime" // to be known already before this function returns. 
@@ -31343,10 +31387,10 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
 fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Pointer) {
-        switch (ty.ptrSize()) {
+        switch (ty.ptrSize(mod)) {
             .Slice, .Many, .C => return,
             .One => {
-                const elem_ty = ty.childType();
+                const elem_ty = ty.childType(mod);
                 if (elem_ty.zigTypeTag(mod) == .Array) return;
                 // TODO https://github.com/ziglang/zig/issues/15479
                 // if (elem_ty.isTuple()) return;
@@ -31418,8 +31462,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .int_type => false,
         .ptr_type => @panic("TODO"),
         .array_type => @panic("TODO"),
-        .vector_type => @panic("TODO"),
-        .optional_type => @panic("TODO"),
+        .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()),
+        .opt_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
         .simple_type => |t| switch (t) {
             .f16,
@@ -31478,12 +31522,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
     };

     return switch (ty.tag()) {
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
-        .const_slice_u8,
-        .const_slice_u8_sentinel_0,
-        .anyerror_void_error_union,
         .empty_struct_literal,
         .empty_struct,
         .error_set,
@@ -31491,34 +31529,20 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .error_set_inferred,
         .error_set_merged,
         .@"opaque",
-        .array_u8,
-        .array_u8_sentinel_0,
         .enum_simple,
         => false,

-        .single_const_pointer_to_comptime_int,
-        .function,
-        => true,
+        .function => true,

         .inferred_alloc_mut => unreachable,
         .inferred_alloc_const => unreachable,

         .array,
         .array_sentinel,
-        .vector,
-        => return sema.resolveTypeRequiresComptime(ty.childType()),
+        => return sema.resolveTypeRequiresComptime(ty.childType(mod)),

-        .pointer,
-        .single_const_pointer,
-        .single_mut_pointer,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        .const_slice,
-        .mut_slice,
-        => {
-            const child_ty = ty.childType();
+        .pointer => {
+            const child_ty = ty.childType(mod);
             if (child_ty.zigTypeTag(mod) == .Fn) {
                 return child_ty.fnInfo().is_generic;
             } else {
@@ -31526,12 +31550,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             }
         },

-        .optional,
-        .optional_single_mut_pointer,
-        .optional_single_const_pointer,
-        => {
-            var buf: Type.Payload.ElemType = undefined;
-            return sema.resolveTypeRequiresComptime(ty.optionalChild(&buf));
+        .optional => {
+            return sema.resolveTypeRequiresComptime(ty.optionalChild(mod));
         },

         .tuple, .anon_struct => {
@@ -31609,7 +31629,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
     const mod = sema.mod;
     switch (ty.zigTypeTag(mod)) {
         .Pointer => {
-            const child_ty = try sema.resolveTypeFields(ty.childType());
+            const child_ty = try sema.resolveTypeFields(ty.childType(mod));
             return sema.resolveTypeFully(child_ty);
         },
         .Struct => switch (ty.tag()) {
@@ -31624,10 +31644,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
             else => {},
         },
         .Union => return sema.resolveUnionFully(ty),
-        .Array => return sema.resolveTypeFully(ty.childType()),
+        .Array => return sema.resolveTypeFully(ty.childType(mod)),
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            return sema.resolveTypeFully(ty.optionalChild(&buf));
+            return sema.resolveTypeFully(ty.optionalChild(mod));
         },
         .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()),
         .Fn => {
@@ -32897,10 +32916,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             return null;
         }
     },
-        .ptr_type => @panic("TODO"),
+        .ptr_type => return null,
         .array_type => @panic("TODO"),
-        .vector_type => @panic("TODO"),
-        .optional_type => @panic("TODO"),
+        .vector_type => |vector_type| {
+            if (vector_type.len == 0) return Value.initTag(.empty_array);
+            if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v;
+            return null;
+        },
+        .opt_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
         .simple_type => |t| switch (t) {
             .f16,
@@ -32963,34 +32986,15 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .error_set_merged,
         .error_union,
         .function,
-        .single_const_pointer_to_comptime_int,
         .array_sentinel,
-        .array_u8_sentinel_0,
-        .const_slice_u8,
-        .const_slice_u8_sentinel_0,
-        .const_slice,
-        .mut_slice,
-        .optional_single_mut_pointer,
-        .optional_single_const_pointer,
-        .anyerror_void_error_union,
        .error_set_inferred,
        .@"opaque",
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
        .anyframe_T,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        .single_const_pointer,
-        .single_mut_pointer,
        .pointer,
        => return null,

        .optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            const child_ty = ty.optionalChild(&buf);
+            const child_ty = ty.optionalChild(mod);
            if (child_ty.isNoReturn()) {
                return Value.null;
            } else {
@@ -33111,10 +33115,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),

-        .vector, .array, .array_u8 => {
-            if (ty.arrayLen() == 0)
+        .array => {
+            if (ty.arrayLen(mod) == 0)
                 return Value.initTag(.empty_array);
-            if ((try sema.typeHasOnePossibleValue(ty.elemType())) != null) {
+            if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) {
                 return Value.initTag(.the_only_possible_value);
             }
             return null;
@@ -33147,20 +33151,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
             .data = .{ .interned = ty.ip_index },
         });
         return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+    } else {
+        try sema.air_instructions.append(sema.gpa, .{
+            .tag = .const_ty,
+            .data = .{ .ty = ty },
+        });
+        return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
     }
-    switch (ty.tag()) {
-        .manyptr_u8 => return .manyptr_u8_type,
-        .manyptr_const_u8 => return .manyptr_const_u8_type,
-        .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
-        .const_slice_u8 => return .const_slice_u8_type,
-        .anyerror_void_error_union => return .anyerror_void_error_union_type,
-        else => {},
-    }
-    try sema.air_instructions.append(sema.gpa, .{
-        .tag = .const_ty,
-        .data = .{ .ty = ty },
-    });
-    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
 }

 fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref {
@@ -33173,6 +33170,15 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {

 pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref {
     const gpa = sema.gpa;
+    if (val.ip_index != .none) {
+        if (@enumToInt(val.ip_index) < Air.ref_start_index)
+            return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index));
+        try sema.air_instructions.append(gpa, .{
+            .tag = .interned,
+            .data = .{ .interned = val.ip_index },
+        });
+        return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+    }
     const ty_inst = try sema.addType(ty);
     try sema.air_values.append(gpa, val);
     try sema.air_instructions.append(gpa, .{
@@ -33331,7 +33337,8 @@ pub fn analyzeAddressSpace(
 /// Asserts the value is a pointer and dereferences it.
 /// Returns `null` if the pointer contents cannot be loaded at comptime.
 fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
-    const load_ty = ptr_ty.childType();
+    const mod = sema.mod;
+    const load_ty = ptr_ty.childType(mod);
     const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true);
     switch (res) {
         .runtime_load => return null,
@@ -33422,11 +33429,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
 /// This can return `error.AnalysisFail` because it sometimes requires resolving whether
 /// a type has zero bits, which can cause a "foo depends on itself" compile error.
 /// This logic must be kept in sync with `Type.isPtrLikeOptional`.
-fn typePtrOrOptionalPtrTy(
-    sema: *Sema,
-    ty: Type,
-    buf: *Type.Payload.ElemType,
-) !?Type {
+fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
     const mod = sema.mod;
     if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
@@ -33435,14 +33438,14 @@ fn typePtrOrOptionalPtrTy(
             .C => return ptr_type.elem_type.toType(),
             .One, .Many => return ty,
         },
-        .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) {
+        .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
             .ptr_type => |ptr_type| switch (ptr_type.size) {
                 .Slice, .C => return null,
                 .Many, .One => {
                     if (ptr_type.is_allowzero) return null;

                     // optionals of zero sized types behave like bools, not pointers
-                    const payload_ty = o.payload_type.toType();
+                    const payload_ty = opt_child.toType();
                     if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
                         return null;
                     }
@@ -33456,25 +33459,9 @@ fn typePtrOrOptionalPtrTy(
     };

     switch (ty.tag()) {
-        .optional_single_const_pointer,
-        .optional_single_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        => return ty.optionalChild(buf),
-
-        .single_const_pointer_to_comptime_int,
-        .single_const_pointer,
-        .single_mut_pointer,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
-        => return ty,
-
-        .pointer => switch (ty.ptrSize()) {
+        .pointer => switch (ty.ptrSize(mod)) {
             .Slice => return null,
-            .C => return ty.optionalChild(buf),
+            .C => return ty.optionalChild(mod),
             else => return ty,
         },

@@ -33482,10 +33469,10 @@ fn typePtrOrOptionalPtrTy(
         .inferred_alloc_mut => unreachable,

         .optional => {
-            const child_type = ty.optionalChild(buf);
+            const child_type = ty.optionalChild(mod);
             if (child_type.zigTypeTag(mod) != .Pointer) return null;

-            const info = child_type.ptrInfo().data;
+            const info = child_type.ptrInfo(mod);
             switch (info.size) {
                 .Slice, .C => return null,
                 .Many, .One => {
@@ -33518,8 +33505,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .int_type => return false,
         .ptr_type => @panic("TODO"),
         .array_type => @panic("TODO"),
-        .vector_type => @panic("TODO"),
-        .optional_type => @panic("TODO"),
+        .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()),
+        .opt_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
         .simple_type => |t| return switch (t) {
             .f16,
@@ -33578,12 +33565,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         }
     }
     return switch (ty.tag()) {
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
-        .const_slice_u8,
-        .const_slice_u8_sentinel_0,
-        .anyerror_void_error_union,
         .empty_struct_literal,
         .empty_struct,
         .error_set,
@@ -33591,34 +33572,20 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .error_set_inferred,
         .error_set_merged,
         .@"opaque",
-        .array_u8,
-        .array_u8_sentinel_0,
         .enum_simple,
         => false,

-        .single_const_pointer_to_comptime_int,
-        .function,
-        => true,
+        .function => true,

         .inferred_alloc_mut => unreachable,
         .inferred_alloc_const => unreachable,

         .array,
         .array_sentinel,
-        .vector,
-        => return sema.typeRequiresComptime(ty.childType()),
+        => return sema.typeRequiresComptime(ty.childType(mod)),

-        .pointer,
-        .single_const_pointer,
-        .single_mut_pointer,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        .const_slice,
-        .mut_slice,
-        => {
-            const child_ty = ty.childType();
+        .pointer => {
+            const child_ty = ty.childType(mod);
             if (child_ty.zigTypeTag(mod) == .Fn) {
                 return child_ty.fnInfo().is_generic;
             } else {
@@ -33626,12 +33593,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             }
         },

-        .optional,
-        .optional_single_mut_pointer,
-        .optional_single_const_pointer,
-        => {
-            var buf: Type.Payload.ElemType = undefined;
-            return sema.typeRequiresComptime(ty.optionalChild(&buf));
+        .optional => {
+            return sema.typeRequiresComptime(ty.optionalChild(mod));
         },

         .tuple, .anon_struct => {
@@ -33814,7 +33777,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
 fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33874,7 +33837,7 @@ fn intSub(
 ) !Value {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33934,7 +33897,7 @@ fn floatAdd(
 ) !Value {
     const mod = sema.mod;
     if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+        const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33992,7 +33955,7 @@ fn floatSub(
 ) !Value {
     const mod = sema.mod;
     if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+        const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34050,8 +34013,8 @@ fn intSubWithOverflow(
 ) !Value.OverflowArithmeticResult {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34105,8 +34068,8 @@ fn floatToInt(
 ) CompileError!Value {
     const mod = sema.mod;
     if (float_ty.zigTypeTag(mod) == .Vector) {
-        const elem_ty = float_ty.childType();
-        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
+        const elem_ty = float_ty.childType(mod);
+        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var buf: Value.ElemValueBuffer = undefined;
             const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
@@ -34383,8 +34346,8 @@ fn intAddWithOverflow(
 ) !Value.OverflowArithmeticResult {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34442,7 +34405,7 @@ fn compareAll(
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
         var i: usize = 0;
-        while (i < ty.vectorLen()) : (i += 1) {
+        while (i < ty.vectorLen(mod)) : (i += 1) {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
             const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -34490,7 +34453,7 @@ fn compareVector(
 ) !Value {
     const mod = sema.mod;
     assert(ty.zigTypeTag(mod) == .Vector);
-    const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+    const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
     for (result_data, 0..) |*scalar, i| {
         var lhs_buf: Value.ElemValueBuffer = undefined;
         var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34511,10 +34474,10 @@
 /// This code is duplicated in `analyzePtrArithmetic`.
 fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
     const mod = sema.mod;
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const ptr_info = ptr_ty.ptrInfo(mod);
     const elem_ty = ptr_ty.elemType2(mod);
     const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
-    const parent_ty = ptr_ty.childType();
+    const parent_ty = ptr_ty.childType(mod);

     const VI = Type.Payload.Pointer.Data.VectorIndex;

@@ -34522,14 +34485,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         host_size: u16 = 0,
         alignment: u32 = 0,
         vector_index: VI = .none,
-    } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: {
+    } = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: {
         const elem_bits = elem_ty.bitSize(mod);
         if (elem_bits == 0) break :blk .{};
         const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
         if (!is_packed) break :blk .{};

         break :blk .{
-            .host_size = @intCast(u16, parent_ty.arrayLen()),
+            .host_size = @intCast(u16, parent_ty.arrayLen(mod)),
             .alignment = @intCast(u16, parent_ty.abiAlignment(mod)),
             .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
         };
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 877a8f5f4c..7f599caafb 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -77,15 +77,6 @@ pub fn print(
             return writer.writeAll("(variable)");

         while (true) switch (val.tag()) {
-            .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"),
-            .const_slice_u8_type => return writer.writeAll("[]const u8"),
-            .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"),
-            .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"),
-
-            .manyptr_u8_type => return writer.writeAll("[*]u8"),
-            .manyptr_const_u8_type => return writer.writeAll("[*]const u8"),
-            .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"),
-
             .empty_struct_value, .aggregate => {
                 if (level == 0) {
                     return writer.writeAll(".{ ... }");
@@ -112,7 +103,7 @@ pub fn print(
                     return writer.writeAll("}");
                 } else {
                     const elem_ty = ty.elemType2(mod);
-                    const len = ty.arrayLen();
+                    const len = ty.arrayLen(mod);

                     if (elem_ty.eql(Type.u8, mod)) str: {
                         const max_len = @intCast(usize, std.math.min(len, max_string_len));
@@ -288,7 +279,7 @@ pub fn print(
                 .ty = ty.elemType2(mod),
                 .val = val.castTag(.repeated).?.data,
             };
-            const len = ty.arrayLen();
+            const len = ty.arrayLen(mod);
             const max_len = std.math.min(len, max_aggregate_items);
             while (i < max_len) : (i += 1) {
                 if (i != 0) try writer.writeAll(", ");
@@ -306,7 +297,7 @@ pub fn print(
             try writer.writeAll(".{ ");
             try print(.{
                 .ty = ty.elemType2(mod),
-                .val = ty.sentinel().?,
+                .val = ty.sentinel(mod).?,
             }, writer, level - 1, mod);
             return writer.writeAll(" }");
         },
@@ -364,8 +355,7 @@ pub fn print(
         },
         .opt_payload => {
             val = val.castTag(.opt_payload).?.data;
-            var buf: Type.Payload.ElemType = undefined;
-            ty = ty.optionalChild(&buf);
+            ty = ty.optionalChild(mod);

             return print(.{ .ty = ty, .val = val }, writer, level, mod);
         },
         .eu_payload_ptr => {
@@ -386,13 +376,8 @@ pub fn print(

             try writer.writeAll(", &(payload of ");

-            var ptr_ty: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = data.container_ty,
-            };
-
             try print(.{
-                .ty = Type.initPayload(&ptr_ty.base),
+                .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
                 .val = data.container_ptr,
             }, writer, level - 1, mod);

@@ -415,13 +400,8 @@ pub fn print(

             try writer.writeAll(", &(payload of ");

-            var ptr_ty: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = data.container_ty,
-            };
-
             try print(.{
-                .ty = Type.initPayload(&ptr_ty.base),
+                .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
                 .val = data.container_ptr,
             }, writer, level - 1, mod);

diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 503bbdbb02..81169750c1 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1030,7 +1030,7 @@ fn allocMem(
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);

     if (!elem_ty.hasRuntimeBits(mod)) {
         // return the stack offset 0. Stack offset 0 will be where all
@@ -1140,17 +1140,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
             const ret_ty = self.fn_type.fnReturnType();
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);

             // addr_reg will contain the address of where to store the
             // result into
@@ -2406,9 +2403,9 @@ fn ptrArithmetic(
             assert(rhs_ty.eql(Type.usize, mod));

             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = elem_ty.abiSize(mod);
@@ -3024,8 +3021,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {

 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
     const mod = self.bin_file.options.module.?;
-    var opt_buf: Type.Payload.ElemType = undefined;
-    const payload_ty = optional_ty.optionalChild(&opt_buf);
+    const payload_ty = optional_ty.optionalChild(mod);
     if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
     if (optional_ty.isPtrLikeOptional(mod)) {
         // TODO should we reuse the operand here?
@@ -3459,7 +3455,7 @@ fn ptrElemVal(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = ptr_ty.childType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));

     // TODO optimize for elem_sizes of 1, 2, 4, 8
@@ -3617,7 +3613,7 @@ fn reuseOperand(

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = ptr_ty.elemType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);

     switch (ptr) {
@@ -3773,7 +3769,7 @@ fn genInlineMemset(
 ) !void {
     const dst_reg = switch (dst) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+        else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
     };
     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4096,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const ptr_ty = self.typeOf(operand);
-    const struct_ty = ptr_ty.childType();
+    const struct_ty = ptr_ty.childType(mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
     switch (mcv) {
         .ptr_stack_offset => |off| {
@@ -4173,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+        const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
@@ -4254,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };

@@ -4280,11 +4276,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             const ret_ptr_reg = self.registerAlias(.x0, Type.usize);

-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.register_manager.getReg(ret_ptr_reg, null);
             try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset });
@@ -4453,11 +4445,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
             //
             // self.ret_mcv is an address to where this function
             // should store its result into
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
         },
         else => unreachable,
@@ -4533,8 +4521,7 @@ fn cmp(
     const mod = self.bin_file.options.module.?;
     const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+            const payload_ty = lhs_ty.optionalChild(mod);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.u1;
             } else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -4850,8 +4837,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
     const mod = self.bin_file.options.module.?;
     const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = operand_ty.optionalChild(&buf);
+        const payload_ty = operand_ty.optionalChild(mod);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };
@@ -4947,11 +4933,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4973,11 +4960,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4999,11 +4987,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5025,11 +5014,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5511,11 +5501,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
         } else {
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ty);

             // TODO call extern memcpy
             const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5833,11 +5819,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
         } else {
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ty);

             // TODO call extern memcpy
             const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5957,12 +5939,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));

         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -6079,8 +6062,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 55ec0d4125..c08cb58c48 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1010,7 +1010,7 @@ fn allocMem(
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);

     if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
@@ -1117,17 +1117,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
             const ret_ty = self.fn_type.fnReturnType();
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);

             // addr_reg will contain the address of where to store the
             // result into
@@ -2372,8 +2369,8 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const elem_ty = ptr_ty.childType();
     const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));

     switch (elem_size) {
@@ -2474,7 +2471,8 @@ fn arrayElemVal(
     array_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    const elem_ty = array_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = array_ty.childType(mod);

     const mcv = try array_bind.resolveToMcv(self);
     switch (mcv) {
@@ -2508,11 +2506,7 @@ fn arrayElemVal(
             const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv };

-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = elem_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(elem_ty);

             return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst);
         },
@@ -2659,8 +2653,8 @@ fn reuseOperand(
 }

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
     const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));

     switch (ptr) {
@@ -2888,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const ptr_ty = self.typeOf(operand);
-    const struct_ty = ptr_ty.childType();
+    const struct_ty = ptr_ty.childType(mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
     switch (mcv) {
         .ptr_stack_offset => |off| {
@@ -3004,7 +2998,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+        const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);

         if (struct_ty.zigTypeTag(mod) == .Union) {
             return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
@@ -3898,9 +3892,9 @@ fn ptrArithmetic(
             assert(rhs_ty.eql(Type.usize, mod));

             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = @intCast(u32, elem_ty.abiSize(mod));
@@ -4079,7 +4073,7 @@ fn genInlineMemset(
 ) !void {
     const dst_reg = switch (dst) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+        else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
     };
     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4229,7 +4223,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };

@@ -4259,11 +4253,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
             const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);

-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.register_manager.getReg(.r0, null);
             try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
@@ -4401,11 +4391,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
             //
             // self.ret_mcv is an address to where this function
             // should store its result into
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
         },
         else => unreachable, // invalid return result
@@ -4482,8 +4468,7 @@ fn cmp(
     const mod = self.bin_file.options.module.?;
     const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+            const payload_ty = lhs_ty.optionalChild(mod);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.u1;
             } else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -4837,11 +4822,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4863,11 +4849,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4924,11 +4911,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4950,11 +4938,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5455,11 +5444,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
         } else {
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ty);

             // TODO call extern memcpy
             const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5816,11 +5801,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
         } else {
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ty);

             // TODO call extern memcpy
             const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5908,12 +5889,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));

         const stack_offset = try self.allocMem(8, 8, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -6026,8 +6008,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 488b937141..1e5858a948 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -807,7 +807,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u

 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
     const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
@@ -1099,9 +1099,9 @@ fn binOp(
     switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = elem_ty.abiSize(mod);
@@ -1502,7 +1502,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
 }

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     switch (ptr) {
         .none => unreachable,
         .undef => unreachable,
@@ -2496,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c565b6dc23..f8a62f9798 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -838,8 +838,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
@@ -871,12 +872,13 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));

         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -1300,7 +1302,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const mod = self.bin_file.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };

@@ -1440,8 +1442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
         .Pointer => Type.usize,
         .ErrorSet => Type.u16,
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+            const payload_ty = lhs_ty.optionalChild(mod);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.u1;
             } else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -2447,6 +2448,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const is_volatile = false; // TODO
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;

@@ -2456,8 +2458,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const index_mcv = try self.resolveInst(bin_op.rhs);

     const slice_ty = self.typeOf(bin_op.lhs);
-    const elem_ty = slice_ty.childType();
-    const mod = self.bin_file.options.module.?;
+    const elem_ty = slice_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);

     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -2797,7 +2798,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u

 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);

     if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
@@ -3001,9 +3002,9 @@ fn binOp(
     switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = elem_ty.abiSize(mod);
@@ -3019,7 +3020,7 @@ fn binOp(
                 // multiplying it with elem_size
                 const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);

-                const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
+                const addr = try self.binOp(tag, lhs, offset, Type.manyptr_u8, Type.usize, null);
                 return addr;
             }
         },
@@ -4042,11 +4043,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
         } else {
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ty);

             const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
             const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
@@ -4269,7 +4266,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = ptr_ty.elemType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);

     switch (ptr) {
@@ -4729,7 +4726,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const ptr_ty = self.typeOf(operand);
-    const struct_ty = ptr_ty.childType();
+    const struct_ty = ptr_ty.childType(mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
     switch (mcv) {
         .ptr_stack_offset => |off| {
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7fc5dbc825..96304628e9 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1542,7 +1542,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {

 fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
     const mod = func.bin_file.base.options.module.?;
     const ptr_ty = func.typeOfIndex(inst);
-    const pointee_ty = ptr_ty.childType();
+    const pointee_ty = ptr_ty.childType(mod);

     if (func.initial_stack_value == .none) {
         try func.initializeStack();
@@ -1766,8 +1766,7 @@ fn isByRef(ty: Type, mod: *const Module) bool {
         },
         .Optional => {
             if (ty.isPtrLikeOptional(mod)) return false;
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_type = ty.optionalChild(&buf);
+            const pl_type = ty.optionalChild(mod);
             if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
             return pl_type.hasRuntimeBitsIgnoreComptime(mod);
         },
@@ -2139,7 +2138,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

 fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
-    const child_type = func.typeOfIndex(inst).childType();
+    const child_type = func.typeOfIndex(inst).childType(mod);

     var result = result: {
         if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
@@ -2161,7 +2160,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const ret_ty = func.typeOf(un_op).childType();
+    const ret_ty = func.typeOf(un_op).childType(mod);
     const fn_info = func.decl.ty.fnInfo();

     if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2188,7 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     const mod = func.bin_file.base.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };
     const ret_ty = fn_ty.fnReturnType();
@@ -2301,8 +2300,8 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const ptr_info = ptr_ty.ptrInfo().data;
-    const ty = ptr_ty.childType();
+    const ptr_info = ptr_ty.ptrInfo(mod);
+    const ty = ptr_ty.childType(mod);

     if (ptr_info.host_size == 0) {
         try func.store(lhs, rhs, ty, 0);
@@ -2360,8 +2359,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
         if (ty.isPtrLikeOptional(mod)) {
             return func.store(lhs, rhs, Type.usize, 0);
         }
-        var buf: Type.Payload.ElemType = undefined;
-        const pl_ty = ty.optionalChild(&buf);
+        const pl_ty = ty.optionalChild(mod);
         if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return func.store(lhs, rhs, Type.u8, 0);
         }
@@ -2454,7 +2452,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.air.getRefType(ty_op.ty);
     const ptr_ty = func.typeOf(ty_op.operand);
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const ptr_info = ptr_ty.ptrInfo(mod);

     if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
@@ -2971,7 +2969,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
                     break :blk field_offset;
                 },
             },
-            .Pointer => switch (parent_ty.ptrSize()) {
+            .Pointer => switch (parent_ty.ptrSize(mod)) {
                 .Slice => switch (field_ptr.field_index) {
                     0 => 0,
                     1 => func.ptrSize(),
@@ -3001,11 +2999,7 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In
     const mod = func.bin_file.base.options.module.?;
     const decl = mod.declPtr(decl_index);
     mod.markDeclAlive(decl);
-    var ptr_ty_payload: Type.Payload.ElemType = .{
-        .base = .{ .tag = .single_mut_pointer },
-        .data = decl.ty,
-    };
-    const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+    const ptr_ty = try mod.singleMutPtrType(decl.ty);
     return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset);
 }

@@ -3145,8 +3139,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
         },
         .Optional => if (ty.optionalReprIsPayload(mod)) {
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_ty = ty.optionalChild(&buf);
+            const pl_ty = ty.optionalChild(mod);
             if (val.castTag(.opt_payload)) |payload| {
                 return func.lowerConstant(payload.data, pl_ty);
             } else if (val.isNull(mod)) {
@@ -3217,8 +3210,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             else => unreachable,
         },
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_ty = ty.optionalChild(&buf);
+            const pl_ty = ty.optionalChild(mod);
             if (ty.optionalReprIsPayload(mod)) {
                 return func.emitUndefined(pl_ty);
             }
@@ -3403,8 +3395,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
     assert(!(lhs != .stack and rhs == .stack));
     const mod = func.bin_file.base.options.module.?;
     if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = ty.optionalChild(&buf);
+        const payload_ty = ty.optionalChild(mod);
         if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // When we hit this case, we must check the value of optionals
             // that are not pointers.
This means first checking against non-null for @@ -3609,19 +3600,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn } fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = try func.resolveInst(extra.data.struct_operand); - const struct_ty = func.typeOf(extra.data.struct_operand).childType(); + const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod); const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index); func.finishAir(inst, result, &.{extra.data.struct_operand}); } fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try func.resolveInst(ty_op.operand); - const struct_ty = func.typeOf(ty_op.operand).childType(); + const struct_ty = func.typeOf(ty_op.operand).childType(mod); const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index); func.finishAir(inst, result, &.{ty_op.operand}); @@ -3640,7 +3633,7 @@ fn structFieldPtr( const offset = switch (struct_ty.containerLayout()) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { - if (result_ty.ptrInfo().data.host_size != 0) { + if (result_ty.ptrInfo(mod).host_size != 0) { break :offset @as(u32, 0); } break :offset struct_ty.packedStructFieldByteOffset(index, mod); @@ -3981,7 +3974,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; const payload_ty = err_ty.errorUnionPayload(); const result = result: { @@ -4009,7 +4002,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; const payload_ty = err_ty.errorUnionPayload(); const result = result: { @@ -4156,11 +4149,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const op_ty = func.typeOf(un_op); - const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty; + const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty; const is_null = try func.isNull(operand, optional_ty, opcode); const result = try is_null.toLocal(func, optional_ty); func.finishAir(inst, result, &.{un_op}); @@ -4171,8 +4165,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); - var buf: Type.Payload.ElemType = undefined; - const 
payload_ty = optional_ty.optionalChild(&buf); + const payload_ty = optional_ty.optionalChild(mod); if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value @@ -4221,14 +4214,13 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(mod); - const mod = func.bin_file.base.options.module.?; const result = result: { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const payload_ty = opt_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4242,9 +4234,8 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const opt_ty = func.typeOf(ty_op.operand).childType(mod); + const payload_ty = opt_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } @@ -4325,13 +4316,13 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); // load pointer onto stack @@ -4355,11 +4346,11 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); @@ -4436,7 +4427,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const array_ty = func.typeOf(ty_op.operand).childType(); + const array_ty = func.typeOf(ty_op.operand).childType(mod); const slice_ty = func.air.getRefType(ty_op.ty); // create a slice on the stack @@ -4448,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // 
store the length of the array in the slice - const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) }; + const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) }; try func.store(slice_local, len, Type.usize, func.ptrSize()); func.finishAir(inst, slice_local, &.{ty_op.operand}); @@ -4470,13 +4461,13 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr_ty = func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = ptr_ty.childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = ptr_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack @@ -4507,12 +4498,12 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = func.typeOf(bin_op.lhs); - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); const elem_size = elem_ty.abiSize(mod); const ptr = try func.resolveInst(bin_op.lhs); @@ -4544,9 +4535,9 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const offset = try func.resolveInst(bin_op.rhs); const ptr_ty = func.typeOf(bin_op.lhs); - const pointee_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const pointee_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; const valtype = typeToValtype(Type.usize, mod); @@ -4565,6 +4556,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -4575,16 +4567,16 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const ptr = try func.resolveInst(bin_op.lhs); const ptr_ty = func.typeOf(bin_op.lhs); const value = try func.resolveInst(bin_op.rhs); - const len = switch (ptr_ty.ptrSize()) { + const len = switch (ptr_ty.ptrSize(mod)) { .Slice => try func.sliceLen(ptr), - .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }), + .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }), .C, .Many => unreachable, }; - const elem_ty = if (ptr_ty.ptrSize() == .One) - ptr_ty.childType().childType() + const elem_ty = if (ptr_ty.ptrSize(mod) == .One) + ptr_ty.childType(mod).childType(mod) else - ptr_ty.childType(); + ptr_ty.childType(mod); const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty); try func.memset(elem_ty, dst_ptr, len, value); @@ -4686,13 +4678,13 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue } fn airArrayElemVal(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const array_ty = func.typeOf(bin_op.lhs); const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = array_ty.childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = array_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); if (isByRef(array_ty, mod)) { @@ -4810,7 +4802,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.typeOfIndex(inst); - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); if (determineSimdStoreStrategy(ty, mod) == .direct) blk: { switch (operand) { @@ -4859,7 +4851,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } const elem_size = elem_ty.bitSize(mod); - const vector_len = @intCast(usize, ty.vectorLen()); + const vector_len = @intCast(usize, ty.vectorLen(mod)); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } @@ -4895,7 +4887,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mask = func.air.values[extra.mask]; const mask_len = extra.mask_len; - const child_ty = inst_ty.childType(); + const child_ty = inst_ty.childType(mod); const elem_size = child_ty.abiSize(mod); // TODO: One of them could be by ref; handle in loop @@ -4959,16 +4951,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const result_ty = func.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); const result: WValue = result_value: { switch (result_ty.zigTypeTag(mod)) { .Array => { const result = try func.allocStack(result_ty); - const elem_ty = result_ty.childType(); + const elem_ty = result_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); - const sentinel = if (result_ty.sentinel()) |sent| blk: { + const sentinel = if (result_ty.sentinel(mod)) |sent| blk: { break :blk try func.lowerConstant(sent, elem_ty); } else null; @@ -5190,8 +5182,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: const mod = func.bin_file.base.options.module.?; assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod)); assert(op == .eq or op == .neq); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = operand_ty.optionalChild(&buf); + const payload_ty = operand_ty.optionalChild(mod); // We store the final result in here that will be validated // if the optional is truly equal. 
@@ -5268,7 +5259,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const un_ty = func.typeOf(bin_op.lhs).childType(); + const un_ty = func.typeOf(bin_op.lhs).childType(mod); const tag_ty = func.typeOf(bin_op.rhs); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); @@ -5398,7 +5389,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const err_set_ty = func.typeOf(ty_op.operand).childType(); + const err_set_ty = func.typeOf(ty_op.operand).childType(mod); const payload_ty = err_set_ty.errorUnionPayload(); const operand = try func.resolveInst(ty_op.operand); @@ -5426,7 +5417,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try func.resolveInst(extra.field_ptr); - const parent_ty = func.air.getRefType(ty_pl.ty).childType(); + const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const result = if (field_offset != 0) result: { @@ -5455,10 +5446,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const dst = try func.resolveInst(bin_op.lhs); const dst_ty = func.typeOf(bin_op.lhs); - const ptr_elem_ty = dst_ty.childType(); + const ptr_elem_ty = dst_ty.childType(mod); const src = try func.resolveInst(bin_op.rhs); const src_ty = func.typeOf(bin_op.rhs); - const len = switch (dst_ty.ptrSize()) { + const len = switch (dst_ty.ptrSize(mod)) { .Slice => blk: { const slice_len = try func.sliceLen(dst); if (ptr_elem_ty.abiSize(mod) != 1) { @@ -5470,7 +5461,7 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :blk slice_len; }, .One => @as(WValue, .{ - .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)), + .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)), }), .C, .Many => unreachable, }; @@ -5551,7 +5542,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // As the names are global and the slice elements are constant, we do not have // to make a copy of the ptr+value but can point towards them directly. 
const error_table_symbol = try func.bin_file.getErrorTableSymbol(); - const name_ty = Type.initTag(.const_slice_u8_sentinel_0); + const name_ty = Type.const_slice_u8_sentinel_0; const mod = func.bin_file.base.options.module.?; const abi_size = name_ty.abiSize(mod); @@ -5857,7 +5848,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addLabel(.local_set, overflow_bit.local.value); break :blk try func.wrapOperand(bin_op, lhs_ty); } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: { - const new_ty = Type.initTag(.u128); + const new_ty = Type.u128; var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty); defer lhs_upcast.free(func); var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty); @@ -5878,7 +5869,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = try func.callIntrinsic( "__multi3", &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + Type.i128, &.{ lhs, lhs_shifted, rhs, rhs_shifted }, ); const res = try func.allocLocal(lhs_ty); @@ -5902,19 +5893,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mul1 = try func.callIntrinsic( "__multi3", &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + Type.i128, &.{ lhs_lsb, zero, rhs_msb, zero }, ); const mul2 = try func.callIntrinsic( "__multi3", &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + Type.i128, &.{ rhs_lsb, zero, lhs_msb, zero }, ); const mul3 = try func.callIntrinsic( "__multi3", &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + Type.i128, &.{ lhs_msb, zero, rhs_msb, zero }, ); @@ -5942,7 +5933,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or"); try func.addLabel(.local_set, overflow_bit.local.value); - const tmp_result = try func.allocStack(Type.initTag(.u128)); + const tmp_result = try func.allocStack(Type.u128); try func.emitWValue(tmp_result); const mul3_msb = try func.load(mul3, Type.u64, 0); try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset()); @@ -6191,11 +6182,12 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try func.resolveInst(extra.data.ptr); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.typeOf(extra.data.ptr).childType(); + const err_union_ty = func.typeOf(extra.data.ptr).childType(mod); const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true); func.finishAir(inst, result, &.{extra.data.ptr}); } @@ -6845,11 +6837,11 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| { // for each tag name, create an unnamed const, // and then get a pointer to its value. 
- var name_ty_payload: Type.Payload.Len = .{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = @intCast(u64, tag_name.len), - }; - const name_ty = Type.initPayload(&name_ty_payload.base); + const name_ty = try mod.arrayType(.{ + .len = tag_name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const string_bytes = &mod.string_literal_bytes; try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{ @@ -6972,7 +6964,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } @@ -7068,7 +7060,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = func.typeOf(extra.ptr); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const result_ty = func.typeOfIndex(inst); const ptr_operand = try func.resolveInst(extra.ptr); @@ -7355,7 +7347,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const operand = try func.resolveInst(bin_op.rhs); const ptr_ty = func.typeOf(bin_op.lhs); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); if (func.useAtomicFeature()) { const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ad67a0db3d..f6304a0ff3 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2259,7 +2259,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { const mod = self.bin_file.options.module.?; const ptr_ty = self.typeOfIndex(inst); - const val_ty = ptr_ty.childType(); + const val_ty = ptr_ty.childType(mod); return self.allocFrameIndex(FrameAlloc.init(.{ .size = math.cast(u32, val_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); @@ -2289,8 +2289,8 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b 80 => break :need_mem, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, 80 => break :need_mem, else => unreachable, @@ -2727,12 +2727,12 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); if (dst_ty.zigTypeTag(mod) == .Vector) { - assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); - const dst_info = dst_ty.childType().intInfo(mod); - const src_info = src_ty.childType().intInfo(mod); + assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod)); + const dst_info = dst_ty.childType(mod).intInfo(mod); + const src_info = src_ty.childType(mod).intInfo(mod); const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { 8 => switch 
(src_info.bits) { - 16 => switch (dst_ty.vectorLen()) { + 16 => switch (dst_ty.vectorLen(mod)) { 1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw }, 9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null, else => null, @@ -2740,7 +2740,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else => null, }, 16 => switch (src_info.bits) { - 32 => switch (dst_ty.vectorLen()) { + 32 => switch (dst_ty.vectorLen(mod)) { 1...4 => if (self.hasFeature(.avx)) .{ .vp_w, .ackusd } else if (self.hasFeature(.sse4_1)) @@ -2769,14 +2769,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { }; const splat_val = Value.initPayload(&splat_pl.base); - var full_pl = Type.Payload.Array{ - .base = .{ .tag = .vector }, - .data = .{ - .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits), - .elem_type = src_ty.childType(), - }, - }; - const full_ty = Type.initPayload(&full_pl.base); + const full_ty = try mod.vectorType(.{ + .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), + .child = src_ty.childType(mod).ip_index, + }); const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); @@ -3587,7 +3583,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const result = result: { const dst_ty = self.typeOfIndex(inst); const src_ty = self.typeOf(ty_op.operand); - const opt_ty = src_ty.childType(); + const opt_ty = src_ty.childType(mod); const src_mcv = try self.resolveInst(ty_op.operand); if (opt_ty.optionalReprIsPayload(mod)) { @@ -3607,7 +3603,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - const pl_ty = dst_ty.childType(); + const pl_ty = dst_ty.childType(mod); const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); try self.genSetMem(.{ .reg = dst_mcv.getReg().? 
}, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; @@ -3737,7 +3733,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const eu_ty = src_ty.childType(); + const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); @@ -3777,7 +3773,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const eu_ty = src_ty.childType(); + const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(); const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); @@ -3803,7 +3799,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const eu_ty = src_ty.childType(); + const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); @@ -4057,7 +4053,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { }; defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock); - const elem_ty = slice_ty.childType(); + const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -4116,7 +4112,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { }; defer if (array_lock) |lock| self.register_manager.unlockReg(lock); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.typeOf(bin_op.rhs); @@ -4253,7 +4249,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_union_ty = self.typeOf(bin_op.lhs); - const union_ty = ptr_union_ty.childType(); + const union_ty = ptr_union_ty.childType(mod); const tag_ty = self.typeOf(bin_op.rhs); const layout = union_ty.unionGetLayout(mod); @@ -4287,7 +4283,9 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { break :blk MCValue{ .register = reg }; } else ptr; - var ptr_tag_pl = ptr_union_ty.ptrInfo(); + var ptr_tag_pl: Type.Payload.Pointer = .{ + .data = ptr_union_ty.ptrInfo(mod), + }; ptr_tag_pl.data.pointee_type = tag_ty; const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base); try self.store(ptr_tag_ty, adjusted_ptr, tag); @@ -4924,14 +4922,11 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - var vec_pl = Type.Payload.Array{ - .base = .{ .tag = .vector }, - .data = .{ - .len = @divExact(abi_size * 8, scalar_bits), - .elem_type = try mod.intType(.signed, scalar_bits), - }, - }; - const vec_ty = Type.initPayload(&vec_pl.base); + const vec_ty = try mod.vectorType(.{ + .len = @divExact(abi_size * 8, scalar_bits), + .child = (try mod.intType(.signed, 
scalar_bits)).ip_index, + }); + const sign_val = switch (tag) { .neg => try vec_ty.minInt(stack.get(), mod), .fabs => try vec_ty.maxInt(stack.get(), mod), @@ -5034,15 +5029,15 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, 2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round }, 5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, 2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round }, 3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null, @@ -5131,9 +5126,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) { 1 => { try self.asmRegisterRegister( .{ .v_ps, .cvtph2 }, @@ -5184,13 +5179,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, else => null, } else null, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt }, 2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt }, 5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt }, 2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt }, 3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null, @@ -5292,7 +5287,7 @@ fn reuseOperandAdvanced( fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const val_ty = ptr_info.pointee_type; const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); @@ -5365,7 +5360,8 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn } fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { - const dst_ty = ptr_ty.childType(); + const mod = self.bin_file.options.module.?; + const dst_ty = ptr_ty.childType(mod); switch (ptr_mcv) { .none, .unreach, @@ -5424,7 +5420,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { else try self.allocRegOrMem(inst, true); - if (ptr_ty.ptrInfo().data.host_size > 0) { + if (ptr_ty.ptrInfo(mod).host_size > 0) { try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv); } else { try self.load(dst_mcv, ptr_ty, ptr_mcv); @@ -5436,8 +5432,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) 
InnerError!void { const mod = self.bin_file.options.module.?; - const ptr_info = ptr_ty.ptrInfo().data; - const src_ty = ptr_ty.childType(); + const ptr_info = ptr_ty.ptrInfo(mod); + const src_ty = ptr_ty.childType(mod); const limb_abi_size: u16 = @min(ptr_info.host_size, 8); const limb_abi_bits = limb_abi_size * 8; @@ -5509,7 +5505,8 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In } fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const src_ty = ptr_ty.childType(); + const mod = self.bin_file.options.module.?; + const src_ty = ptr_ty.childType(mod); switch (ptr_mcv) { .none, .unreach, @@ -5544,6 +5541,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr } fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -5553,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const ptr_mcv = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const src_mcv = try self.resolveInst(bin_op.rhs); - if (ptr_ty.ptrInfo().data.host_size > 0) { + if (ptr_ty.ptrInfo(mod).host_size > 0) { try self.packedStore(ptr_ty, ptr_mcv, src_mcv); } else { try self.store(ptr_ty, ptr_mcv, src_mcv); @@ -5578,11 +5576,11 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const mod = self.bin_file.options.module.?; const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); - const container_ty = ptr_container_ty.childType(); + const container_ty = ptr_container_ty.childType(mod); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and - ptr_field_ty.ptrInfo().data.host_size == 0) + ptr_field_ty.ptrInfo(mod).host_size == 0) container_ty.packedStructFieldByteOffset(index, mod) else 0, @@ -5760,7 +5758,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const inst_ty = self.typeOfIndex(inst); - const parent_ty = inst_ty.childType(); + const parent_ty = inst_ty.childType(mod); const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); const src_mcv = try self.resolveInst(extra.field_ptr); @@ -6680,10 +6678,10 @@ fn genBinOp( 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { else => null, - .Int => switch (lhs_ty.childType().intInfo(mod).bits) { - 8 => switch (lhs_ty.vectorLen()) { + .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) { + 8 => switch (lhs_ty.vectorLen(mod)) { 1...16 => switch (air_tag) { .add, .addwrap, @@ -6694,7 +6692,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .mins } else if (self.hasFeature(.sse4_1)) @@ -6708,7 +6706,7 @@ fn genBinOp( else null, }, - .max => switch 
(lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6734,11 +6732,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, }, @@ -6746,7 +6744,7 @@ fn genBinOp( }, else => null, }, - 16 => switch (lhs_ty.vectorLen()) { + 16 => switch (lhs_ty.vectorLen(mod)) { 1...8 => switch (air_tag) { .add, .addwrap, @@ -6760,7 +6758,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .mins } else @@ -6770,7 +6768,7 @@ fn genBinOp( else .{ .p_w, .minu }, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else @@ -6795,11 +6793,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, }, @@ -6807,7 +6805,7 @@ fn genBinOp( }, else => null, }, - 32 => switch (lhs_ty.vectorLen()) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => switch (air_tag) { .add, .addwrap, @@ -6826,7 +6824,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .mins } else if (self.hasFeature(.sse4_1)) @@ -6840,7 +6838,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, 
.maxs } else if (self.hasFeature(.sse4_1)) @@ -6869,11 +6867,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, }, @@ -6881,7 +6879,7 @@ fn genBinOp( }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => switch (air_tag) { .add, .addwrap, @@ -6910,8 +6908,8 @@ fn genBinOp( }, else => null, }, - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen(mod)) { 1 => { const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128(); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); @@ -7086,7 +7084,7 @@ fn genBinOp( }, else => null, } else null, - 32 => switch (lhs_ty.vectorLen()) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => switch (air_tag) { .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add }, .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub }, @@ -7124,7 +7122,7 @@ fn genBinOp( } else null, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => switch (air_tag) { .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add }, .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub }, @@ -7236,14 +7234,14 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ .v_ss, .cmp }, 2...8 => .{ .v_ps, .cmp }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ .v_sd, .cmp }, 2...4 => .{ .v_pd, .cmp }, else => null, @@ -7270,13 +7268,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...8 => .{ .v_ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ .v_pd, .blendv }, else => null, }, @@ -7304,14 +7302,14 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch 
(lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ ._ss, .cmp }, 2...4 => .{ ._ps, .cmp }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ ._sd, .cmp }, 2 => .{ ._pd, .cmp }, else => null, @@ -7337,13 +7335,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .blendv }, else => null, }, @@ -7368,13 +7366,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"and" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"and" }, else => null, }, @@ -7398,13 +7396,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .andn }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .andn }, else => null, }, @@ -7428,13 +7426,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"or" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"or" }, else => null, }, @@ -7586,11 +7584,7 @@ fn genBinOpMir( .load_got, .load_tlv, => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); + const ptr_ty = try mod.singleConstPtrType(ty); const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address()); return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg }, @@ -8058,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -8506,10 +8500,11 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { } fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { + 
const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8683,8 +8678,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const pl_ty = opt_ty.optionalChild(mod); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) @@ -8775,9 +8769,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const opt_ty = ptr_ty.childType(); - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const opt_ty = ptr_ty.childType(mod); + const pl_ty = opt_ty.optionalChild(mod); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) @@ -8919,6 +8912,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8939,7 +8933,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isErr(inst, ptr_ty.childType(), operand); + const result = try self.isErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8953,6 +8947,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8973,7 +8968,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isNonErr(inst, ptr_ty.childType(), operand); + const result = try self.isNonErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -9452,9 +9447,9 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Int => switch (ty.childType().intInfo(mod).bits) { - 8 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Int => switch (ty.childType(mod).intInfo(mod).bits) { + 8 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, .extract = .{ .vp_b, .extr }, @@ -9484,7 +9479,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } 
}, else => {}, }, - 16 => switch (ty.vectorLen()) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9507,7 +9502,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_d, .mov } else @@ -9523,7 +9518,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_q, .mov } else @@ -9535,7 +9530,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -9543,15 +9538,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 256 => switch (ty.vectorLen()) { + 256 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, else => {}, }, - .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => switch (ty.vectorLen()) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9574,7 +9569,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_ss, .mov } else @@ -9590,7 +9585,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_sd, .mov } else @@ -9602,7 +9597,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -10248,8 +10243,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOfIndex(inst); const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = array_ty.arrayLen(); + const array_ty = ptr_ty.childType(mod); + const array_len = array_ty.arrayLen(mod); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try 
self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); @@ -10790,16 +10785,16 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { - const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { + const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr, .One => dst_ptr, .C, .Many => unreachable, }; - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr.address().offset(8).deref(), - .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() }, + .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) }, .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { @@ -10815,7 +10810,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // Store the first element, and then rely on memcpy copying forwards. // Length zero requires a runtime check - so we handle arrays specially // here to elide it. - switch (dst_ptr_ty.ptrSize()) { + switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf); @@ -10858,13 +10853,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { try self.performReloc(skip_reloc); }, .One => { - var elem_ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_mut_pointer }, - .data = elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base); + const elem_ptr_ty = try mod.singleMutPtrType(elem_ty); - const len = dst_ptr_ty.childType().arrayLen(); + const len = dst_ptr_ty.childType(mod).arrayLen(mod); assert(len != 0); // prevented by Sema try self.store(elem_ptr_ty, dst_ptr, src_val); @@ -10889,6 +10880,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); @@ -10906,9 +10898,9 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { }; defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock); - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { .Slice => dst_ptr.address().offset(8).deref(), - .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() }, + .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) }, .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { @@ -11059,7 +11051,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { - 32 => switch (vector_ty.vectorLen()) { + 32 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11139,7 +11131,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 64 => switch (vector_ty.vectorLen()) { + 64 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11205,7 +11197,7 @@ fn airSplat(self: *Self, inst: 
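The `airMemset` hunk above keeps the comment's strategy: store the first element, then rely on a forward element-by-element copy to replicate it, eliding the length-zero runtime check for arrays whose length is known from the type. A user-space model of that replication trick (the backend emits its own inline forward copy; libc `memcpy` does not permit this overlap):

```zig
const std = @import("std");

// Sketch of "store the first element, then copy forwards": a byte-wise
// forward overlapping copy replicates element 0 across the buffer.
fn splatForward(comptime T: type, buf: []T, value: T) void {
    if (buf.len == 0) return;
    buf[0] = value;
    const bytes = std.mem.sliceAsBytes(buf);
    var i: usize = @sizeOf(T);
    while (i < bytes.len) : (i += 1) bytes[i] = bytes[i - @sizeOf(T)];
}

pub fn main() void {
    var buf: [4]u16 = undefined;
    splatForward(u16, &buf, 0xBEEF);
    std.debug.print("{any}\n", .{buf}); // all elements become 0xBEEF
}
```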
Air.Inst.Index) !void { }, else => {}, }, - 128 => switch (vector_ty.vectorLen()) { + 128 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11271,7 +11263,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { @@ -11375,7 +11367,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .Array => { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); - const elem_ty = result_ty.childType(); + const elem_ty = result_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { @@ -11387,7 +11379,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = @intCast(i32, elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } - if (result_ty.sentinel()) |sentinel| try self.genSetMem( + if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, elem_size * elements.len), elem_ty, @@ -11512,14 +11504,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd132 }, 2...8 => .{ .v_ps, .fmadd132 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd132 }, 2...4 => .{ .v_pd, .fmadd132 }, else => null, @@ -11539,14 +11531,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd213 }, 2...8 => .{ .v_ps, .fmadd213 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd213 }, 2...4 => .{ .v_pd, .fmadd213 }, else => null, @@ -11566,14 +11558,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd231 }, 2...8 => .{ .v_ps, .fmadd231 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd231 }, 2...4 => .{ .v_pd, .fmadd231 }, 
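The three `airMulAdd` tables above differ only in the fmadd operand ordering (132/213/231); each picks between scalar (`ss`/`sd`) and packed (`ps`/`pd`) forms by float width and vector length, with `null` meaning no single-instruction form exists. A hedged model of one such table:

```zig
const std = @import("std");

// Sketch: float width and vector length select the FMA form; lengths past
// one 256-bit ymm register fall through to null (fallback path, not modeled).
fn fmaForm(float_bits: u16, vector_len: u32) ?[]const u8 {
    return switch (float_bits) {
        32 => switch (vector_len) {
            1 => "vfmadd132ss",
            2...8 => "vfmadd132ps", // 8 x f32 fills a 256-bit register
            else => null,
        },
        64 => switch (vector_len) {
            1 => "vfmadd132sd",
            2...4 => "vfmadd132pd", // 4 x f64 fills a 256-bit register
            else => null,
        },
        else => null, // f16/f80/f128 have no direct fmadd in the table above
    };
}

pub fn main() void {
    std.debug.print("{s}\n", .{fmaForm(32, 4) orelse "libcall"});
}
```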
else => null, diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index c8d20c73fa..ea75a1f4d2 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -76,7 +76,7 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { }; var result = [1]Class{.none} ** 8; switch (ty.zigTypeTag(mod)) { - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -158,8 +158,8 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(mod) * ty.arrayLen(); + const elem_ty = ty.childType(mod); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, diff --git a/src/codegen.zig b/src/codegen.zig index c9e2c6c265..a807400502 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -230,7 +230,7 @@ pub fn generateSymbol( .Array => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); + const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); // The bytes payload already includes the sentinel, if any try code.ensureUnusedCapacity(len); code.appendSliceAssumeCapacity(bytes[0..len]); @@ -241,7 +241,7 @@ pub fn generateSymbol( const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try code.ensureUnusedCapacity(bytes.len + 1); code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel()) |sent_val| { + if (typed_value.ty.sentinel(mod)) |sent_val| { const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); code.appendAssumeCapacity(byte); } @@ -249,8 +249,8 @@ pub fn generateSymbol( }, .aggregate => { const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); + const elem_ty = typed_value.ty.childType(mod); + const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); for (elem_vals[0..len]) |elem_val| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = elem_ty, @@ -264,9 +264,9 @@ pub fn generateSymbol( }, .repeated => { const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(); - const sentinel = typed_value.ty.sentinel(); - const len = typed_value.ty.arrayLen(); + const elem_ty = typed_value.ty.childType(mod); + const sentinel = typed_value.ty.sentinel(mod); + const len = typed_value.ty.arrayLen(mod); var index: u64 = 0; while (index < len) : (index += 1) { @@ -292,8 +292,8 @@ pub fn generateSymbol( return Result.ok; }, .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(); - const sentinel_val = typed_value.ty.sentinel().?; + const elem_ty = typed_value.ty.childType(mod); + const sentinel_val = typed_value.ty.sentinel(mod).?; switch (try generateSymbol(bin_file, src_loc, .{ .ty = elem_ty, .val = sentinel_val, @@ -618,8 +618,7 @@ pub fn generateSymbol( return Result.ok; }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = typed_value.ty.optionalChild(&opt_buf); + const payload_type = typed_value.ty.optionalChild(mod); const is_pl = !typed_value.val.isNull(mod); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; @@ -751,7 +750,7 @@ pub fn 
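The `classifySystemV` hunk above makes the vector rule explicit: the total bit width, element bits times `arrayLen(mod)`, decides the first eightbyte's class. A small sketch of just the rule visible in the hunk:

```zig
const std = @import("std");

const Class = enum { integer, sse, memory };

// Sketch: a vector whose total bits fit one eightbyte classifies as SSE;
// wider vectors take the multi-eightbyte paths (not modeled here).
fn classifyVectorStart(elem_bits: u64, len: u64) ?Class {
    const bits = elem_bits * len;
    if (bits <= 64) return .sse;
    return null;
}

pub fn main() void {
    std.debug.print("{any}\n", .{classifyVectorStart(8, 8)}); // .sse
}
```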
generateSymbol( .Vector => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse return error.Overflow; try code.ensureUnusedCapacity(len + padding); @@ -761,8 +760,8 @@ pub fn generateSymbol( }, .aggregate => { const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; + const elem_ty = typed_value.ty.childType(mod); + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; const padding = math.cast(usize, typed_value.ty.abiSize(mod) - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, @@ -782,8 +781,8 @@ pub fn generateSymbol( }, .repeated => { const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(); - const len = typed_value.ty.arrayLen(); + const elem_ty = typed_value.ty.childType(mod); + const len = typed_value.ty.arrayLen(mod); const padding = math.cast(usize, typed_value.ty.abiSize(mod) - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, @@ -1188,7 +1187,7 @@ pub fn genTypedValue( switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), - .Pointer => switch (typed_value.ty.ptrSize()) { + .Pointer => switch (typed_value.ty.ptrSize(mod)) { .Slice => {}, else => { switch (typed_value.val.tag()) { @@ -1219,9 +1218,8 @@ pub fn genTypedValue( if (typed_value.ty.isPtrLikeOptional(mod)) { if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); - var buf: Type.Payload.ElemType = undefined; return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.optionalChild(&buf), + .ty = typed_value.ty.optionalChild(mod), .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, }, owner_decl_index); } else if (typed_value.ty.abiSize(mod) == 1) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cd4f36e574..e6ec461e43 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -625,7 +625,9 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
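The `genTypedValue` hunk above preserves the pointer-like-optional rule: a `?*T` reuses the pointer's own representation, so `null` lowers to the immediate 0 and a payload lowers as the pointer itself. A one-function sketch of that encoding:

```zig
const std = @import("std");

// Sketch of the ?*T rule: null is address zero, a payload is the address.
fn lowerPtrLikeOptional(payload_addr: ?u64) u64 {
    return payload_addr orelse 0; // .null_value => .{ .immediate = 0 }
}

pub fn main() void {
    std.debug.print("{x} {x}\n", .{ lowerPtrLikeOptional(null), lowerPtrLikeOptional(0x1000) });
}
```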
_ = try dg.typeToIndex(field_ptr.container_ty, .complete); - var container_ptr_pl = ptr_ty.ptrInfo(); + var container_ptr_pl: Type.Payload.Pointer = .{ + .data = ptr_ty.ptrInfo(mod), + }; container_ptr_pl.data.pointee_type = field_ptr.container_ty; const container_ptr_ty = Type.initPayload(&container_ptr_pl.base); @@ -653,7 +655,9 @@ pub const DeclGen = struct { try dg.writeCValue(writer, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -692,11 +696,10 @@ pub const DeclGen = struct { }, .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var elem_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = elem_ptr.elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base); + const elem_ptr_ty = try mod.ptrType(.{ + .size = .C, + .elem_type = elem_ptr.elem_ty.ip_index, + }); try writer.writeAll("&("); try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); @@ -704,11 +707,10 @@ pub const DeclGen = struct { }, .opt_payload_ptr, .eu_payload_ptr => { const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - var container_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = payload_ptr.container_ty, - }; - const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base); + const container_ptr_ty = try mod.ptrType(.{ + .elem_type = payload_ptr.container_ty.ip_index, + .size = .C, + }); // Ensure complete type definition is visible before accessing fields. _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); @@ -794,8 +796,7 @@ pub const DeclGen = struct { return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, Type.bool, val, location); @@ -889,11 +890,11 @@ pub const DeclGen = struct { return writer.writeAll(" }"); }, .Array, .Vector => { - const ai = ty.arrayInfo(); + const ai = ty.arrayInfo(mod); if (ai.elem_type.eql(Type.u8, dg.module)) { var literal = stringLiteral(writer); try literal.start(); - const c_len = ty.arrayLenIncludingSentinel(); + const c_len = ty.arrayLenIncludingSentinel(mod); var index: u64 = 0; while (index < c_len) : (index += 1) try literal.writeChar(0xaa); @@ -906,11 +907,11 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - const c_len = ty.arrayLenIncludingSentinel(); + const c_len = ty.arrayLenIncludingSentinel(mod); var index: u64 = 0; while (index < c_len) : (index += 1) { if (index > 0) try writer.writeAll(", "); - try dg.renderValue(writer, ty.childType(), val, initializer_type); + try dg.renderValue(writer, ty.childType(mod), val, initializer_type); } return writer.writeByte('}'); } @@ -1110,7 +1111,7 @@ pub const DeclGen = struct { // First try specific tag representations for more efficiency. 
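Here the C backend swaps stack-built `Type.Payload.ElemType` values for `try mod.ptrType(...)`, i.e. pointer types become interned, deduplicated keys. A toy interner showing that shape, with hypothetical key fields (the real `InternPool` key is richer):

```zig
const std = @import("std");

// Toy interner: a pointer type is a hashable (size, elem) key, deduplicated
// in a map. This mirrors why construction can now fail with `try` — interning
// may allocate — where the old stack payloads could not.
const PtrSize = enum { one, many, slice, c };
const PtrKey = struct { size: PtrSize, elem: u32 };

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var pool = std.AutoHashMap(PtrKey, u32).init(gpa.allocator());
    defer pool.deinit();

    const gop = try pool.getOrPut(.{ .size = .c, .elem = 7 });
    if (!gop.found_existing) gop.value_ptr.* = pool.count() - 1;
    std.debug.print("interned at index {d}\n", .{gop.value_ptr.*});
}
```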
switch (val.tag()) { .undef, .empty_struct_value, .empty_array => { - const ai = ty.arrayInfo(); + const ai = ty.arrayInfo(mod); try writer.writeByte('{'); if (ai.sentinel) |s| { try dg.renderValue(writer, ai.elem_type, s, initializer_type); @@ -1128,9 +1129,9 @@ pub const DeclGen = struct { }, else => unreachable, }; - const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; + const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), + fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), }); }, else => { @@ -1142,7 +1143,7 @@ pub const DeclGen = struct { // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal const max_string_initializer_len = 65535; - const ai = ty.arrayInfo(); + const ai = ty.arrayInfo(mod); if (ai.elem_type.eql(Type.u8, dg.module)) { if (ai.len <= max_string_initializer_len) { var literal = stringLiteral(writer); @@ -1198,8 +1199,7 @@ pub const DeclGen = struct { } }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); const is_null_val = Value.makeBool(val.tag() == .null_value); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) @@ -2410,12 +2410,13 @@ pub fn genGlobalAsm(mod: *Module, writer: anytype) !void { } pub fn genErrDecls(o: *Object) !void { + const mod = o.dg.module; const writer = o.writer(); try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); var max_name_len: usize = 0; - for (o.dg.module.error_name_list.items, 0..) |name, value| { + for (mod.error_name_list.items, 0..) |name, value| { max_name_len = std.math.max(name.len, max_name_len); var err_pl = Value.Payload.Error{ .data = .{ .name = name } }; try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other); @@ -2430,12 +2431,15 @@ pub fn genErrDecls(o: *Object) !void { defer o.dg.gpa.free(name_buf); @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (o.dg.module.error_name_list.items) |name| { + for (mod.error_name_list.items) |name| { @memcpy(name_buf[name_prefix.len..][0..name.len], name); const identifier = name_buf[0 .. name_prefix.len + name.len]; - var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; - const name_ty = Type.initPayload(&name_ty_pl.base); + const name_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); @@ -2448,15 +2452,15 @@ pub fn genErrDecls(o: *Object) !void { } var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ - .len = o.dg.module.error_name_list.items.len, - .elem_type = Type.initTag(.const_slice_u8_sentinel_0), + .len = mod.error_name_list.items.len, + .elem_type = Type.const_slice_u8_sentinel_0, } }; const name_array_ty = Type.initPayload(&name_array_ty_pl.base); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); try writer.writeAll(" = {"); - for (o.dg.module.error_name_list.items, 0..) |name, value| { + for (mod.error_name_list.items, 0..) 
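The `genErrDecls` hunk replaces the `array_u8_sentinel_0` payload with `mod.arrayType(.{ .len = name.len, .child = .u8_type, .sentinel = .zero_u8 })`, which describes a `[len:0]u8`. A sketch of what that key means at the language level, assuming `len == 5`:

```zig
const std = @import("std");

// The interned key above is the type `[5:0]u8`: five bytes of name plus a
// 0 sentinel, which @sizeOf counts as one extra byte.
const Name = [5:0]u8;

comptime {
    std.debug.assert(@sizeOf(Name) == 6);
}

pub fn main() void {
    const n: Name = "error".*;
    std.debug.print("{s} ends with 0x{x}\n", .{ &n, n[5] }); // n[5] is the sentinel
}
```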
|name, value| { if (value != 0) try writer.writeByte(','); var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; @@ -2487,6 +2491,7 @@ fn genExports(o: *Object) !void { } pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { + const mod = o.dg.module; const w = o.writer(); const key = lazy_fn.key_ptr.*; const val = lazy_fn.value_ptr; @@ -2495,7 +2500,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .tag_name => { const enum_ty = val.data.tag_name; - const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const name_slice_ty = Type.const_slice_u8_sentinel_0; try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); @@ -2514,11 +2519,11 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { var int_pl: Value.Payload.U64 = undefined; const int_val = tag_val.enumToInt(enum_ty, &int_pl); - var name_ty_pl = Type.Payload.Len{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = name.len, - }; - const name_ty = Type.initPayload(&name_ty_pl.base); + const name_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); @@ -2547,7 +2552,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeAll("}\n"); }, .never_tail, .never_inline => |fn_decl_index| { - const fn_decl = o.dg.module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete); const fn_info = fn_cty.cast(CType.Payload.Function).?.data; @@ -3150,7 +3155,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const ptr = try f.resolveInst(bin_op.lhs); @@ -3166,7 +3171,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, inst_ty); try writer.writeByte(')'); if (elem_has_bits) try writer.writeByte('&'); - if (elem_has_bits and ptr_ty.ptrSize() == .One) { + if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) { // It's a pointer to an array, so we need to de-reference. 
try f.writeCValueDeref(writer, ptr); } else try f.writeCValue(writer, ptr, .Other); @@ -3264,7 +3269,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const inst_ty = f.typeOfIndex(inst); - const elem_type = inst_ty.elemType(); + const elem_type = inst_ty.childType(mod); if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue( @@ -3280,7 +3285,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const inst_ty = f.typeOfIndex(inst); - const elem_ty = inst_ty.elemType(); + const elem_ty = inst_ty.childType(mod); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue( @@ -3323,7 +3328,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr_ty = f.typeOf(ty_op.operand); const ptr_scalar_ty = ptr_ty.scalarType(mod); - const ptr_info = ptr_scalar_ty.ptrInfo().data; + const ptr_info = ptr_scalar_ty.ptrInfo(mod); const src_ty = ptr_info.pointee_type; if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -3412,7 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const writer = f.object.writer(); const op_inst = Air.refToIndex(un_op); const op_ty = f.typeOf(un_op); - const ret_ty = if (is_ptr) op_ty.childType() else op_ty; + const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); @@ -3601,7 +3606,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_ty = f.typeOf(bin_op.lhs); const ptr_scalar_ty = ptr_ty.scalarType(mod); - const ptr_info = ptr_scalar_ty.ptrInfo().data; + const ptr_info = ptr_scalar_ty.ptrInfo(mod); const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); @@ -4156,7 +4161,7 @@ fn airCall( const callee_ty = f.typeOf(pl_op.operand); const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; @@ -4331,10 +4336,11 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.typeOf(extra.data.ptr).childType(); + const err_union_ty = f.typeOf(extra.data.ptr).childType(mod); return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true); } @@ -4826,7 +4832,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { - const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(); + const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod); try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); @@ -5061,9 +5067,8 @@ fn airIsNull( } const operand_ty = f.typeOf(un_op); - const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty; - var payload_buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&payload_buf); + const 
optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; + const payload_ty = optional_ty.optionalChild(mod); var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) @@ -5097,8 +5102,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const opt_ty = f.typeOf(ty_op.operand); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const payload_ty = opt_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return .none; @@ -5132,10 +5136,10 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const ptr_ty = f.typeOf(ty_op.operand); - const opt_ty = ptr_ty.childType(); + const opt_ty = ptr_ty.childType(mod); const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { + if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; } @@ -5163,7 +5167,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.typeOf(ty_op.operand); - const opt_ty = operand_ty.elemType(); + const opt_ty = operand_ty.childType(mod); const inst_ty = f.typeOfIndex(inst); @@ -5221,7 +5225,7 @@ fn fieldLocation( else .{ .identifier = container_ty.structFieldName(next_field_index) } }; } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, - .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0) + .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } else .begin, @@ -5243,7 +5247,7 @@ fn fieldLocation( }, .Packed => .begin, }, - .Pointer => switch (container_ty.ptrSize()) { + .Pointer => switch (container_ty.ptrSize(mod)) { .Slice => switch (field_index) { 0 => .{ .field = .{ .identifier = "ptr" } }, 1 => .{ .field = .{ .identifier = "len" } }, @@ -5280,7 +5284,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const container_ptr_ty = f.typeOfIndex(inst); - const container_ty = container_ptr_ty.childType(); + const container_ty = container_ptr_ty.childType(mod); const field_ptr_ty = f.typeOf(extra.field_ptr); const field_ptr_val = try f.resolveInst(extra.field_ptr); @@ -5296,7 +5300,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = field_ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -5311,7 +5317,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = field_ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -5345,7 +5353,7 @@ fn fieldPtr( field_index: u32, ) !CValue { const mod = f.object.dg.module; - const container_ty = container_ptr_ty.elemType(); + const container_ty = 
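The `fieldLocation` hunk above distinguishes packed-struct pointers by `host_size`: when it is 0 the field pointer is plain address arithmetic, a base pointer plus `packedStructFieldByteOffset`. A hedged illustration of what that byte offset is for a field that lands on a byte boundary:

```zig
const std = @import("std");

// Illustration only: for byte-aligned packed fields, the pointer offset is
// the field's bit offset divided by 8.
const Flags = packed struct { a: u8, b: u8, c: u16 };

pub fn main() void {
    std.debug.print("byte offset of c: {d}\n", .{@bitOffsetOf(Flags, "c") / 8}); // 2
}
```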
container_ptr_ty.childType(mod); const field_ptr_ty = f.typeOfIndex(inst); // Ensure complete type definition is visible before accessing fields. @@ -5365,7 +5373,9 @@ fn fieldPtr( try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = field_ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -5532,7 +5542,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; - const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const local = try f.allocLocal(inst, inst_ty); @@ -5569,7 +5579,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.typeOf(ty_op.operand); - const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; + const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const writer = f.object.writer(); if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) { @@ -5673,7 +5683,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); - const error_union_ty = f.typeOf(ty_op.operand).childType(); + const error_union_ty = f.typeOf(ty_op.operand).childType(mod); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); @@ -5761,7 +5771,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try reap(f, inst, &.{un_op}); const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, Type.bool); - const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); const error_ty = err_union_ty.errorUnionSet(); @@ -5795,7 +5805,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const array_ty = f.typeOf(ty_op.operand).childType(); + const array_ty = f.typeOf(ty_op.operand).childType(mod); try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); try writer.writeAll(" = "); @@ -5811,7 +5821,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); - const array_len = array_ty.arrayLen(); + const array_len = array_ty.arrayLen(mod); var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; const len_val = Value.initPayload(&len_pl.base); try f.writeCValueMember(writer, local, .{ .identifier = "len" }); @@ -6050,7 +6060,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const expected_value = try f.resolveInst(extra.expected_value); const new_value = try f.resolveInst(extra.new_value); const ptr_ty = 
f.typeOf(extra.ptr); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const writer = f.object.writer(); const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value); @@ -6152,7 +6162,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(pl_op.operand); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const ptr = try f.resolveInst(pl_op.operand); const operand = try f.resolveInst(extra.operand); @@ -6207,7 +6217,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); const ptr_ty = f.typeOf(atomic_load.ptr); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const repr_ty = if (ty.isRuntimeFloat()) mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable @@ -6241,7 +6251,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.typeOf(bin_op.lhs); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const ptr = try f.resolveInst(bin_op.lhs); const element = try f.resolveInst(bin_op.rhs); @@ -6299,7 +6309,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } try writer.writeAll("memset("); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { .Slice => { try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" }); try writer.writeAll(", 0xaa, "); @@ -6311,8 +6321,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } }, .One => { - const array_ty = dest_ty.childType(); - const len = array_ty.arrayLen() * elem_abi_size; + const array_ty = dest_ty.childType(mod); + const len = array_ty.arrayLen(mod) * elem_abi_size; try f.writeCValue(writer, dest_slice, .FunctionArgument); try writer.print(", 0xaa, {d});\n", .{len}); @@ -6327,11 +6337,10 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable - var elem_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base); + const elem_ptr_ty = try mod.ptrType(.{ + .size = .C, + .elem_type = elem_ty.ip_index, + }); const index = try f.allocLocal(inst, Type.usize); @@ -6342,13 +6351,13 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { .Slice => { try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" }); }, .One => { - const array_ty = dest_ty.childType(); - try writer.print("{d}", .{array_ty.arrayLen()}); + const array_ty = dest_ty.childType(mod); + try writer.print("{d}", .{array_ty.arrayLen(mod)}); }, .Many, .C => unreachable, } @@ -6377,7 +6386,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const bitcasted = try bitcast(f, Type.u8, value, elem_ty); try writer.writeAll("memset("); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { 
.Slice => { try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" }); try writer.writeAll(", "); @@ -6387,8 +6396,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll(");\n"); }, .One => { - const array_ty = dest_ty.childType(); - const len = array_ty.arrayLen() * elem_abi_size; + const array_ty = dest_ty.childType(mod); + const len = array_ty.arrayLen(mod) * elem_abi_size; try f.writeCValue(writer, dest_slice, .FunctionArgument); try writer.writeAll(", "); @@ -6416,9 +6425,9 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try writeSliceOrPtr(f, writer, src_ptr, src_ty); try writer.writeAll(", "); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { .Slice => { - const elem_ty = dest_ty.childType(); + const elem_ty = dest_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }); if (elem_abi_size > 1) { @@ -6428,10 +6437,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { } }, .One => { - const array_ty = dest_ty.childType(); - const elem_ty = array_ty.childType(); + const array_ty = dest_ty.childType(mod); + const elem_ty = array_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); - const len = array_ty.arrayLen() * elem_abi_size; + const len = array_ty.arrayLen(mod) * elem_abi_size; try writer.print("{d});\n", .{len}); }, .Many, .C => unreachable, @@ -6448,7 +6457,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const union_ty = f.typeOf(bin_op.lhs).childType(); + const union_ty = f.typeOf(bin_op.lhs).childType(mod); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety().?; @@ -6777,7 +6786,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.typeOfIndex(inst); - const len = @intCast(usize, inst_ty.arrayLen()); + const len = @intCast(usize, inst_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]); const gpa = f.object.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); @@ -6796,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => { - const elem_ty = inst_ty.childType(); + const elem_ty = inst_ty.childType(mod); const a = try Assignment.init(f, elem_ty); for (resolved_elements, 0..) 
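The `airMemcpy` hunk above computes the byte count two ways: slices multiply a runtime length by the element ABI size (skipping the multiply when it is 1), while a `*[N]T` destination folds the whole length to a compile-time constant. A hedged model of that computation:

```zig
const std = @import("std");

// Sketch: runtime_len models the .Slice case, array_len the .One case.
fn memcpyByteLen(elem_abi_size: u64, runtime_len: ?u64, array_len: ?u64) u64 {
    if (runtime_len) |len| {
        return if (elem_abi_size > 1) len * elem_abi_size else len;
    }
    return array_len.? * elem_abi_size; // length known from the pointee type
}

pub fn main() void {
    std.debug.print("{d}\n", .{memcpyByteLen(4, null, 20)}); // 80 bytes
}
```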
|element, i| { try a.restart(f, writer); @@ -6806,7 +6815,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, element, .Other); try a.end(f, writer); } - if (inst_ty.sentinel()) |sentinel| { + if (inst_ty.sentinel(mod)) |sentinel| { try a.restart(f, writer); try f.writeCValue(writer, local, .Other); try writer.print("[{d}]", .{resolved_elements.len}); @@ -7708,7 +7717,7 @@ const Vectorize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { const mod = f.object.dg.module; return if (ty.zigTypeTag(mod) == .Vector) index: { - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() }; + var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) }; const local = try f.allocLocal(inst, Type.usize); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 27fa997fd3..9e6de6cb21 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1423,7 +1423,7 @@ pub const CType = extern union { }), .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); switch (info.size) { .Slice => { if (switch (kind) { @@ -1625,9 +1625,9 @@ pub const CType = extern union { .Vector => .vector, else => unreachable, }; - if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { + if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| { self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ - .len = ty.arrayLenIncludingSentinel(), + .len = ty.arrayLenIncludingSentinel(mod), .elem_type = child_idx, } } }; self.value = .{ .cty = initPayload(&self.storage.seq) }; @@ -1639,8 +1639,7 @@ pub const CType = extern union { }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (ty.optionalReprIsPayload(mod)) { try self.initType(payload_ty, kind, lookup); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5d9345c84f..f45a63df72 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -597,7 +597,7 @@ pub const Object = struct { llvm_usize_ty, }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; @@ -1071,7 +1071,7 @@ pub const Object = struct { .slice => { assert(!it.byval_attr); const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { @@ -1596,7 +1596,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.sentinel != null or ptr_info.@"addrspace" != .generic or @@ -1755,8 +1755,8 @@ pub const Object = struct { const array_di_ty = dib.createArrayType( ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, - try o.lowerDebugType(ty.childType(), .full), - @intCast(c_int, ty.arrayLen()), + try o.lowerDebugType(ty.childType(mod), .full), + @intCast(c_int, ty.arrayLen(mod)), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module }); @@ -1781,14 +1781,14 @@ pub const Object = struct { break :blk dib.createBasicType(name, info.bits, dwarf_encoding); }, .Bool => dib.createBasicType("bool", 1, DW.ATE.boolean), - else => try o.lowerDebugType(ty.childType(), .full), + else => try o.lowerDebugType(ty.childType(mod), .full), }; const vector_di_ty = dib.createVectorType( ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, elem_di_type, - ty.vectorLen(), + ty.vectorLen(mod), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module }); @@ -1797,8 +1797,7 @@ pub const Object = struct { .Optional => { const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { const di_bits = 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean); @@ -2350,11 +2349,7 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); if (sret) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = fn_info.return_type, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(fn_info.return_type); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } } else { @@ -2364,11 +2359,7 @@ pub const Object = struct { if (fn_info.return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = o.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } @@ -2376,11 +2367,7 @@ pub const Object = struct { if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (isByRef(param_ty, mod)) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = param_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(param_ty); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { try param_di_types.append(try o.lowerDebugType(param_ty, .full)); @@ -2843,7 +2830,7 @@ pub const DeclGen = struct { }; return dg.context.structType(&fields, fields.len, .False); } - const ptr_info = t.ptrInfo().data; + const ptr_info = t.ptrInfo(mod); const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); return dg.context.pointerType(llvm_addrspace); }, @@ -2866,19 +2853,18 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Array => { - const elem_ty = t.childType(); + const elem_ty = t.childType(mod); assert(elem_ty.onePossibleValue(mod) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); - const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); + const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); }, .Vector => { - const elem_type = try dg.lowerType(t.childType()); - return elem_type.vectorType(t.vectorLen()); + const elem_type = try dg.lowerType(t.childType(mod)); + return 
elem_type.vectorType(t.vectorLen(mod)); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&buf); + const child_ty = t.optionalChild(mod); if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } @@ -3173,11 +3159,7 @@ pub const DeclGen = struct { if (fn_info.return_type.isError(mod) and mod.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = dg.object.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); try llvm_params.append(try dg.lowerType(ptr_ty)); } @@ -3199,9 +3181,8 @@ pub const DeclGen = struct { .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; - var opt_buf: Type.Payload.ElemType = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) + param_ty.optionalChild(mod).slicePtrFieldType(&buf) else param_ty.slicePtrFieldType(&buf); const ptr_llvm_ty = try dg.lowerType(ptr_ty); @@ -3247,7 +3228,7 @@ pub const DeclGen = struct { const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !elem_ty.fnInfo().is_generic, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod), + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) @@ -3417,7 +3398,7 @@ pub const DeclGen = struct { return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo().data.bit_offset % 8 == 0); + return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, .null_value, .zero => { const llvm_type = try dg.lowerType(tv.ty); @@ -3425,7 +3406,7 @@ pub const DeclGen = struct { }, .opt_payload => { const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo().data.bit_offset % 8 == 0); + return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty.fmtDebug(), tag, @@ -3436,14 +3417,14 @@ pub const DeclGen = struct { const bytes = tv.val.castTag(.bytes).?.data; return dg.context.constString( bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()), + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), .True, // Don't null terminate. Bytes has the sentinel, if any. 
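In the LLVM `lowerType` hunk above, `total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null)` makes the sentinel an ordinary trailing element of the lowered LLVM array type. The rule in isolation:

```zig
const std = @import("std");

// Sketch: the lowered array gains one element when a sentinel exists,
// so [5:0]u8 lowers as a 6-element LLVM array.
fn llvmArrayLen(len: u64, has_sentinel: bool) u64 {
    return len + @boolToInt(has_sentinel);
}

pub fn main() void {
    std.debug.print("{d}\n", .{llvmArrayLen(5, true)}); // 6
}
```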
); }, .str_lit => { const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel()) |sent_val| { + if (tv.ty.sentinel(mod)) |sent_val| { const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); if (byte == 0 and bytes.len > 0) { return dg.context.constString( @@ -3472,9 +3453,9 @@ pub const DeclGen = struct { }, .aggregate => { const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel()); + const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); const llvm_elems = try gpa.alloc(*llvm.Value, len); defer gpa.free(llvm_elems); var need_unnamed = false; @@ -3498,9 +3479,9 @@ pub const DeclGen = struct { }, .repeated => { const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const sentinel = tv.ty.sentinel(); - const len = @intCast(usize, tv.ty.arrayLen()); + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); const len_including_sent = len + @boolToInt(sentinel != null); const gpa = dg.gpa; const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); @@ -3534,8 +3515,8 @@ pub const DeclGen = struct { } }, .empty_array_sentinel => { - const elem_ty = tv.ty.elemType(); - const sent_val = tv.ty.sentinel().?; + const elem_ty = tv.ty.childType(mod); + const sent_val = tv.ty.sentinel(mod).?; const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); const llvm_elems: [1]*llvm.Value = .{sentinel}; const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); @@ -3550,8 +3531,7 @@ pub const DeclGen = struct { }, .Optional => { comptime assert(optional_layout_version == 3); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = tv.ty.optionalChild(&buf); + const payload_ty = tv.ty.optionalChild(mod); const llvm_i8 = dg.context.intType(8); const is_pl = !tv.val.isNull(mod); @@ -3897,10 +3877,10 @@ pub const DeclGen = struct { .bytes => { // Note, sentinel is not stored even if the type has a sentinel. const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -3923,9 +3903,9 @@ pub const DeclGen = struct { // Note, sentinel is not stored even if the type has a sentinel. // The value includes the sentinel in those cases. const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -3939,8 +3919,8 @@ pub const DeclGen = struct { .repeated => { // Note, sentinel is not stored even if the type has a sentinel. 
const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const len = @intCast(usize, tv.ty.arrayLen()); + const elem_ty = tv.ty.childType(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); defer dg.gpa.free(llvm_elems); for (llvm_elems) |*elem| { @@ -3955,10 +3935,10 @@ pub const DeclGen = struct { // Note, sentinel is not stored const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == bytes.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -4006,13 +3986,10 @@ pub const DeclGen = struct { ptr_val: Value, decl_index: Module.Decl.Index, ) Error!*llvm.Value { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } @@ -4135,9 +4112,8 @@ pub const DeclGen = struct { .opt_payload_ptr => { const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); + const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or payload_ty.optionalReprIsPayload(mod)) { @@ -4251,7 +4227,8 @@ pub const DeclGen = struct { } fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value { - const alignment = ptr_ty.ptrInfo().data.@"align"; + const mod = dg.module; + const alignment = ptr_ty.ptrInfo(mod).@"align"; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. 
// The value cannot be undefined, because we use the `nonnull` annotation @@ -4374,7 +4351,7 @@ pub const DeclGen = struct { ) void { const mod = dg.module; if (param_ty.isPtrAtRuntime(mod)) { - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); @@ -4786,7 +4763,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; const fn_info = zig_fn_ty.fnInfo(); @@ -5014,7 +4991,7 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; if (math.cast(u5, it.zig_index - 1)) |i| { @@ -5098,11 +5075,7 @@ pub const FuncGen = struct { const ret_ty = self.typeOf(un_op); if (self.ret_ptr) |ret_ptr| { const operand = try self.resolveInst(un_op); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); _ = self.builder.buildRetVoid(); return null; @@ -5150,11 +5123,11 @@ pub const FuncGen = struct { } fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); const fn_info = self.dg.decl.ty.fnInfo(); - const mod = self.dg.module; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (fn_info.return_type.isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5301,15 +5274,13 @@ pub const FuncGen = struct { operand_ty: Type, op: math.CompareOperator, ) Allocator.Error!*llvm.Value { - var opt_buffer: Type.Payload.ElemType = undefined; - const mod = self.dg.module; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { .Enum => scalar_ty.intTagType(), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { - const payload_ty = operand_ty.optionalChild(&opt_buffer); + const payload_ty = operand_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.optionalReprIsPayload(mod)) { @@ -5506,11 +5477,12 @@ pub const FuncGen = struct { } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } @@ -5661,9 +5633,9 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); - const array_ty = 
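The byte-length hunk above leans on the slice representation: a slice lowers to a two-field aggregate, which is why the length comes from `buildExtractValue(ptr, 1, "")` and the ptr/len field pointers use struct GEP indices 0 and 1. A user-level model of that layout, with a hypothetical `Slice` stand-in:

```zig
const std = @import("std");

// Sketch: field 0 is the pointer, field 1 is the length.
const Slice = extern struct { ptr: [*]const u8, len: usize };

pub fn main() void {
    const s: []const u8 = "hello";
    const lowered = Slice{ .ptr = s.ptr, .len = s.len };
    std.debug.print("field 0 = ptr, field 1 = len ({d})\n", .{lowered.len});
}
```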
operand_ty.childType(); + const array_ty = operand_ty.childType(mod); const llvm_usize = try self.dg.lowerType(Type.usize); - const len = llvm_usize.constInt(array_ty.arrayLen(), .False); + const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5806,20 +5778,20 @@ pub const FuncGen = struct { const mod = fg.dg.module; const target = mod.getTarget(); const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); - switch (ty.ptrSize()) { + switch (ty.ptrSize(mod)) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False); return fg.builder.buildMul(len, abi_size_llvm_val, ""); }, .One => { - const array_ty = ty.childType(); - const elem_ty = array_ty.childType(); + const array_ty = ty.childType(mod); + const elem_ty = array_ty.childType(mod); const abi_size = elem_ty.abiSize(mod); - return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False); + return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False); }, .Many, .C => unreachable, } @@ -5832,10 +5804,11 @@ pub const FuncGen = struct { } fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); - const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType()); + const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType(mod)); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); } @@ -5847,7 +5820,7 @@ pub const FuncGen = struct { const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); + const elem_ty = slice_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; @@ -5863,13 +5836,14 @@ pub const FuncGen = struct { } fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType()); + const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType(mod)); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -5884,7 +5858,7 @@ pub const FuncGen = struct { const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try self.dg.lowerType(array_ty); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs 
}; if (isByRef(elem_ty, mod)) { @@ -5923,7 +5897,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -5951,14 +5925,14 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const elem_ptr = self.air.getRefType(ty_pl.ty); - if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr; + if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr; const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); if (ptr_ty.isSinglePointer(mod)) { @@ -6098,7 +6072,7 @@ pub const FuncGen = struct { const field_ptr = try self.resolveInst(extra.field_ptr); const target = self.dg.module.getTarget(); - const parent_ty = self.air.getRefType(ty_pl.ty).childType(); + const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); @@ -6232,6 +6206,7 @@ pub const FuncGen = struct { } fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); @@ -6243,7 +6218,7 @@ pub const FuncGen = struct { name.ptr, self.di_file.?, self.prev_dbg_line, - try self.dg.object.lowerDebugType(ptr_ty.childType(), .full), + try self.dg.object.lowerDebugType(ptr_ty.childType(mod), .full), true, // always preserve 0, // flags ); @@ -6365,7 +6340,7 @@ pub const FuncGen = struct { const output_inst = try self.resolveInst(output); const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); - const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType()); + const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType(mod)); if (llvm_ret_indirect[i]) { // Pass the result by reference as an indirect output (e.g. "=*m") @@ -6466,7 +6441,7 @@ pub const FuncGen = struct { // an elementtype() attribute. 
if (constraint[0] == '*') { llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse - try self.dg.lowerPtrElemTy(arg_ty.childType()); + try self.dg.lowerPtrElemTy(arg_ty.childType(mod)); } else { llvm_param_attrs[llvm_param_i] = null; } @@ -6657,14 +6632,13 @@ pub const FuncGen = struct { operand_is_ptr: bool, pred: llvm.IntPredicate, ) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - const mod = self.dg.module; + const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") @@ -6709,7 +6683,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -6748,9 +6722,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. @@ -6770,9 +6743,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = self.context.intType(8).constInt(1, .False); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. 
@@ -6827,9 +6799,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const result_ty = self.typeOfIndex(inst); - const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; + const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return if (operand_is_ptr) operand else null; @@ -6862,7 +6834,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { @@ -6895,7 +6867,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const err_union_ty = self.typeOf(ty_op.operand).childType(); + const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); @@ -6961,11 +6933,7 @@ pub const FuncGen = struct { if (isByRef(optional_ty, mod)) { const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, ""); _ = self.builder.buildStore(non_null_bit, non_null_ptr); @@ -6995,11 +6963,7 @@ pub const FuncGen = struct { const store_inst = self.builder.buildStore(ok_err_code, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); return result_ptr; } @@ -7027,11 +6991,7 @@ pub const FuncGen = struct { const store_inst = self.builder.buildStore(operand, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = 
payload_ptr; _ = payload_ptr_ty; @@ -7076,7 +7036,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(extra.rhs); const loaded_vector = blk: { - const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType()); + const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); @@ -7287,7 +7247,7 @@ pub const FuncGen = struct { const inst_llvm_ty = try self.dg.lowerType(inst_ty); const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(); + const vec_len = inst_ty.vectorLen(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -7361,7 +7321,7 @@ pub const FuncGen = struct { if (scalar_ty.isSignedInt(mod)) { const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(); + const vec_len = inst_ty.vectorLen(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -7384,13 +7344,14 @@ pub const FuncGen = struct { } fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); - switch (ptr_ty.ptrSize()) { + const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod)); + switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset }; @@ -7409,14 +7370,15 @@ pub const FuncGen = struct { } fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const negative_offset = self.builder.buildNeg(offset, ""); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); - switch (ptr_ty.ptrSize()) { + const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod)); + switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
const indices: [2]*llvm.Value = .{ @@ -7587,7 +7549,7 @@ pub const FuncGen = struct { }; if (ty.zigTypeTag(mod) == .Vector) { - const vec_len = ty.vectorLen(); + const vec_len = ty.vectorLen(mod); const vector_result_ty = llvm_i32.vectorType(vec_len); var result = vector_result_ty.getUndef(); @@ -7672,8 +7634,8 @@ pub const FuncGen = struct { const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); const result = if (ty.zigTypeTag(mod) == .Vector) blk: { - const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, ""); - const cast_ty = int_llvm_ty.vectorType(ty.vectorLen()); + const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, ""); + const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod)); const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); } else blk: { @@ -7720,7 +7682,7 @@ pub const FuncGen = struct { const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); if (ty.zigTypeTag(mod) == .Vector) { const result = llvm_ty.getUndef(); - return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen()); + return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen(mod)); } break :b libc_fn; @@ -7887,7 +7849,7 @@ pub const FuncGen = struct { const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); if (rhs_ty.zigTypeTag(mod) == .Vector) { - const vec_len = rhs_ty.vectorLen(); + const vec_len = rhs_ty.vectorLen(mod); const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, ""); const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, ""); @@ -8059,7 +8021,7 @@ pub const FuncGen = struct { } if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) { - const elem_ty = operand_ty.childType(); + const elem_ty = operand_ty.childType(mod); if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } @@ -8074,7 +8036,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); - const vector_len = operand_ty.arrayLen(); + const vector_len = operand_ty.arrayLen(mod); var i: u64 = 0; while (i < vector_len) : (i += 1) { const index_usize = llvm_usize.constInt(i, .False); @@ -8087,7 +8049,7 @@ pub const FuncGen = struct { } return array_ptr; } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { - const elem_ty = operand_ty.childType(); + const elem_ty = operand_ty.childType(mod); const llvm_vector_ty = try self.dg.lowerType(inst_ty); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); @@ -8108,7 +8070,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); - const vector_len = operand_ty.arrayLen(); + const vector_len = operand_ty.arrayLen(mod); var vector = llvm_vector_ty.getUndef(); var i: u64 = 0; while (i < vector_len) : (i += 1) { @@ -8207,7 +8169,7 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ptr_ty = self.typeOfIndex(inst); - const pointee_type = ptr_ty.childType(); + const 
pointee_type = ptr_ty.childType(mod); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.lowerType(pointee_type); @@ -8218,7 +8180,7 @@ pub const FuncGen = struct { fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ptr_ty = self.typeOfIndex(inst); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = try self.dg.lowerType(ret_ty); @@ -8232,11 +8194,11 @@ pub const FuncGen = struct { } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); - const operand_ty = ptr_ty.childType(); - const mod = self.dg.module; + const operand_ty = ptr_ty.childType(mod); const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { @@ -8271,8 +8233,10 @@ pub const FuncGen = struct { /// /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { + const mod = fg.dg.module; + const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { - switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0])) { + switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) { .none => continue, .write, .noret, .complex => return false, .tomb => return true, @@ -8288,7 +8252,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[inst].ty_op; const ptr_ty = fg.typeOf(ty_op.operand); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const ptr = try fg.resolveInst(ty_op.operand); elide: { @@ -8363,7 +8327,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(extra.ptr); var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); - const operand_ty = self.typeOf(extra.ptr).elemType(); + const operand_ty = self.typeOf(extra.ptr).childType(mod); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating @@ -8409,7 +8373,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); const ptr_ty = self.typeOf(pl_op.operand); - const operand_ty = ptr_ty.elemType(); + const operand_ty = ptr_ty.childType(mod); const operand = try self.resolveInst(extra.operand); const is_signed_int = operand_ty.isSignedInt(mod); const is_float = operand_ty.isRuntimeFloat(); @@ -8464,7 +8428,7 @@ pub const FuncGen = struct { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -8497,7 +8461,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - 
const operand_ty = ptr_ty.childType(); + const operand_ty = ptr_ty.childType(mod); if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); @@ -8595,9 +8559,9 @@ pub const FuncGen = struct { const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); - const len = switch (ptr_ty.ptrSize()) { + const len = switch (ptr_ty.ptrSize(mod)) { .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), - .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False), + .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), .Many, .C => unreachable, }; const elem_llvm_ty = try self.dg.lowerType(elem_ty); @@ -8665,7 +8629,7 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const un_ty = self.typeOf(bin_op.lhs).childType(); + const un_ty = self.typeOf(bin_op.lhs).childType(mod); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_ptr = try self.resolveInst(bin_op.lhs); @@ -8791,7 +8755,7 @@ pub const FuncGen = struct { // The truncated result at the end will be the correct bswap const scalar_llvm_ty = self.context.intType(bits + 8); if (operand_ty.zigTypeTag(mod) == .Vector) { - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -8980,7 +8944,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); @@ -9097,10 +9061,11 @@ pub const FuncGen = struct { } fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const scalar = try self.resolveInst(ty_op.operand); const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); return self.builder.buildVectorSplat(len, scalar, ""); } @@ -9122,7 +9087,7 @@ pub const FuncGen = struct { const b = try self.resolveInst(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); // LLVM uses integers larger than the length of the first array to // index into the second array. 
This was deemed unnecessarily fragile @@ -9298,14 +9263,14 @@ pub const FuncGen = struct { .ty = scalar_ty, .val = Value.initPayload(&init_value_payload.base), }); - return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value); + return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value); } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); @@ -9400,7 +9365,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); - const array_info = result_ty.arrayInfo(); + const array_info = result_ty.arrayInfo(mod); var elem_ptr_payload: Type.Payload.Pointer = .{ .data = .{ .pointee_type = array_info.elem_type, @@ -9720,7 +9685,7 @@ pub const FuncGen = struct { } const mod = self.dg.module; - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space @@ -9763,9 +9728,8 @@ pub const FuncGen = struct { opt_ty: Type, can_elide_load: bool, ) !*llvm.Value { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); const mod = fg.dg.module; + const payload_ty = opt_ty.optionalChild(mod); if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. @@ -9827,13 +9791,13 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !?*llvm.Value { - const struct_ty = struct_ptr_ty.childType(); const mod = self.dg.module; + const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { const result_ty = self.typeOfIndex(inst); - const result_ty_info = result_ty.ptrInfo().data; + const result_ty_info = result_ty.ptrInfo(mod); if (result_ty_info.host_size != 0) { // From LLVM's perspective, a pointer to a packed struct and a pointer @@ -9919,7 +9883,7 @@ pub const FuncGen = struct { /// For isByRef=false types, it creates a load instruction and returns it. 
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { const mod = self.dg.module; - const info = ptr_ty.ptrInfo().data; + const info = ptr_ty.ptrInfo(mod); if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_alignment = info.alignment(mod); @@ -9954,7 +9918,7 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); + const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); @@ -9992,9 +9956,9 @@ pub const FuncGen = struct { elem: *llvm.Value, ordering: llvm.AtomicOrdering, ) !void { - const info = ptr_ty.ptrInfo().data; - const elem_ty = info.pointee_type; const mod = self.dg.module; + const info = ptr_ty.ptrInfo(mod); + const elem_ty = info.pointee_type; if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } @@ -10026,7 +9990,7 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); + const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit @@ -10864,8 +10828,7 @@ const ParamTypeIterator = struct { .Unspecified, .Inline => { it.zig_index += 1; it.llvm_index += 1; - var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice(mod))) { + if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod))) { it.llvm_index += 1; return .slice; } else if (isByRef(ty, mod)) { @@ -11185,8 +11148,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { return true; }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index f69c6cb317..9de2c03142 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -625,20 +625,20 @@ pub const DeclGen = struct { .Array => switch (val.tag()) { .aggregate => { const elem_vals = val.castTag(.aggregate).?.data; - const elem_ty = ty.elemType(); - const len = @intCast(u32, ty.arrayLenIncludingSentinel()); // TODO: limit spir-v to 32 bit arrays in a more elegant way. + const elem_ty = ty.childType(mod); + const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way. 
for (elem_vals[0..len]) |elem_val| { try self.lower(elem_ty, elem_val); } }, .repeated => { const elem_val = val.castTag(.repeated).?.data; - const elem_ty = ty.elemType(); - const len = @intCast(u32, ty.arrayLen()); + const elem_ty = ty.childType(mod); + const len = @intCast(u32, ty.arrayLen(mod)); for (0..len) |_| { try self.lower(elem_ty, elem_val); } - if (ty.sentinel()) |sentinel| { + if (ty.sentinel(mod)) |sentinel| { try self.lower(elem_ty, sentinel); } }, @@ -646,7 +646,7 @@ pub const DeclGen = struct { const str_lit = val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try self.addBytes(bytes); - if (ty.sentinel()) |sentinel| { + if (ty.sentinel(mod)) |sentinel| { try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); } }, @@ -706,8 +706,7 @@ pub const DeclGen = struct { } }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); const has_payload = !val.isNull(mod); const abi_size = ty.abiSize(mod); @@ -1216,10 +1215,10 @@ pub const DeclGen = struct { return try self.spv.resolve(.{ .float_type = .{ .bits = bits } }); }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse { - return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()}); + const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse { + return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; return self.spv.arrayType(total_len, elem_ty_ref); }, @@ -1248,7 +1247,7 @@ pub const DeclGen = struct { }, }, .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); const storage_class = spvStorageClass(ptr_info.@"addrspace"); const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect); @@ -1280,8 +1279,8 @@ pub const DeclGen = struct { // TODO: Properly verify sizes and child type. return try self.spv.resolve(.{ .vector_type = .{ - .component_type = try self.resolveType(ty.elemType(), repr), - .component_count = @intCast(u32, ty.vectorLen()), + .component_type = try self.resolveType(ty.childType(mod), repr), + .component_count = @intCast(u32, ty.vectorLen(mod)), } }); }, .Struct => { @@ -1335,8 +1334,7 @@ pub const DeclGen = struct { } }); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. 
// Note: Always generate the bool with indirect format, to save on some sanity @@ -1685,7 +1683,8 @@ pub const DeclGen = struct { } fn load(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef) !IdRef { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ @@ -1701,7 +1700,8 @@ pub const DeclGen = struct { } fn store(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, value_id: IdRef) !void { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ .Volatile = ptr_ty.isVolatilePtr(), @@ -2072,7 +2072,7 @@ pub const DeclGen = struct { const b = try self.resolve(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); @@ -2138,9 +2138,10 @@ pub const DeclGen = struct { } fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { + const mod = self.module; const result_ty_ref = try self.resolveType(result_ty, .direct); - switch (ptr_ty.ptrSize()) { + switch (ptr_ty.ptrSize(mod)) { .One => { // Pointer to array // TODO: Is this correct? @@ -2498,7 +2499,7 @@ pub const DeclGen = struct { // Construct new pointer type for the resulting pointer const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); + const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod))); if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. @@ -2516,7 +2517,7 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); // TODO: Make this return a null ptr or something if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2526,6 +2527,7 @@ pub const DeclGen = struct { } fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const ptr_id = try self.resolve(bin_op.lhs); @@ -2536,9 +2538,9 @@ pub const DeclGen = struct { // If we have a pointer-to-array, construct an element pointer to use with load() // If we pass ptr_ty directly, it will attempt to load the entire array rather than // just an element. 
- var elem_ptr_info = ptr_ty.ptrInfo(); - elem_ptr_info.data.size = .One; - const elem_ptr_ty = Type.initPayload(&elem_ptr_info.base); + var elem_ptr_info = ptr_ty.ptrInfo(mod); + elem_ptr_info.size = .One; + const elem_ptr_ty = try Type.ptr(undefined, mod, elem_ptr_info); return try self.load(elem_ptr_ty, elem_ptr_id); } @@ -2586,7 +2588,7 @@ pub const DeclGen = struct { field_index: u32, ) !?IdRef { const mod = self.module; - const object_ty = object_ptr_ty.childType(); + const object_ty = object_ptr_ty.childType(mod); switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout()) { .Packed => unreachable, // TODO @@ -2662,9 +2664,10 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ptr_ty = self.typeOfIndex(inst); - assert(ptr_ty.ptrAddressSpace() == .generic); - const child_ty = ptr_ty.childType(); + assert(ptr_ty.ptrAddressSpace(mod) == .generic); + const child_ty = ptr_ty.childType(mod); const child_ty_ref = try self.resolveType(child_ty, .indirect); return try self.alloc(child_ty_ref, null); } @@ -2834,7 +2837,7 @@ pub const DeclGen = struct { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); @@ -2971,8 +2974,7 @@ pub const DeclGen = struct { const operand_id = try self.resolve(un_op); const optional_ty = self.typeOf(un_op); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const payload_ty = optional_ty.optionalChild(mod); const bool_ty_ref = try self.resolveType(Type.bool, .direct); diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 1d4840aeb7..c5ba429ec9 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -11,7 +11,8 @@ const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; -const ZigDecl = @import("../../Module.zig").Decl; +const ZigModule = @import("../../Module.zig"); +const ZigDecl = ZigModule.Decl; const spec = @import("spec.zig"); const Word = spec.Word; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 682431203e..178f9fa64c 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -219,8 +219,7 @@ pub const DeclState = struct { try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } - var buf = try arena.create(Type.Payload.ElemType); - const payload_ty = ty.optionalChild(buf); + const payload_ty = ty.optionalChild(mod); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata @@ -304,7 +303,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); } }, .Array => { @@ -315,7 +314,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index)); + try 
self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); // DW.AT.subrange_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim)); // DW.AT.type, DW.FORM.ref4 @@ -323,7 +322,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index)); // DW.AT.count, DW.FORM.udata - const len = ty.arrayLenIncludingSentinel(); + const len = ty.arrayLenIncludingSentinel(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), len); // DW.AT.array_type delimit children try dbg_info_buffer.append(0); @@ -688,7 +687,7 @@ pub const DeclState = struct { const mod = self.mod; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const child_ty = if (is_ptr) ty.childType() else ty; + const child_ty = if (is_ptr) ty.childType(mod) else ty; switch (loc) { .register => |reg| { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 0154207368..fb7ca3a87f 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2931,7 +2931,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const mod = wasm.base.options.module.?; atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; @@ -2988,7 +2988,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { for (mod.error_name_list.items) |error_name| { const len = @intCast(u32, error_name.len + 1); // names are 0-termianted - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const offset = @intCast(u32, atom.code.items.len); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated diff --git a/src/print_air.zig b/src/print_air.zig index f4a1aeae32..8717bdc6bf 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -433,9 +433,10 @@ const Writer = struct { } fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const vector_ty = w.air.getRefType(ty_pl.ty); - const len = @intCast(usize, vector_ty.arrayLen()); + const len = @intCast(usize, vector_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]); try w.writeType(s, vector_ty); @@ -512,10 +513,11 @@ const Writer = struct { } fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; - const elem_ty = w.typeOfIndex(inst).childType(); + const elem_ty = w.typeOfIndex(inst).childType(mod); try w.writeType(s, elem_ty); try s.writeAll(", "); try w.writeOperand(s, inst, 0, pl_op.operand); diff --git a/src/type.zig b/src/type.zig index 868ae4231b..1f970919c9 100644 --- a/src/type.zig +++ b/src/type.zig @@ -40,7 +40,7 @@ pub const Type = struct { .ptr_type => return .Pointer, .array_type => return .Array, .vector_type => return .Vector, - .optional_type => return .Optional, + .opt_type => return .Optional, .error_union_type => return .ErrorUnion, .struct_type => return .Struct, .union_type => return .Union, @@ -118,38 +118,17 @@ pub const Type = struct { .function => return .Fn, .array, - .array_u8_sentinel_0, - 
.array_u8, .array_sentinel, => return .Array, - .vector => return .Vector, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => return .Pointer, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => return .Optional, + .optional => return .Optional, - .anyerror_void_error_union, .error_union => return .ErrorUnion, + .error_union => return .ErrorUnion, .anyframe_T => return .AnyFrame, @@ -177,8 +156,7 @@ pub const Type = struct { return switch (self.zigTypeTag(mod)) { .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), .Optional => { - var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(mod); + return self.optionalChild(mod).baseZigTypeTag(mod); }, else => |t| t, }; @@ -218,8 +196,7 @@ pub const Type = struct { .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), .Optional => { if (!is_equality_cmp) return false; - var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp); + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); }, }; } @@ -275,9 +252,8 @@ pub const Type = struct { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) { - return null; - } + assert(self.ip_index == .none); + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -287,281 +263,61 @@ pub const Type = struct { return null; } - pub fn castPointer(self: Type) ?*Payload.ElemType { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => self.cast(Payload.ElemType), - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - else => null, - }; - } - /// If it is a function pointer, returns the function type. Otherwise returns null. 
pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { if (ty.zigTypeTag(mod) != .Pointer) return null; - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } - pub fn ptrIsMutable(ty: Type) bool { - return switch (ty.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .many_const_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .c_const_pointer, - .const_slice, - => false, - - .single_mut_pointer, - .many_mut_pointer, - .manyptr_u8, - .c_mut_pointer, - .mut_slice, - => true, - - .pointer => ty.castTag(.pointer).?.data.mutable, - - else => unreachable, + pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.mutable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| !ptr_type.is_const, + else => unreachable, + }, }; } - pub const ArrayInfo = struct { elem_type: Type, sentinel: ?Value = null, len: u64 }; - pub fn arrayInfo(self: Type) ArrayInfo { + pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, + }; + + pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { return .{ - .len = self.arrayLen(), - .sentinel = self.sentinel(), - .elem_type = self.elemType(), + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), }; } - pub fn ptrInfo(self: Type) Payload.Pointer { - switch (self.ip_index) { - .none => switch (self.tag()) { - .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.comptime_int, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .many_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - 
.@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .many_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .c_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = false, - .@"volatile" = false, - .size = .C, - } }, - .c_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = true, - .@"volatile" = false, - .size = .C, - } }, - .const_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .mut_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Slice, - } }, - - .pointer => return self.castTag(.pointer).?.*, - - .optional_single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .optional_single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrInfo(); + pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data, + .optional => b: { + const child_type = ty.optionalChild(mod); + break :b child_type.ptrInfo(mod); }, else => unreachable, }, - else => @panic("TODO"), - } + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |p| Payload.Pointer.Data.fromKey(p), + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| Payload.Pointer.Data.fromKey(p), + else => unreachable, + }, + else => unreachable, + }, + }; } pub fn eql(a: Type, b: Type, mod: *Module) bool { @@ -658,20 +414,17 @@ pub const Type = struct { }, .array, - 
.array_u8_sentinel_0, - .array_u8, .array_sentinel, - .vector, => { if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; - if (a.arrayLen() != b.arrayLen()) + if (a.arrayLen(mod) != b.arrayLen(mod)) return false; - const elem_ty = a.elemType(); - if (!elem_ty.eql(b.elemType(), mod)) + const elem_ty = a.childType(mod); + if (!elem_ty.eql(b.childType(mod), mod)) return false; - const sentinel_a = a.sentinel(); - const sentinel_b = b.sentinel(); + const sentinel_a = a.sentinel(mod); + const sentinel_b = b.sentinel(mod); if (sentinel_a) |sa| { if (sentinel_b) |sb| { return sa.eql(sb, elem_ty, mod); @@ -683,28 +436,14 @@ pub const Type = struct { } }, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => { if (b.zigTypeTag(mod) != .Pointer) return false; - const info_a = a.ptrInfo().data; - const info_b = b.ptrInfo().data; + const info_a = a.ptrInfo(mod); + const info_b = b.ptrInfo(mod); if (!info_a.pointee_type.eql(info_b.pointee_type, mod)) return false; if (info_a.@"align" != info_b.@"align") @@ -743,18 +482,13 @@ pub const Type = struct { return true; }, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { + .optional => { if (b.zigTypeTag(mod) != .Optional) return false; - var buf_a: Payload.ElemType = undefined; - var buf_b: Payload.ElemType = undefined; - return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod); + return a.optionalChild(mod).eql(b.optionalChild(mod), mod); }, - .anyerror_void_error_union, .error_union => { + .error_union => { if (b.zigTypeTag(mod) != .ErrorUnion) return false; const a_set = a.errorUnionSet(); @@ -947,47 +681,23 @@ pub const Type = struct { }, .array, - .array_u8_sentinel_0, - .array_u8, .array_sentinel, => { std.hash.autoHash(hasher, std.builtin.TypeId.Array); - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.arrayLen()); + const elem_ty = ty.childType(mod); + std.hash.autoHash(hasher, ty.arrayLen(mod)); hashWithHasher(elem_ty, hasher, mod); - hashSentinel(ty.sentinel(), elem_ty, hasher, mod); + hashSentinel(ty.sentinel(mod), elem_ty, hasher, mod); }, - .vector => { - std.hash.autoHash(hasher, std.builtin.TypeId.Vector); - - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.vectorLen()); - hashWithHasher(elem_ty, hasher, mod); - }, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => { std.hash.autoHash(hasher, std.builtin.TypeId.Pointer); - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); hashWithHasher(info.pointee_type, hasher, mod); hashSentinel(info.sentinel, info.pointee_type, hasher, mod); std.hash.autoHash(hasher, info.@"align"); @@ -1001,17 +711,13 @@ pub const Type = struct { std.hash.autoHash(hasher, info.size); }, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { + .optional => { std.hash.autoHash(hasher, std.builtin.TypeId.Optional); - var buf: Payload.ElemType = undefined; - 
hashWithHasher(ty.optionalChild(&buf), hasher, mod); + hashWithHasher(ty.optionalChild(mod), hasher, mod); }, - .anyerror_void_error_union, .error_union => { + .error_union => { std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); const set_ty = ty.errorUnionSet(); @@ -1023,7 +729,7 @@ pub const Type = struct { .anyframe_T => { std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - hashWithHasher(ty.childType(), hasher, mod); + hashWithHasher(ty.childType(mod), hasher, mod); }, .empty_struct => { @@ -1129,33 +835,12 @@ pub const Type = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => unreachable, - .array_u8, - .array_u8_sentinel_0, - => return self.copyPayloadShallow(allocator, Payload.Len), - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyframe_T, => { const payload = self.cast(Payload.ElemType).?; @@ -1170,13 +855,6 @@ pub const Type = struct { }; }, - .vector => { - const payload = self.castTag(.vector).?.data; - return Tag.vector.create(allocator, .{ - .len = payload.len, - .elem_type = try payload.elem_type.copy(allocator), - }); - }, .array => { const payload = self.castTag(.array).?.data; return Tag.array.create(allocator, .{ @@ -1408,13 +1086,6 @@ pub const Type = struct { }); }, - .anyerror_void_error_union => return writer.writeAll("anyerror!void"), - .const_slice_u8 => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), - .manyptr_u8 => return writer.writeAll("[*]u8"), - .manyptr_const_u8 => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"), .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1447,20 +1118,6 @@ pub const Type = struct { ty = return_type; continue; }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - return writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - return writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try payload.elem_type.dump("", .{}, writer); - return writer.writeAll(")"); - }, .array => { const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); @@ -1512,72 +1169,12 @@ pub const Type = struct { try writer.writeAll("}"); return; }, - .single_const_pointer => { - const pointee_type = ty.castTag(.single_const_pointer).?.data; - try writer.writeAll("*const "); - ty = pointee_type; - continue; - }, - .single_mut_pointer => { - const pointee_type = ty.castTag(.single_mut_pointer).?.data; - try writer.writeAll("*"); - ty = pointee_type; - continue; - }, - .many_const_pointer => { - const pointee_type = ty.castTag(.many_const_pointer).?.data; - try writer.writeAll("[*]const "); - ty = pointee_type; - continue; - }, - .many_mut_pointer => { - const pointee_type 
= ty.castTag(.many_mut_pointer).?.data; - try writer.writeAll("[*]"); - ty = pointee_type; - continue; - }, - .c_const_pointer => { - const pointee_type = ty.castTag(.c_const_pointer).?.data; - try writer.writeAll("[*c]const "); - ty = pointee_type; - continue; - }, - .c_mut_pointer => { - const pointee_type = ty.castTag(.c_mut_pointer).?.data; - try writer.writeAll("[*c]"); - ty = pointee_type; - continue; - }, - .const_slice => { - const pointee_type = ty.castTag(.const_slice).?.data; - try writer.writeAll("[]const "); - ty = pointee_type; - continue; - }, - .mut_slice => { - const pointee_type = ty.castTag(.mut_slice).?.data; - try writer.writeAll("[]"); - ty = pointee_type; - continue; - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); ty = child_type; continue; }, - .optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - ty = pointee_type; - continue; - }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - ty = pointee_type; - continue; - }, .pointer => { const payload = ty.castTag(.pointer).?.data; @@ -1680,7 +1277,7 @@ pub const Type = struct { .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), @@ -1733,14 +1330,6 @@ pub const Type = struct { try decl.renderFullyQualifiedName(mod, writer); }, - .anyerror_void_error_union => try writer.writeAll("anyerror!void"), - .const_slice_u8 => try writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), - .manyptr_u8 => try writer.writeAll("[*]u8"), - .manyptr_const_u8 => try writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => try writer.writeAll("[*:0]const u8"), - .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1799,20 +1388,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - try writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - try writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try print(payload.elem_type, writer, mod); - try writer.writeAll(")"); - }, .array => { const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); @@ -1865,17 +1440,8 @@ pub const Type = struct { try writer.writeAll("}"); }, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const info = ty.ptrInfo().data; + .pointer => { + const info = ty.ptrInfo(mod); if (info.sentinel) |s| switch (info.size) { .One, .C => unreachable, @@ -1920,16 +1486,6 @@ pub const Type = struct { try writer.writeByte('?'); try print(child_type, writer, mod); }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - try print(pointee_type, writer, mod); - }, - 
.optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - try print(pointee_type, writer, mod); - }, .anyframe_T => { const return_type = ty.castTag(.anyframe_T).?.data; try writer.print("anyframe->", .{}); @@ -1963,12 +1519,6 @@ pub const Type = struct { pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { - .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined }, - .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined }, - .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined }, - .manyptr_u8 => return Value{ .ip_index = .manyptr_u8_type, .legacy = undefined }, - .manyptr_const_u8 => return Value{ .ip_index = .manyptr_const_u8_type, .legacy = undefined }, - .manyptr_const_u8_sentinel_0 => return Value{ .ip_index = .manyptr_const_u8_sentinel_0_type, .legacy = undefined }, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), @@ -1996,10 +1546,41 @@ pub const Type = struct { ) RuntimeBitsError!bool { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type.bits != 0, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .ptr_type => |ptr_type| { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; + if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); + return !comptimeOnly(ty, mod); + }, + .array_type => |array_type| { + if (array_type.sentinel != .none) { + return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } else { + return array_type.len > 0 and + try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } + }, + .vector_type => |vector_type| { + return vector_type.len > 0 and + try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, + .opt_type => |child| { + const child_ty = child.toType(); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(child_ty)); + } else { + return !comptimeOnly(child_ty, mod); + } + }, .error_union_type => @panic("TODO"), .simple_type => |t| return switch (t) { .f16, @@ -2058,14 +1639,7 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; switch (ty.tag()) { - .const_slice_u8, - .const_slice_u8_sentinel_0, - .array_u8_sentinel_0, - .anyerror_void_error_union, .error_set_inferred, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .@"opaque", .error_set_single, @@ -2077,22 +1651,12 @@ pub const Type = struct { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. 
.anyframe_T, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, => { if (ignore_comptime_only) { return true; - } else if (ty.childType().zigTypeTag(mod) == .Fn) { - return !ty.childType().fnInfo().is_generic; + } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { + return !ty.childType(mod).fnInfo().is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { @@ -2101,7 +1665,6 @@ pub const Type = struct { }, // These are false because they are comptime-only types. - .single_const_pointer_to_comptime_int, .empty_struct, .empty_struct_literal, // These are function *bodies*, not pointers. @@ -2111,8 +1674,7 @@ pub const Type = struct { => return false, .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { // Then the optional is comptime-known to be null. return false; @@ -2200,10 +1762,9 @@ pub const Type = struct { } }, - .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array => return ty.arrayLen(mod) != 0 and + try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .tuple, .anon_struct => { const tuple = ty.tupleFields(); @@ -2224,14 +1785,14 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return true, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => |t| return switch (t) { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, + .ptr_type => true, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .vector_type => true, + .opt_type => |child| child.toType().isPtrLikeOptional(mod), + .error_union_type => false, + .simple_type => |t| switch (t) { .f16, .f32, .f64, @@ -2287,23 +1848,8 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .array_u8, - .array_u8_sentinel_0, .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, .enum_numbered, - .vector, - .optional_single_mut_pointer, - .optional_single_const_pointer, => true, .error_set, @@ -2313,13 +1859,8 @@ pub const Type = struct { .@"opaque", // These are function bodies, not function pointers. 
.function, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, .enum_simple, .error_union, - .anyerror_void_error_union, .anyframe_T, .tuple, .anon_struct, @@ -2336,7 +1877,7 @@ pub const Type = struct { .array, .array_sentinel, - => ty.childType().hasWellDefinedLayout(mod), + => ty.childType(mod).hasWellDefinedLayout(mod), .optional => ty.isPtrLikeOptional(mod), .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, @@ -2417,76 +1958,36 @@ pub const Type = struct { } pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { - switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - const child_type = ty.cast(Payload.ElemType).?.data; - if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } - return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - }, - - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return 1, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => { + const ptr_info = ty.castTag(.pointer).?.data; + if (ptr_info.@"align" != 0) { + return ptr_info.@"align"; + } else if (opt_sema) |sema| { + const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } + }, + .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), - .pointer => { - const ptr_info = ty.castTag(.pointer).?.data; - if (ptr_info.@"align" != 0) { - return ptr_info.@"align"; - } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => @panic("TODO"), }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), - - else => unreachable, } } - pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace { + pub fn ptrAddressSpace(self: Type, mod: *const Module) std.builtin.AddressSpace { return switch (self.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .generic, - .pointer => self.castTag(.pointer).?.data.@"addrspace", .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrAddressSpace(); + const child_type = self.optionalChild(mod); + return child_type.ptrAddressSpace(mod); }, else => unreachable, @@ -2530,15 +2031,31 @@ pub const Type = struct { ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); + const opt_sema = switch (strat) { + .sema => |sema| sema, + else => null, + }; + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { 
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; }, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .ptr_type => { + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + }, + .array_type => |array_type| { + return array_type.child.toType().abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); + const bits = @intCast(u32, bits_u64); + const bytes = ((bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return AbiAlignmentAdvanced{ .scalar = alignment }; + }, + + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -2617,15 +2134,8 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; switch (ty.tag()) { - .array_u8_sentinel_0, - .array_u8, - .@"opaque", - => return AbiAlignmentAdvanced{ .scalar = 1 }, + .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, // represents machine code; not a pointer .function => { @@ -2634,47 +2144,21 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; }, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, .pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, .error_set_inferred, .error_set_single, .error_set, .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat), - - .vector => { - const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema); - const bytes = ((bits * len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); - return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; - }, + .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat), .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + const child_type = ty.optionalChild(mod); switch (child_type.zigTypeTag(mod)) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, @@ -2933,8 +2417,29 @@ pub const Type = struct { }, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => |arena| return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(arena, ty), + }, + }; + const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); + const elem_bits = @intCast(u32, elem_bits_u64); + const total_bits = elem_bits * vector_type.len; + const total_bytes = (total_bits 
+ 7) / 8; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(strat.lazy, ty), + }, + }; + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, + + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -3014,7 +2519,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .single_const_pointer_to_comptime_int, .empty_struct_literal, .empty_struct, => return AbiSizeAdvanced{ .scalar = 0 }, @@ -3068,8 +2572,6 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); }, - .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, - .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { const payload = ty.castTag(.array).?.data; switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { @@ -3093,47 +2595,7 @@ pub const Type = struct { } }, - .vector => { - const payload = ty.castTag(.vector).?.data; - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, - }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema); - const total_bits = elem_bits * payload.len; - const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, - }; - const result = std.mem.alignForwardGeneric(u64, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, - - .anyframe_T, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, @@ -3141,7 +2603,6 @@ pub const Type = struct { }, // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, .error_set_inferred, .error_set, .error_set_merged, @@ -3149,8 +2610,7 @@ pub const Type = struct { => return AbiSizeAdvanced{ .scalar = 2 }, .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + const child_type = ty.optionalChild(mod); if (child_type.isNoReturn()) { return AbiSizeAdvanced{ .scalar = 0 }; @@ -3272,8 +2732,12 @@ pub const Type = struct { .int_type => |int_type| return int_type.bits, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + 
}, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16 => return 16, @@ -3339,7 +2803,6 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .single_const_pointer_to_comptime_int => unreachable, .empty_struct => unreachable, .empty_struct_literal => unreachable, .inferred_alloc_const => unreachable, @@ -3388,13 +2851,6 @@ pub const Type = struct { return size; }, - .vector => { - const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return elem_bit_size * payload.len; - }, - .array_u8 => return 8 * ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), .array => { const payload = ty.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); @@ -3415,43 +2871,13 @@ pub const Type = struct { .anyframe_T => return target.ptrBitWidth(), - .const_slice, - .mut_slice, - => return target.ptrBitWidth() * 2, - - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return target.ptrBitWidth() * 2, - - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - return target.ptrBitWidth(); - }, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => { - return target.ptrBitWidth(); - }, - .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return target.ptrBitWidth() * 2, else => return target.ptrBitWidth(), }, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return target.ptrBitWidth(), - .error_set, .error_set_single, - .anyerror_void_error_union, .error_set_inferred, .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type @@ -3481,12 +2907,11 @@ pub const Type = struct { return true; }, .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(mod); + if (ty.arrayLenIncludingSentinel(mod) == 0) return true; + return ty.childType(mod).layoutIsResolved(mod); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { @@ -3500,9 +2925,6 @@ pub const Type = struct { pub fn isSinglePointer(ty: Type, mod: *const Module) bool { switch (ty.ip_index) { .none => return switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, .inferred_alloc_const, .inferred_alloc_mut, => true, @@ -3519,54 +2941,33 @@ pub const Type = struct { } /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty).?; + pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; } /// Returns `null` if `ty` is not a pointer. 
- pub fn ptrSizeOrNull(ty: Type) ?std.builtin.Type.Pointer.Size { - return switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => .Slice, - - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .Many, - - .c_const_pointer, - .c_mut_pointer, - => .C, - - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, - .inferred_alloc_const, - .inferred_alloc_mut, - => .One, + pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .inferred_alloc_const, + .inferred_alloc_mut, + => .One, - .pointer => ty.castTag(.pointer).?.data.size, + .pointer => ty.castTag(.pointer).?.data.size, - else => null, + else => null, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_info| ptr_info.size, + else => null, + }, }; } pub fn isSlice(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => true, - .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -3583,78 +2984,28 @@ pub const Type = struct { pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { switch (self.tag()) { - .const_slice_u8 => return Type.initTag(.manyptr_const_u8), - .const_slice_u8_sentinel_0 => return Type.initTag(.manyptr_const_u8_sentinel_0), - - .const_slice => { - const elem_type = self.castTag(.const_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .mut_slice => { - const elem_type = self.castTag(.mut_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .pointer => { const payload = self.castTag(.pointer).?.data; assert(payload.size == .Slice); - if (payload.sentinel != null or - payload.@"align" != 0 or - payload.@"addrspace" != .generic or - payload.bit_offset != 0 or - payload.host_size != 0 or - payload.vector_index != .none or - payload.@"allowzero" or - payload.@"volatile") - { - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, - }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - } else if (payload.mutable) { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = payload.pointee_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - } else { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = payload.pointee_type, + buffer.* = .{ + .pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + .host_size = payload.host_size, + .vector_index = payload.vector_index, + .@"allowzero" = 
payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many, }, - }; - return Type.initPayload(&buffer.elem_type.base); - } + }, + }; + return Type.initPayload(&buffer.pointer.base); }, else => unreachable, @@ -3663,19 +3014,7 @@ pub const Type = struct { pub fn isConstPtr(self: Type) bool { return switch (self.tag()) { - .single_const_pointer, - .many_const_pointer, - .c_const_pointer, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => true, - .pointer => !self.castTag(.pointer).?.data.mutable, - else => false, }; } @@ -3702,49 +3041,46 @@ pub const Type = struct { pub fn isCPtr(self: Type) bool { return switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - => return true, - .pointer => self.castTag(.pointer).?.data.size == .C, else => return false, }; } - pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool { - switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .manyptr_u8, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_const_pointer_to_comptime_int, - .single_mut_pointer, - => return true, + pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => switch (ty.castTag(.pointer).?.data.size) { + .Slice => return false, + .One, .Many, .C => return true, + }, - .pointer => switch (self.castTag(.pointer).?.data.size) { - .Slice => return false, - .One, .Many, .C => return true, - }, + .optional => { + const child_type = ty.optionalChild(mod); + if (child_type.zigTypeTag(mod) != .Pointer) return false; + const info = child_type.ptrInfo(mod); + switch (info.size) { + .Slice, .C => return false, + .Many, .One => return !info.@"allowzero", + } + }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag(mod) != .Pointer) return false; - const info = child_type.ptrInfo().data; - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } + else => return false, + }, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.size) { + .Slice, .C => false, + .Many, .One => !p.is_allowzero, + }, + else => false, + }, + else => false, }, - - else => return false, } } @@ -3754,23 +3090,17 @@ pub const Type = struct { if (ty.isPtrLikeOptional(mod)) { return true; } - return ty.ptrInfo().data.@"allowzero"; + return ty.ptrInfo(mod).@"allowzero"; } /// See also `isPtrLikeOptional`. 
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - .optional => { const child_ty = ty.castTag(.optional).?.data; switch (child_ty.zigTypeTag(mod)) { .Pointer => { - const info = child_ty.ptrInfo().data; + const info = child_ty.ptrInfo(mod); switch (info.size) { .C => return false, .Slice, .Many, .One => return !info.@"allowzero", @@ -3793,7 +3123,7 @@ pub const Type = struct { pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .C, - .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) { + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice, .C => false, .Many, .One => !ptr_type.is_allowzero, @@ -3803,16 +3133,10 @@ pub const Type = struct { else => false, }; switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - .optional => { const child_ty = ty.castTag(.optional).?.data; if (child_ty.zigTypeTag(mod) != .Pointer) return false; - const info = child_ty.ptrInfo().data; + const info = child_ty.ptrInfo(mod); switch (info.size) { .Slice, .C => return false, .Many, .One => return !info.@"allowzero", @@ -3828,43 +3152,24 @@ pub const Type = struct { /// For *[N]T, returns [N]T. /// For *T, returns T. /// For [*]T, returns T. - pub fn childType(ty: Type) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.comptime_int, - .pointer => ty.castTag(.pointer).?.data.pointee_type, + pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, mod.intern_pool); + } - else => unreachable, + pub fn childTypeIp(ty: Type, ip: InternPool) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, + + .pointer => ty.castTag(.pointer).?.data.pointee_type, + + else => unreachable, + }, + else => ip.childType(ty.ip_index).toType(), }; } - /// Asserts the type is a pointer or array type. - /// TODO this is deprecated in favor of `childType`. - pub const elemType = childType; - /// For *[N]T, returns T. /// For ?*T, returns T. /// For ?*[N]T, returns T. @@ -3875,54 +3180,42 @@ pub const Type = struct { /// For []T, returns T. /// For anyframe->T, returns T. 
pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .single_const_pointer, - .single_mut_pointer, - => ty.castPointer().?.data.shallowElemType(mod), - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.comptime_int, - .pointer => { - const info = ty.castTag(.pointer).?.data; - const child_ty = info.pointee_type; - if (info.size == .One) { - return child_ty.shallowElemType(mod); - } else { - return child_ty; - } - }, - .optional => ty.castTag(.optional).?.data.childType(), - .optional_single_mut_pointer => ty.castPointer().?.data, - .optional_single_const_pointer => ty.castPointer().?.data, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, + + .pointer => { + const info = ty.castTag(.pointer).?.data; + const child_ty = info.pointee_type; + if (info.size == .One) { + return child_ty.shallowElemType(mod); + } else { + return child_ty; + } + }, + .optional => ty.castTag(.optional).?.data.childType(mod), - .anyframe_T => ty.castTag(.anyframe_T).?.data, + .anyframe_T => ty.castTag(.anyframe_T).?.data, - else => unreachable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One => ptr_type.elem_type.toType().shallowElemType(mod), + .Many, .C, .Slice => ptr_type.elem_type.toType(), + }, + .vector_type => |vector_type| vector_type.child.toType(), + .array_type => |array_type| array_type.child.toType(), + .opt_type => |child| mod.intern_pool.childType(child).toType(), + else => unreachable, + }, }; } fn shallowElemType(child_ty: Type, mod: *const Module) Type { return switch (child_ty.zigTypeTag(mod)) { - .Array, .Vector => child_ty.childType(), + .Array, .Vector => child_ty.childType(mod), else => child_ty, }; } @@ -3930,7 +3223,7 @@ pub const Type = struct { /// For vectors, returns the element type. Otherwise returns self. pub fn scalarType(ty: Type, mod: *const Module) Type { return switch (ty.zigTypeTag(mod)) { - .Vector => ty.childType(), + .Vector => ty.childType(mod), else => ty, }; } @@ -3938,51 +3231,25 @@ pub const Type = struct { /// Asserts that the type is an optional. /// Resulting `Type` will have inner memory referencing `buf`. /// Note that for C pointers this returns the type unmodified. 
- pub fn optionalChild(ty: Type, buf: *Payload.ElemType) Type { - return switch (ty.tag()) { - .optional => ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - buf.* = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, - .optional_single_const_pointer => { - buf.* = .{ - .base = .{ .tag = .single_const_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, + pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .optional => ty.castTag(.optional).?.data, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, + .pointer, // here we assume it is a C pointer + => return ty, - else => unreachable, - }; - } - - /// Asserts that the type is an optional. - /// Same as `optionalChild` but allocates the buffer if needed. - pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type { - switch (ty.tag()) { - .optional => return ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - return Tag.single_mut_pointer.create(allocator, ty.castPointer().?.data); + else => unreachable, }, - .optional_single_const_pointer => { - return Tag.single_const_pointer.create(allocator, ty.castPointer().?.data); + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| child.toType(), + .ptr_type => |ptr_type| b: { + assert(ptr_type.size == .C); + break :b ty; + }, + else => unreachable, }, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, - - else => unreachable, - } + }; } /// Returns the tag type of a union, if the type is a union and it has a tag type. @@ -4071,19 +3338,25 @@ pub const Type = struct { } /// Asserts that the type is an error union. - pub fn errorUnionPayload(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.void, - .error_union => self.castTag(.error_union).?.data.payload, - else => unreachable, + pub fn errorUnionPayload(ty: Type) Type { + return switch (ty.ip_index) { + .anyerror_void_error_union_type => Type.void, + .none => switch (ty.tag()) { + .error_union => ty.castTag(.error_union).?.data.payload, + else => unreachable, + }, + else => @panic("TODO"), }; } - pub fn errorUnionSet(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.anyerror, - .error_union => self.castTag(.error_union).?.data.error_set, - else => unreachable, + pub fn errorUnionSet(ty: Type) Type { + return switch (ty.ip_index) { + .anyerror_void_error_union_type => Type.anyerror, + .none => switch (ty.tag()) { + .error_union => ty.castTag(.error_union).?.data.error_set, + else => unreachable, + }, + else => @panic("TODO"), }; } @@ -4168,67 +3441,73 @@ pub const Type = struct { } /// Asserts the type is an array or vector or struct. 
- pub fn arrayLen(ty: Type) u64 { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.len, - .array => ty.castTag(.array).?.data.len, - .array_sentinel => ty.castTag(.array_sentinel).?.data.len, - .array_u8 => ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data, - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct, .empty_struct_literal => 0, + pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return arrayLenIp(ty, mod.intern_pool); + } - else => unreachable, + pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.len, + .array_sentinel => ty.castTag(.array_sentinel).?.data.len, + .tuple => ty.castTag(.tuple).?.data.types.len, + .anon_struct => ty.castTag(.anon_struct).?.data.types.len, + .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), + .empty_struct, .empty_struct_literal => 0, + + else => unreachable, + }, + else => switch (ip.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .array_type => |array_type| array_type.len, + else => unreachable, + }, }; } - pub fn arrayLenIncludingSentinel(ty: Type) u64 { - return ty.arrayLen() + @boolToInt(ty.sentinel() != null); + pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return ty.arrayLen(mod) + @boolToInt(ty.sentinel(mod) != null); } - pub fn vectorLen(ty: Type) u32 { - return switch (ty.tag()) { - .vector => @intCast(u32, ty.castTag(.vector).?.data.len), - .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), - .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), - else => unreachable, + pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), + .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }, }; } /// Asserts the type is an array, pointer or vector. 
- pub fn sentinel(self: Type) ?Value { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, - .vector, - .array, - .array_u8, - .manyptr_u8, - .manyptr_const_u8, - .const_slice_u8, - .const_slice, - .mut_slice, - .tuple, - .empty_struct_literal, - .@"struct", - => return null, + pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array, + .tuple, + .empty_struct_literal, + .@"struct", + => null, - .pointer => return self.castTag(.pointer).?.data.sentinel, - .array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel, + .pointer => ty.castTag(.pointer).?.data.sentinel, + .array_sentinel => ty.castTag(.array_sentinel).?.data.sentinel, - .array_u8_sentinel_0, - .const_slice_u8_sentinel_0, - .manyptr_const_u8_sentinel_0, - => return Value.zero, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type, + .struct_type, + => null, - else => unreachable, + .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + + else => unreachable, + }, }; } @@ -4292,8 +3571,6 @@ pub const Type = struct { return .{ .signedness = .unsigned, .bits = 16 }; }, - .vector => ty = ty.castTag(.vector).?.data.elem_type, - .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); @@ -4321,8 +3598,9 @@ pub const Type = struct { .int_type => |int_type| return int_type, .ptr_type => unreachable, .array_type => unreachable, - .vector_type => @panic("TODO"), - .optional_type => unreachable, + .vector_type => |vector_type| ty = vector_type.child.toType(), + + .opt_type => unreachable, .error_union_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above .struct_type => @panic("TODO"), @@ -4426,7 +3704,11 @@ pub const Type = struct { /// Asserts the type is a function or a function pointer. 
pub fn fnReturnType(ty: Type) Type { - const fn_ty = if (ty.castPointer()) |p| p.data else ty; + const fn_ty = switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.pointee_type, + .function => ty, + else => unreachable, + }; return fn_ty.castTag(.function).?.data.return_type; } @@ -4516,8 +3798,12 @@ pub const Type = struct { }, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; + return null; + }, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -4580,34 +3866,15 @@ pub const Type = struct { .error_set, .error_set_merged, .function, - .single_const_pointer_to_comptime_int, .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .anyerror_void_error_union, .error_set_inferred, .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, .pointer, => return null, .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { return Value.null; } else { @@ -4690,10 +3957,10 @@ pub const Type = struct { .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) + .array => { + if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue(mod) != null) + if (ty.childType(mod).onePossibleValue(mod) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -4711,9 +3978,9 @@ pub const Type = struct { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .array_type => |array_type| return array_type.child.toType().comptimeOnly(mod), + .vector_type => |vector_type| return vector_type.child.toType().comptimeOnly(mod), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -4772,12 +4039,6 @@ pub const Type = struct { }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .empty_struct_literal, .empty_struct, .error_set, @@ -4785,35 +4046,21 @@ pub const Type = struct { .error_set_inferred, .error_set_merged, .@"opaque", - .array_u8, - .array_u8_sentinel_0, .enum_simple, => false, - .single_const_pointer_to_comptime_int, // These are function bodies, not function pointers. 
-            .function,
-            => true,
+            .function => true,

             .inferred_alloc_mut => unreachable,
             .inferred_alloc_const => unreachable,

             .array,
             .array_sentinel,
-            .vector,
-            => return ty.childType().comptimeOnly(mod),
+            => return ty.childType(mod).comptimeOnly(mod),

-            .pointer,
-            .single_const_pointer,
-            .single_mut_pointer,
-            .many_const_pointer,
-            .many_mut_pointer,
-            .c_const_pointer,
-            .c_mut_pointer,
-            .const_slice,
-            .mut_slice,
-            => {
-                const child_ty = ty.childType();
+            .pointer => {
+                const child_ty = ty.childType(mod);
                 if (child_ty.zigTypeTag(mod) == .Fn) {
                     return false;
                 } else {
@@ -4821,12 +4068,8 @@ pub const Type = struct {
                 }
             },

-            .optional,
-            .optional_single_mut_pointer,
-            .optional_single_const_pointer,
-            => {
-                var buf: Type.Payload.ElemType = undefined;
-                return ty.optionalChild(&buf).comptimeOnly(mod);
+            .optional => {
+                return ty.optionalChild(mod).comptimeOnly(mod);
             },

             .tuple, .anon_struct => {
@@ -4882,6 +4125,10 @@ pub const Type = struct {
         };
     }

+    pub fn isVector(ty: Type, mod: *const Module) bool {
+        return ty.zigTypeTag(mod) == .Vector;
+    }
+
     pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Array, .Vector => true,
@@ -4892,9 +4139,9 @@ pub const Type = struct {
     pub fn isIndexable(ty: Type, mod: *const Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Array, .Vector => true,
-            .Pointer => switch (ty.ptrSize()) {
+            .Pointer => switch (ty.ptrSize(mod)) {
                 .Slice, .Many, .C => true,
-                .One => ty.elemType().zigTypeTag(mod) == .Array,
+                .One => ty.childType(mod).zigTypeTag(mod) == .Array,
             },
             .Struct => ty.isTuple(),
             else => false,
@@ -4904,10 +4151,10 @@ pub const Type = struct {
     pub fn indexableHasLen(ty: Type, mod: *const Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Array, .Vector => true,
-            .Pointer => switch (ty.ptrSize()) {
+            .Pointer => switch (ty.ptrSize(mod)) {
                 .Many, .C => false,
                 .Slice => true,
-                .One => ty.elemType().zigTypeTag(mod) == .Array,
+                .One => ty.childType(mod).zigTypeTag(mod) == .Array,
             },
             .Struct => ty.isTuple(),
             else => false,
@@ -5527,14 +4774,6 @@ pub const Type = struct {
     /// with different enum tags, because the former requires more payload data than the latter.
     /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`.
     pub const Tag = enum(usize) {
-        // The first section of this enum are tags that require no payload.
-        manyptr_u8,
-        manyptr_const_u8,
-        manyptr_const_u8_sentinel_0,
-        single_const_pointer_to_comptime_int,
-        const_slice_u8,
-        const_slice_u8_sentinel_0,
-        anyerror_void_error_union,
         /// Same as `empty_struct` except it has an empty namespace.
         empty_struct_literal,
         /// This is a special value that tracks a set of types that have been stored
@@ -5545,28 +4784,15 @@ pub const Type = struct {
         inferred_alloc_const, // See last_no_payload_tag below.
         // After this, the tag requires a payload.
- array_u8, - array_u8_sentinel_0, array, array_sentinel, - vector, /// Possible Value tags for this: @"struct" tuple, /// Possible Value tags for this: @"struct" anon_struct, pointer, - single_const_pointer, - single_mut_pointer, - many_const_pointer, - many_mut_pointer, - c_const_pointer, - c_mut_pointer, - const_slice, - mut_slice, function, optional, - optional_single_mut_pointer, - optional_single_const_pointer, error_union, anyframe_T, error_set, @@ -5590,33 +4816,12 @@ pub const Type = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .single_const_pointer_to_comptime_int, - .anyerror_void_error_union, - .const_slice_u8, - .const_slice_u8_sentinel_0, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - .array_u8, - .array_u8_sentinel_0, - => Payload.Len, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyframe_T, => Payload.ElemType, @@ -5624,7 +4829,7 @@ pub const Type = struct { .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, - .array, .vector => Payload.Array, + .array => Payload.Array, .array_sentinel => Payload.ArraySentinel, .pointer => Payload.Pointer, .function => Payload.Function, @@ -5847,15 +5052,28 @@ pub const Type = struct { @"volatile": bool = false, size: std.builtin.Type.Pointer.Size = .One, - pub const VectorIndex = enum(u32) { - none = std.math.maxInt(u32), - runtime = std.math.maxInt(u32) - 1, - _, - }; + pub const VectorIndex = InternPool.Key.PtrType.VectorIndex; + pub fn alignment(data: Data, mod: *const Module) u32 { if (data.@"align" != 0) return data.@"align"; return abiAlignment(data.pointee_type, mod); } + + pub fn fromKey(p: InternPool.Key.PtrType) Data { + return .{ + .pointee_type = p.elem_type.toType(), + .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, + .@"align" = p.alignment, + .@"addrspace" = p.address_space, + .bit_offset = p.bit_offset, + .host_size = p.host_size, + .vector_index = p.vector_index, + .@"allowzero" = p.is_allowzero, + .mutable = !p.is_const, + .@"volatile" = p.is_volatile, + .size = p.size, + }; + } }; }; @@ -5986,6 +5204,17 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type, .legacy = undefined }; + pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type, .legacy = undefined }; + pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, + .legacy = undefined, + }; + pub const const_slice_u8_sentinel_0: Type = .{ + .ip_index = .const_slice_u8_sentinel_0_type, + .legacy = undefined, + }; + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub const err_int = Type.u16; @@ -6019,50 +5248,6 @@ pub const Type = struct { } } - if (d.@"align" == 0 and d.@"addrspace" == .generic and - d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and - !d.@"allowzero" and !d.@"volatile") - { - if (d.sentinel) |sent| { - if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch 
(d.size) { - .Slice => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.const_slice_u8_sentinel_0); - } - }, - .Many => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.manyptr_const_u8_sentinel_0); - } - }, - else => {}, - } - } - } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch (d.size) { - .Slice => return Type.initTag(.const_slice_u8), - .Many => return Type.initTag(.manyptr_const_u8), - else => {}, - } - } else { - const T = Type.Tag; - const type_payload = try arena.create(Type.Payload.ElemType); - type_payload.* = .{ - .base = .{ - .tag = switch (d.size) { - .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer, - .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer, - .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer, - .Slice => if (d.mutable) T.mut_slice else T.const_slice, - }, - }, - .data = d.pointee_type, - }; - return Type.initPayload(&type_payload.base); - } - } - return Type.Tag.pointer.create(arena, d); } @@ -6073,13 +5258,21 @@ pub const Type = struct { elem_type: Type, mod: *Module, ) Allocator.Error!Type { - if (elem_type.eql(Type.u8, mod)) { - if (sent) |some| { - if (some.eql(Value.zero, elem_type, mod)) { - return Tag.array_u8_sentinel_0.create(arena, len); + if (elem_type.ip_index != .none) { + if (sent) |s| { + if (s.ip_index != .none) { + return mod.arrayType(.{ + .len = len, + .child = elem_type.ip_index, + .sentinel = s.ip_index, + }); } } else { - return Tag.array_u8.create(arena, len); + return mod.arrayType(.{ + .len = len, + .child = elem_type.ip_index, + .sentinel = .none, + }); } } @@ -6097,24 +5290,11 @@ pub const Type = struct { }); } - pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type { - return Tag.vector.create(arena, .{ - .len = len, - .elem_type = elem_type, - }); - } - - pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type { - switch (child_type.tag()) { - .single_const_pointer => return Type.Tag.optional_single_const_pointer.create( - arena, - child_type.elemType(), - ), - .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create( - arena, - child_type.elemType(), - ), - else => return Type.Tag.optional.create(arena, child_type), + pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type { + if (child_type.ip_index != .none) { + return mod.optionalType(child_type.ip_index); + } else { + return Type.Tag.optional.create(arena, child_type); } } @@ -6125,12 +5305,6 @@ pub const Type = struct { mod: *Module, ) Allocator.Error!Type { assert(error_set.zigTypeTag(mod) == .ErrorSet); - if (error_set.eql(Type.anyerror, mod) and - payload.eql(Type.void, mod)) - { - return Type.initTag(.anyerror_void_error_union); - } - return Type.Tag.error_union.create(arena, .{ .error_set = error_set, .payload = payload, diff --git a/src/value.zig b/src/value.zig index cbf18c672c..6f7210c884 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,14 +33,6 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. 
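
Construction mirrors the queries: the array and optional constructors above now route already-interned operands through mod.arrayType and mod.optionalType, keeping the legacy arena payload only as a fallback. A sketch under that assumption, using the exact call shape from the hunk above (the wrapper function itself is hypothetical):

    // Sketch: building an array type through the InternPool when the
    // element type (and optional sentinel) are already interned.
    fn makeArrayType(mod: *Module, len: u64, elem: Type, sent: ?Value) !Type {
        assert(elem.ip_index != .none); // legacy arena path omitted here
        return mod.arrayType(.{
            .len = len,
            .child = elem.ip_index,
            .sentinel = if (sent) |s| s.ip_index else .none,
        });
    }
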
- manyptr_u8_type, - manyptr_const_u8_type, - manyptr_const_u8_sentinel_0_type, - single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - const_slice_u8_sentinel_0_type, - anyerror_void_error_union_type, - undef, zero, one, @@ -140,11 +132,6 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .undef, .zero, .one, @@ -153,9 +140,6 @@ pub const Value = struct { .empty_struct_value, .empty_array, .null_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), .int_big_positive, @@ -280,9 +264,7 @@ pub const Value = struct { } pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) { - return null; - } + assert(self.ip_index == .none); if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -305,11 +287,6 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .undef, .zero, .one, @@ -318,9 +295,6 @@ pub const Value = struct { .empty_array, .null_value, .empty_struct_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, => unreachable, .ty, .lazy_align, .lazy_size => { @@ -553,14 +527,6 @@ pub const Value = struct { } var val = start_val; while (true) switch (val.tag()) { - .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), - .const_slice_u8_type => return out_stream.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return out_stream.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return out_stream.writeAll("anyerror!void"), - .manyptr_u8_type => return out_stream.writeAll("[*]u8"), - .manyptr_const_u8_type => return out_stream.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return out_stream.writeAll("[*:0]const u8"), - .empty_struct_value => return out_stream.writeAll("struct {}{}"), .aggregate => { return out_stream.writeAll("(aggregate)"); @@ -674,7 +640,7 @@ pub const Value = struct { switch (val.tag()) { .bytes => { const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel() != null); + const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); const adjusted_bytes = bytes[0..adjusted_len]; return allocator.dupe(u8, adjusted_bytes); }, @@ -686,7 +652,7 @@ pub const Value = struct { .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), .repeated => { const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen())); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); @memset(result, byte); return result; }, @@ -701,7 +667,7 @@ pub const Value = struct { const slice = val.castTag(.slice).?.data; return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod), + else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), } } @@ -720,13 +686,6 @@ pub const Value = struct { if (self.ip_index 
!= .none) return self.ip_index.toType(); return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .const_slice_u8_type => Type.initTag(.const_slice_u8), - .const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), - .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .manyptr_u8_type => Type.initTag(.manyptr_u8), - .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), - .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), else => unreachable, }; @@ -1096,8 +1055,8 @@ pub const Value = struct { else => unreachable, }, .Array => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); const elem_size = @intCast(usize, elem_ty.abiSize(mod)); var elem_i: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; @@ -1150,8 +1109,7 @@ pub const Value = struct { }, .Optional => { if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); @@ -1220,9 +1178,9 @@ pub const Value = struct { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); - const len = @intCast(usize, ty.arrayLen()); + const len = @intCast(usize, ty.arrayLen(mod)); var bits: u16 = 0; var elem_i: usize = 0; @@ -1267,8 +1225,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToPackedMemory(child, mod, buffer, bit_offset); @@ -1335,9 +1292,9 @@ pub const Value = struct { else => unreachable, }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena); @@ -1386,8 +1343,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); return readFromMemory(child, mod, buffer, arena); }, else => @panic("TODO implement readFromMemory for more types"), @@ -1449,8 +1405,8 @@ pub const Value = struct { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + const elem_ty = ty.childType(mod); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); var bits: u16 = 0; const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); @@ -1483,8 +1439,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); return readFromPackedMemory(child, mod, buffer, bit_offset, arena); }, else => 
@panic("TODO implement readFromPackedMemory for more types"), @@ -1956,7 +1911,7 @@ pub const Value = struct { pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool { if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < ty.vectorLen()) : (i += 1) { + while (i < ty.vectorLen(mod)) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); @@ -2092,8 +2047,7 @@ pub const Value = struct { .opt_payload => { const a_payload = a.castTag(.opt_payload).?.data; const b_payload = b.castTag(.opt_payload).?.data; - var buffer: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buffer); + const payload_ty = ty.optionalChild(mod); return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); }, .slice => { @@ -2175,7 +2129,7 @@ pub const Value = struct { return true; } - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); for (a_field_vals, 0..) |a_elem, i| { const b_elem = b_field_vals[i]; @@ -2239,8 +2193,8 @@ pub const Value = struct { return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var i: usize = 0; var a_buf: ElemValueBuffer = undefined; var b_buf: ElemValueBuffer = undefined; @@ -2253,11 +2207,11 @@ pub const Value = struct { } return true; }, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { - const a_len = switch (a_ty.ptrSize()) { + const a_len = switch (a_ty.ptrSize(mod)) { .Slice => a.sliceLen(mod), - .One => a_ty.childType().arrayLen(), + .One => a_ty.childType(mod).arrayLen(mod), else => unreachable, }; if (a_len != b.sliceLen(mod)) { @@ -2266,7 +2220,7 @@ pub const Value = struct { var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&ptr_buf); - const a_ptr = switch (a_ty.ptrSize()) { + const a_ptr = switch (a_ty.ptrSize(mod)) { .Slice => a.slicePtr(), .One => a, else => unreachable, @@ -2412,8 +2366,8 @@ pub const Value = struct { else => return hashPtr(val, hasher, mod), }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var index: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { @@ -2438,8 +2392,7 @@ pub const Value = struct { if (val.castTag(.opt_payload)) |payload| { std.hash.autoHash(hasher, true); // non-null const sub_val = payload.data; - var buffer: Type.Payload.ElemType = undefined; - const sub_ty = ty.optionalChild(&buffer); + const sub_ty = ty.optionalChild(mod); sub_val.hash(sub_ty, hasher, mod); } else { std.hash.autoHash(hasher, false); // null @@ -2534,8 +2487,8 @@ pub const Value = struct { else => val.hashPtr(hasher, mod), }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var index: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { @@ -2544,8 +2497,7 @@ pub const Value = struct { } }, .Optional => if (val.castTag(.opt_payload)) |payload| { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); 
payload.data.hashUncoerced(child_ty, hasher, mod); } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { @@ -2720,7 +2672,7 @@ pub const Value = struct { const decl_index = val.castTag(.decl_ref).?.data; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(); + return decl.ty.arrayLen(mod); } else { return 1; } @@ -2729,7 +2681,7 @@ pub const Value = struct { const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(); + return decl.ty.arrayLen(mod); } else { return 1; } @@ -2737,7 +2689,7 @@ pub const Value = struct { .comptime_field_ptr => { const payload = val.castTag(.comptime_field_ptr).?.data; if (payload.field_ty.zigTypeTag(mod) == .Array) { - return payload.field_ty.arrayLen(); + return payload.field_ty.arrayLen(mod); } else { return 1; } @@ -3137,7 +3089,7 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, int_ty.vectorLen()); + const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -3250,7 +3202,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3298,7 +3250,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3345,8 +3297,8 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try arena.alloc(Value, ty.vectorLen()); - const result_data = try arena.alloc(Value, ty.vectorLen()); + const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3408,7 +3360,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3452,7 +3404,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3527,7 +3479,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -3565,7 +3517,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3601,7 +3553,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3631,7 +3583,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3666,7 +3618,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3701,7 +3653,7 @@ pub const Value = struct { pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3741,7 +3693,7 @@ pub const Value = struct { pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3781,7 +3733,7 @@ pub const Value = struct { pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3857,7 +3809,7 @@ pub const Value = struct { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3904,7 +3856,7 @@ pub const Value = struct { pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3950,7 +3902,7 @@ pub const Value = struct { pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3986,7 +3938,7 @@ pub const Value = struct { pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4007,7 +3959,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4038,7 +3990,7 @@ pub const Value = struct { pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4078,8 +4030,8 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4136,7 +4088,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4184,7 +4136,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4212,7 +4164,7 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4264,7 +4216,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4300,7 +4252,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4359,7 +4311,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4418,7 +4370,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4477,7 +4429,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4530,7 +4482,7 @@ pub const Value = struct { pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4570,7 +4522,7 @@ pub const Value = struct { pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4610,7 +4562,7 @@ pub const Value = struct { pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4650,7 +4602,7 @@ pub const Value = struct { pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4690,7 +4642,7 @@ pub const Value = struct { pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4730,7 +4682,7 @@ pub const Value = struct { pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4770,7 +4722,7 @@ pub const Value = struct { pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4810,7 +4762,7 @@ pub const Value = struct { pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4850,7 +4802,7 @@ pub const Value = struct { pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4890,7 +4842,7 @@ pub const Value = struct { pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4930,7 +4882,7 @@ pub const Value = struct { pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4970,7 +4922,7 @@ pub const Value = struct { pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5010,7 +4962,7 @@ pub const Value = struct { pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5050,7 +5002,7 @@ pub const Value = struct { pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5097,7 +5049,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var mulend1_buf: Value.ElemValueBuffer = undefined; const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf); -- cgit v1.2.3 From 9ec0017f460854300004ab263bf585c2d376d1fb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 16:32:38 -0700 Subject: stage2: migrate many pointer types to the InternPool --- src/Air.zig | 14 +++++--- src/InternPool.zig | 41 ++++++++++++++------- src/Sema.zig | 42 +++++++++++----------- src/arch/aarch64/CodeGen.zig | 8 +++-- src/arch/arm/CodeGen.zig | 8 +++-- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/codegen/c.zig | 12 +++---- src/codegen/llvm.zig | 10 +++--- src/codegen/spirv.zig | 13 ++++--- src/type.zig | 85 ++++++++++++++++++++++++++++++++------------ 11 files changed, 152 insertions(+), 85 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index 3c04d17073..43fc55e811 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1427,8 +1427,11 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[inst_index] == .const_ty); - return air_datas[inst_index].ty; + return switch (air_tags[inst_index]) { + .const_ty => air_datas[inst_index].ty, + .interned => air_datas[inst_index].interned.toType(), + else => unreachable, + }; } /// Returns the requested data, as well as the new index which is at the start of the @@ -1492,6 +1495,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, + .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } } @@ -1717,8 +1721,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), }; } diff --git a/src/InternPool.zig b/src/InternPool.zig index 1da0572bd4..36afbadf3d 
100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -73,7 +73,7 @@ pub const Key = union(enum) { /// If zero use pointee_type.abiAlignment() /// When creating pointer types, if alignment is equal to pointee type /// abi alignment, this value should be set to 0 instead. - alignment: u16 = 0, + alignment: u64 = 0, /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this /// number of bytes. @@ -90,9 +90,9 @@ pub const Key = union(enum) { /// an appropriate value for this field. address_space: std.builtin.AddressSpace = .generic, - pub const VectorIndex = enum(u32) { - none = std.math.maxInt(u32), - runtime = std.math.maxInt(u32) - 1, + pub const VectorIndex = enum(u16) { + none = std.math.maxInt(u16), + runtime = std.math.maxInt(u16) - 1, _, }; }; @@ -806,16 +806,33 @@ pub const Pointer = struct { sentinel: Index, flags: Flags, packed_offset: PackedOffset, - vector_index: VectorIndex, + + /// Stored as a power-of-two, with one special value to indicate none. + pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + return @intToEnum(Alignment, @ctz(n)); + } + }; pub const Flags = packed struct(u32) { - alignment: u16, + size: Size, + alignment: Alignment, is_const: bool, is_volatile: bool, is_allowzero: bool, - size: Size, address_space: AddressSpace, - _: u7 = undefined, + vector_index: VectorIndex, }; pub const PackedOffset = packed struct(u32) { @@ -928,13 +945,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { return .{ .ptr_type = .{ .elem_type = ptr_info.child, .sentinel = ptr_info.sentinel, - .alignment = ptr_info.flags.alignment, + .alignment = ptr_info.flags.alignment.toByteUnits(0), .size = ptr_info.flags.size, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, - .vector_index = ptr_info.vector_index, + .vector_index = ptr_info.flags.vector_index, .host_size = ptr_info.packed_offset.host_size, .bit_offset = ptr_info.packed_offset.bit_offset, } }; @@ -1003,18 +1020,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = ptr_type.alignment, + .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment), .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, .is_allowzero = ptr_type.is_allowzero, .size = ptr_type.size, .address_space = ptr_type.address_space, + .vector_index = ptr_type.vector_index, }, .packed_offset = .{ .host_size = ptr_type.host_size, .bit_offset = ptr_type.bit_offset, }, - .vector_index = ptr_type.vector_index, }), }); }, diff --git a/src/Sema.zig b/src/Sema.zig index 8abe6484ee..39f39b43d9 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8400,7 +8400,7 @@ fn analyzeOptionalPayloadPtr( const child_type = opt_type.optionalChild(mod); const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, - .mutable = !optional_ptr_ty.isConstPtr(), + .mutable = !optional_ptr_ty.isConstPtr(mod), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); @@ -8594,7 +8594,7 @@ fn analyzeErrUnionPayloadPtr( const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try 
Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, - .mutable = !operand_ty.isConstPtr(), + .mutable = !operand_ty.isConstPtr(mod), .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); @@ -10147,7 +10147,7 @@ fn zirSwitchCapture( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant( @@ -10166,7 +10166,7 @@ fn zirSwitchCapture( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); @@ -15292,10 +15292,10 @@ fn zirCmpEq( } // comparing null with optionals - if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr())) { + if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, rhs, op == .neq); } - if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr())) { + if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, lhs, op == .neq); } @@ -22254,7 +22254,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const target = sema.mod.getTarget(); const mod = sema.mod; - if (dest_ty.isConstPtr()) { + if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); } @@ -22452,7 +22452,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ty = sema.typeOf(dest_ptr); try checkMemOperand(sema, block, dest_src, dest_ptr_ty); - if (dest_ptr_ty.isConstPtr()) { + if (dest_ptr_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } @@ -24206,7 +24206,7 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = slice_ptr_ty, .mutable = attr_ptr_ty.ptrIsMutable(mod), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); @@ -24227,7 +24227,7 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); @@ -24897,7 +24897,7 @@ fn unionFieldPtr( const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(mod), - .@"volatile" = union_ptr_ty.isVolatilePtr(), + .@"volatile" = union_ptr_ty.isVolatilePtr(mod), .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); @@ -25239,7 +25239,7 @@ fn tupleFieldPtr( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), - .@"volatile" = tuple_ptr_ty.isVolatilePtr(), + .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); @@ -25767,7 +25767,7 @@ fn coerceExtra( } // coercion from C pointer - if 
(inst_ty.isCPtr()) src_c_ptr: { + if (inst_ty.isCPtr(mod)) src_c_ptr: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr; // In this case we must add a safety check because the C pointer // could be null. @@ -27255,7 +27255,7 @@ fn storePtr2( ) CompileError!void { const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - if (ptr_ty.isConstPtr()) + if (ptr_ty.isConstPtr(mod)) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); const elem_ty = ptr_ty.childType(mod); @@ -29843,7 +29843,7 @@ fn analyzeSlice( const result = try block.addBitCast(return_ty, new_ptr); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } @@ -29902,7 +29902,7 @@ fn analyzeSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } @@ -30720,7 +30720,7 @@ fn resolvePeerTypes( err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); } } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30876,12 +30876,12 @@ fn resolvePeerTypes( .Optional => { const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { - seen_const = seen_const or opt_child_ty.isConstPtr(); + seen_const = seen_const or opt_child_ty.isConstPtr(mod); any_are_null = true; continue; } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); any_are_null = false; chosen = candidate; chosen_i = candidate_i + 1; @@ -30924,7 +30924,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } @@ -31023,7 +31023,7 @@ fn resolvePeerTypes( var info = chosen_ty.ptrInfo(mod); info.sentinel = chosen_child_ty.sentinel(mod); info.size = .Slice; - info.mutable = !(seen_const or chosen_child_ty.isConstPtr()); + info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod)); info.pointee_type = chosen_child_ty.elemType2(mod); const new_ptr_ty = try Type.ptr(sema.arena, mod, info); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 81169750c1..4a10691e02 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3430,9 +3430,10 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: 
MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -3496,9 +3497,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3869,7 +3871,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c08cb58c48..3591ead53d 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2428,9 +2428,10 @@ fn ptrElemVal( } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -2527,9 +2528,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -2738,7 +2740,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1e5858a948..1008d527f6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1536,7 +1536,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/sparc64/CodeGen.zig 
b/src/arch/sparc64/CodeGen.zig index f8a62f9798..83e4b4f93d 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1827,7 +1827,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cd3974bc91..b0fb9fa480 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6117,7 +6117,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6159,7 +6159,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6221,7 +6221,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { if (use_atomic) try writer.writeAll("zig_atomic("); try f.renderType(writer, ty); if (use_atomic) try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6265,7 +6265,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", (zig_atomic("); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6299,7 +6299,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try writer.writeAll("zig_atomic_store((zig_atomic("); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6365,7 +6365,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { return .none; } - if (elem_abi_size > 1 or dest_ty.isVolatilePtr()) { + if (elem_abi_size > 1 or dest_ty.isVolatilePtr(mod)) { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 558534a651..7fa9b74334 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -7046,7 +7046,7 @@ pub const FuncGen = struct { const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = 
self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); - load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); + load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); break :blk load_inst; }; const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, ""); @@ -8221,7 +8221,7 @@ pub const FuncGen = struct { const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); const dest_ptr_align = ptr_ty.ptrAlignment(mod); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr()); + _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); if (safety and mod.comp.bin_file.options.valgrind) { self.valgrindMarkUndef(dest_ptr, len); } @@ -8497,7 +8497,7 @@ pub const FuncGen = struct { const dest_ptr_align = ptr_ty.ptrAlignment(mod); const u8_llvm_ty = self.context.intType(8); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); - const is_volatile = ptr_ty.isVolatilePtr(); + const is_volatile = ptr_ty.isVolatilePtr(mod); if (self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { @@ -8621,7 +8621,7 @@ pub const FuncGen = struct { const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); const mod = self.dg.module; - const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); + const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); _ = self.builder.buildMemCpy( dest_ptr, dest_ptr_ty.ptrAlignment(mod), @@ -9894,7 +9894,7 @@ pub const FuncGen = struct { if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_alignment = info.alignment(mod); - const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); + const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr(mod)); assert(info.vector_index != .runtime); if (info.vector_index != .none) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 5fa81d19ff..27a79c1c45 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1689,7 +1689,7 @@ pub const DeclGen = struct { const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpLoad, .{ .id_result_type = self.typeId(indirect_value_ty_ref), @@ -1705,7 +1705,7 @@ pub const DeclGen = struct { const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpStore, .{ .pointer = ptr_id, @@ -2464,9 +2464,10 @@ pub const DeclGen = struct { } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2479,9 +2480,10 @@ pub const DeclGen = struct { 
} fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2781,10 +2783,11 @@ pub const DeclGen = struct { } fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ptr_ty = self.typeOf(ty_op.operand); const operand = try self.resolve(ty_op.operand); - if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; return try self.load(ptr_ty, operand); } diff --git a/src/type.zig b/src/type.zig index 4840bca6e7..db8c116f70 100644 --- a/src/type.zig +++ b/src/type.zig @@ -193,7 +193,7 @@ pub const Type = struct { .Frame, => false, - .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), .Optional => { if (!is_equality_cmp) return false; return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); @@ -3012,38 +3012,59 @@ pub const Type = struct { } } - pub fn isConstPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => !self.castTag(.pointer).?.data.mutable, - else => false, + pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => !ty.castTag(.pointer).?.data.mutable, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_const, + else => false, + }, }; } - pub fn isVolatilePtr(self: Type) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"volatile"; + pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, mod.intern_pool); + } + + pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"volatile", + else => false, + }, + else => switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_volatile, + else => false, }, - else => false, }; } - pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"allowzero"; + pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"allowzero", + else => ty.zigTypeTag(mod) == .Optional, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_allowzero, + else => false, }, - else => return self.zigTypeTag(mod) == .Optional, }; } - pub fn isCPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => self.castTag(.pointer).?.data.size == .C, - - else => return false, + pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.size == .C, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => 
|ptr_type| ptr_type.size == .C, + else => false, + }, }; } @@ -5063,7 +5084,7 @@ pub const Type = struct { return .{ .pointee_type = p.elem_type.toType(), .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, - .@"align" = p.alignment, + .@"align" = @intCast(u32, p.alignment), .@"addrspace" = p.address_space, .bit_offset = p.bit_offset, .host_size = p.host_size, @@ -5248,6 +5269,24 @@ pub const Type = struct { } } + if (d.pointee_type.ip_index != .none and + (d.sentinel == null or d.sentinel.?.ip_index != .none)) + { + return mod.ptrType(.{ + .elem_type = d.pointee_type.ip_index, + .sentinel = if (d.sentinel) |s| s.ip_index else .none, + .alignment = d.@"align", + .host_size = d.host_size, + .bit_offset = d.bit_offset, + .vector_index = d.vector_index, + .size = d.size, + .is_const = !d.mutable, + .is_volatile = d.@"volatile", + .is_allowzero = d.@"allowzero", + .address_space = d.@"addrspace", + }); + } + return Type.Tag.pointer.create(arena, d); } -- cgit v1.2.3 From 2ffef605c75b62ba49e21bfb3256537a4a2c0a5e Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 7 May 2023 22:12:04 +0100 Subject: Replace uses of Value.zero, Value.one, Value.negative_one This is a bit nasty, mainly because Type.onePossibleValue is now errorable, which is a quite viral change. --- src/Air.zig | 2 +- src/Module.zig | 12 +- src/Sema.zig | 257 ++++++++++++++++++++++++++----------------- src/TypedValue.zig | 13 ++- src/arch/aarch64/CodeGen.zig | 4 +- src/arch/arm/CodeGen.zig | 4 +- src/arch/riscv64/CodeGen.zig | 4 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 10 +- src/arch/x86_64/CodeGen.zig | 12 +- src/codegen.zig | 8 +- src/codegen/c.zig | 64 +++++------ src/codegen/llvm.zig | 18 +-- src/codegen/spirv.zig | 10 +- src/type.zig | 44 ++++---- src/value.zig | 43 ++++---- 16 files changed, 286 insertions(+), 223 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index 549583e697..8059b9e57f 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1485,7 +1485,7 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { } /// Returns `null` if runtime-known. 
-pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); diff --git a/src/Module.zig b/src/Module.zig index f56235c933..3f5dc8039e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5750,7 +5750,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (!arg_tv.val.isGenericPoison()) arg_tv.val - else if (arg_tv.ty.onePossibleValue(mod)) |opv| + else if (try arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -6887,6 +6887,16 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.debug.runtime_safety) { + // TODO: decide if this also works for ABI int types like enums + const tag = ty.zigTypeTag(mod); + assert(tag == .Int or tag == .ComptimeInt); + } + if (@TypeOf(x) == comptime_int) { + if (comptime std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (comptime std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + @compileError("Out-of-range comptime_int passed to Module.intValue"); + } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index dc5bb1cdea..9b1da74982 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3062,9 +3062,9 @@ fn zirEnumDecl( } } else if (any_values) { const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, enum_obj.tag_ty) + try sema.intAdd(val, try mod.intValue(enum_obj.tag_ty, 1), enum_obj.tag_ty) else - Value.zero; + try mod.intValue(enum_obj.tag_ty, 0); last_tag_val = tag_val; const copied_tag_val = try tag_val.copy(decl_arena_allocator); const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ @@ -4709,7 +4709,7 @@ fn zirValidateArrayInit( // Determine whether the value stored to this pointer is comptime-known. 
if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(mod, i)) |opv| { + if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; } @@ -8132,7 +8132,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (!op_ty.isAnyError()) { const names = op_ty.errorSetNames(); switch (names.len) { - 0 => return sema.addConstant(Type.err_int, Value.zero), + 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), else => {}, } @@ -8167,7 +8167,7 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = try sema.addConstant(Type.err_int, Value.zero); + const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -9656,7 +9656,7 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ @@ -9665,7 +9665,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), Value.zero); + const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9705,8 +9705,9 @@ fn intCast( // If the destination type is signed, then we need to double its // range to account for negative values. 
const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); - break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); + const one = try mod.intValue(unsigned_operand_ty, 1); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); } else dest_max_val; const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); @@ -9747,7 +9748,7 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -9759,7 +9760,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(operand_ty, Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -11250,7 +11251,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. - item = try sema.intAddScalar(item, Value.one, operand_ty); + item = try sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty); }) { cases_len += 1; @@ -11696,7 +11697,7 @@ const RangeSetUnhandledIterator = struct { fn next(it: *RangeSetUnhandledIterator) !?Value { while (it.range_i < it.ranges.len) : (it.range_i += 1) { if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { @@ -11705,7 +11706,7 @@ const RangeSetUnhandledIterator = struct { it.cur = it.ranges[it.range_i].last; } if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { @@ -12150,7 +12151,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 
embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12235,14 +12236,14 @@ fn zirShl( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12348,7 +12349,7 @@ fn zirShl( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .shl_overflow); @@ -12417,14 +12418,14 @@ fn zirShr( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -13156,9 +13157,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13180,9 +13181,9 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13293,9 +13294,14 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13318,7 +13324,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13427,9 +13433,14 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13507,8 +13518,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13519,7 +13535,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, }); } else { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero); break :ok is_in_range; } @@ -13592,9 +13608,14 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if 
(!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13612,7 +13633,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13708,9 +13729,14 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13727,7 +13753,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13862,8 +13888,9 @@ fn addDivByZeroSafety( if (maybe_rhs_val != null) return; const mod = sema.mod; + const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else Value.float_zero; // TODO migrate to internpool const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -13874,7 +13901,7 @@ fn addDivByZeroSafety( } }, }); } else ok: { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero); }; try sema.addSafetyCheck(block, ok, .divide_by_zero); @@ -13946,9 +13973,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -14325,6 +14357,7 @@ fn zirOverflowArithmetic( wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { + const zero = try mod.intValue(dest_ty.scalarType(mod), 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored @@ -14332,12 +14365,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the argument is undefined, undefined is returned. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14358,7 +14391,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef()) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -14373,12 +14406,13 @@ fn zirOverflowArithmetic( // If either of the arguments is zero, the result is zero and no overflow occured. // If either of the arguments is one, the result is the other and no overflow occured. // Otherwise, if either of the arguments is undefined, both results are undefined. 
+ const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; - } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } } @@ -14386,9 +14420,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef()) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; - } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } } @@ -14410,12 +14444,12 @@ fn zirOverflowArithmetic( // Oterhwise if either of the arguments is undefined, both results are undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14766,6 +14800,11 @@ fn analyzeArithmetic( // If either of the operands are inf, and the other operand is zero, // the result is nan. // If either of the operands are nan, the result is nan. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.isNan()) { @@ -14783,11 +14822,11 @@ fn analyzeArithmetic( break :lz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14813,11 +14852,11 @@ fn analyzeArithmetic( break :rz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14849,15 +14888,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14869,11 +14913,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14892,15 +14936,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14911,11 +14960,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14968,7 +15017,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .integer_overflow); @@ -15785,7 +15834,7 @@ fn zirBuiltinSrc( const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); @@ -15798,7 +15847,7 @@ fn zirBuiltinSrc( // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); @@ -16148,7 +16197,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16256,7 +16305,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16344,7 +16393,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16454,7 +16503,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16496,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16692,7 +16741,7 @@ fn typeInfoNamespaceDecls( defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -17884,7 +17933,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -18544,8 +18593,8 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool(mod)) return sema.addConstant(Type.u1, Value.one); - return sema.addConstant(Type.u1, Value.zero); + if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); } @@ -19761,7 +19810,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -19804,17 +19853,17 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { - const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); + const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0))); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } - return sema.addConstant(dest_ty, Value.zero); + return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0)); } const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand); if (block.wantSafety()) { const back = try block.addTyOp(.int_to_float, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, Value.one)); - const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, Value.negative_one)); + const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1))); + const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1))); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } @@ -21398,7 +21447,7 @@ fn analyzeShuffle( 
expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); } while (i < max_len) : (i += 1) { - expand_mask_values[i] = Value.negative_one; + expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1); } const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); @@ -24504,7 +24553,7 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { - if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } @@ -24815,7 +24864,7 @@ fn tupleFieldValByIndex( const mod = sema.mod; const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } @@ -24828,7 +24877,7 @@ fn tupleFieldValByIndex( return sema.addConstant(field_ty, field_values[field_index]); } - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -25205,7 +25254,7 @@ fn tupleFieldPtr( .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ .field_ty = field_ty, .field_val = default_val, @@ -25256,13 +25305,13 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25812,7 +25861,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -25879,7 +25928,7 @@ fn coerceExtra( try mod.intValue(Type.usize, dest_info.@"align") else try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = Value.zero, + .len = try mod.intValue(Type.usize, 0), }); return sema.addConstant(dest_ty, slice_val); } @@ -28234,7 +28283,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -32532,9 +32581,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try val.copy(decl_arena_allocator); } else blk: { const val 
= if (last_tag_val) |val| - try sema.intAdd(val, Value.one, int_tag_ty) + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) else - Value.zero; + try mod.intValue(int_tag_ty, 0); last_tag_val = val; break :blk try val.copy(decl_arena_allocator); @@ -32903,7 +32952,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -33049,7 +33098,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; } @@ -33066,7 +33115,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { switch (enum_obj.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; }, @@ -33078,14 +33127,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 28212a164c..828fb610d4 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -61,7 +61,10 @@ pub fn format( ) !void { _ = options; comptime std.debug.assert(fmt.len == 0); - return ctx.tv.print(writer, 3, ctx.mod); + return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function + else => |e| return e, + }; } /// Prints the Value according to the Type, not according to the Value Tag. 
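The `format` hunk above is worth a gloss: `std.fmt` requires a type's `format` method to return only the writer's error set, so once `print` can additionally fail with `Allocator.Error`, that error has to be handled at the `format` boundary rather than propagated; the patch chooses to panic. Below is a minimal standalone sketch of the same pattern, assuming only the standard library; `Thing` and `render` are hypothetical stand-ins for `TypedValue` and `print`, not the compiler's actual types.

const std = @import("std");

const Thing = struct {
    x: u32,

    // Inner printer: may fail with OOM in addition to writer errors,
    // mirroring the now-errorable `print` in the hunk above.
    fn render(t: Thing, writer: anytype) (@TypeOf(writer).Error || std.mem.Allocator.Error)!void {
        try writer.print("Thing({d})", .{t.x});
    }

    // `std.fmt` only permits the writer's error set here, so OutOfMemory
    // cannot propagate and is turned into a panic, as in the patch.
    pub fn format(t: Thing, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        _ = fmt;
        _ = options;
        return t.render(writer) catch |err| switch (err) {
            error.OutOfMemory => @panic("OOM"),
            else => |e| return e,
        };
    }
};

test "format swallows OOM but forwards writer errors" {
    var buf: [16]u8 = undefined;
    const s = try std.fmt.bufPrint(&buf, "{}", .{Thing{ .x = 7 }});
    try std.testing.expectEqualStrings("Thing(7)", s);
}

The `catch`/`switch` keeps writer errors flowing through unchanged while making the out-of-memory case explicit, which is why the patch can add `Allocator.Error` to `print` without changing `format`'s externally visible error set.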
@@ -70,7 +73,7 @@ pub fn print( writer: anytype, level: u8, mod: *Module, -) @TypeOf(writer).Error!void { +) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -95,7 +98,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -112,7 +115,7 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -129,7 +132,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 95a8350c7d..ea3814a20e 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4311,7 +4311,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6154,7 +6154,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cc2bc3a613..967a6dd753 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4291,7 +4291,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6101,7 +6101,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1008d527f6..5cf621488e 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1743,7 +1743,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -2551,7 +2551,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 4231222d4b..2cb35460c2 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1343,7 +1343,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4575,7 +4575,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { return self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, }); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 327e2c13e0..36b805cf94 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -789,7 +789,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { assert(!gop.found_existing); const mod = func.bin_file.base.options.module.?; - const val = func.air.value(ref, mod).?; + const val = (try func.air.value(ref, mod)).?; const ty = func.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; @@ -2195,7 +2195,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; + const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); @@ -3138,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.zero; + const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); @@ -3792,7 +3792,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref, mod).?; + const item_val = (try func.air.value(ref, mod)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -5048,7 +5048,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset for (elements, 0..) |elem, elem_index| { - if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; const elem_ty = result_ty.structFieldType(elem_index); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 51c6bc79e6..b208656a41 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2768,7 +2768,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const full_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), - .child = src_ty.childType(mod).ip_index, + .child = elem_ty.ip_index, }); const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); @@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -11265,7 +11265,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); @@ -11337,7 +11337,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); @@ -11601,7 +11601,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, })); break :tracking gop.value_ptr; }, @@ -11614,7 +11614,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); + return self.genTypedValue(.{ .ty = ty, .val = (try self.air.value(ref, mod)).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { diff --git a/src/codegen.zig b/src/codegen.zig index 9c9868892f..8bd478bf7c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -675,7 +675,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) Value.zero else typed_value.val; + const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -690,7 +690,7 @@ pub fn generateSymbol( if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -722,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1280,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
- const err_val = if (!is_pl) typed_value.val else Value.zero; + const err_val = if (!is_pl) typed_value.val else try mod.intValue(error_type, 0); return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 9443c2298a..aaeec05562 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -287,7 +287,7 @@ pub const Function = struct { if (gop.found_existing) return gop.value_ptr.*; const mod = f.object.dg.module; - const val = f.air.value(ref, mod).?; + const val = (try f.air.value(ref, mod)).?; const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { @@ -356,7 +356,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -369,7 +369,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -383,7 +383,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -397,7 +397,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -690,7 +690,7 @@ pub const DeclGen = struct { location, ); try writer.print(") + {})", .{ - try dg.fmtIntLiteral(Type.usize, Value.one, .Other), + try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } @@ -1253,7 +1253,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - const error_val = if (val.errorUnionIsPayload()) Value.zero else val; + const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); @@ -3611,7 +3611,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -4183,7 +4183,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand, mod) orelse break :known; + const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; break :fn_decl switch (callee_val.tag()) { .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, @@ -4269,7 +4269,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { const mod = 
f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4735,7 +4735,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -5069,7 +5069,7 @@ fn airIsNull( // operand is a regular pointer, test `operand !=/== NULL` TypedValue{ .ty = optional_ty, .val = Value.null } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) - TypedValue{ .ty = payload_ty, .val = Value.zero } + TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); @@ -5325,7 +5325,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { }, .end => { try f.writeCValue(writer, field_ptr_val, .Other); - try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5378,7 +5378,7 @@ fn fieldPtr( .end => { try writer.writeByte('('); try f.writeCValue(writer, container_ptr_val, .Other); - try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5546,7 +5546,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, operand, .{ .identifier = "error" }) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Initializer); } try writer.writeAll(";\n"); return local; @@ -5673,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n "); return operand; @@ -5681,7 +5681,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); try f.writeCValueDeref(writer, operand); try writer.writeAll(".error = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) @@ -5737,7 +5737,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, err_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, err_ty, try mod.intValue(err_ty, 0), .Other); 
try a.end(f, writer); } return local; @@ -5768,11 +5768,11 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); return local; } @@ -5798,7 +5798,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); - try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); @@ -6022,7 +6022,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print(" {s} {}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(Type.i32, Value.zero), + try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -6278,7 +6278,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6326,7 +6326,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, Type.usize, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -6677,27 +6677,27 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { - .Or, .Xor, .Add => Value.zero, + .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), else => switch (scalar_ty.intInfo(mod).signedness) { .unsigned => try scalar_ty.maxIntScalar(mod), - .signed => Value.negative_one, + .signed => try mod.intValue(scalar_ty, -1), }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.zero, + .Bool => try mod.intValue(scalar_ty, 0), .Int => try scalar_ty.minInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, - .Mul => Value.one, + .Mul => try 
mod.intValue(Type.comptime_int, 1), }, .Initializer); try writer.writeAll(";\n"); @@ -7686,13 +7686,13 @@ const Vectorize = struct { try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{ try f.fmtIntLiteral(Type.usize, len_val), }); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 9d8c3edaf5..c42719d07c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2854,7 +2854,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(mod); - assert(elem_ty.onePossibleValue(mod) == null); + if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -3588,7 +3588,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) tv.val else Value.zero; + const err_val = if (!is_pl) tv.val else try mod.intValue(Type.anyerror, 0); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } @@ -3596,7 +3596,7 @@ pub const DeclGen = struct { const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, - .val = if (is_pl) Value.zero else tv.val, + .val = if (is_pl) try mod.intValue(Type.anyerror, 0) else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, @@ -4476,7 +4476,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ .ty = self.typeOf(inst), - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); gop.value_ptr.* = llvm_val; return llvm_val; @@ -6873,7 +6873,7 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.anyerror, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; @@ -8203,7 +8203,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. 
However, safety makes the difference between using @@ -8494,7 +8494,7 @@ pub const FuncGen = struct { const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); - if (self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9323,7 +9323,7 @@ pub const FuncGen = struct { var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; @@ -9344,7 +9344,7 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 32e0c13c37..3842da5f7b 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -232,7 +232,7 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { const mod = self.module; - if (self.air.value(inst, mod)) |val| { + if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { @@ -584,7 +584,7 @@ pub const DeclGen = struct { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. 
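            // (Sketch of the eventual fix, not part of this change: intern the
            // callee and emit a real function-pointer constant, e.g. via the
            // SPV_INTEL_function_pointers extension, instead of a zero usize.)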
- try self.addInt(Type.usize, Value.zero); + try self.addInt(Type.usize, Value.zero_usize); // TODO: Add dependency return; }, @@ -803,7 +803,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else Value.zero; + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); const eu_layout = dg.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { @@ -2801,7 +2801,7 @@ pub const DeclGen = struct { const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -3141,7 +3141,7 @@ pub const DeclGen = struct { const label = IdRef{ .id = first_case_label.id + case_i }; for (items) |item| { - const value = self.air.value(item, mod) orelse { + const value = (try self.air.value(item, mod)) orelse { return self.todo("switch on runtime value???", .{}); }; const int_val = switch (cond_ty.zigTypeTag(mod)) { diff --git a/src/type.zig b/src/type.zig index e60d216085..e6d0af9f46 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3377,7 +3377,7 @@ pub const Type = struct { } /// For vectors, returns the element type. Otherwise returns self. - pub fn scalarType(ty: Type, mod: *const Module) Type { + pub fn scalarType(ty: Type, mod: *Module) Type { return switch (ty.zigTypeTag(mod)) { .Vector => ty.childType(mod), else => ty, @@ -3941,13 +3941,13 @@ pub const Type = struct { /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. - pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value { + pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { var ty = starting_type; if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -3956,13 +3956,13 @@ pub const Type = struct { .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); - if (array_type.child.toType().onePossibleValue(mod) != null) + if ((try array_type.child.toType().onePossibleValue(mod)) != null) return Value.initTag(.the_only_possible_value); return null; }, .vector_type => |vector_type| { if (vector_type.len == 0) return Value.initTag(.empty_array); - if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; + if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; return null; }, .opt_type => |child| { @@ -4055,7 +4055,7 @@ pub const Type = struct { assert(s.haveFieldTypes()); for (s.fields.values()) |field| { if (field.is_comptime) continue; - if (field.ty.onePossibleValue(mod) != null) continue; + if ((try field.ty.onePossibleValue(mod)) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -4066,7 +4066,7 @@ pub const Type = struct { for (tuple.values, 0..) 
|val, i| { const is_comptime = val.ip_index != .unreachable_value; if (is_comptime) continue; - if (tuple.types[i].onePossibleValue(mod) != null) continue; + if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -4089,7 +4089,7 @@ pub const Type = struct { switch (enum_full.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_full.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_full.values.keys()[0]; }, @@ -4100,24 +4100,24 @@ pub const Type = struct { const enum_simple = ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (!tag_ty.hasRuntimeBits(mod)) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; + const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; if (union_obj.fields.count() == 0) return Value.@"unreachable"; const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue(mod) orelse return null; + const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; _ = tag_val; _ = val_val; return Value.initTag(.empty_struct_value); @@ -4128,7 +4128,7 @@ pub const Type = struct { .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if (ty.childType(mod).onePossibleValue(mod) != null) + if ((try ty.childType(mod).onePossibleValue(mod)) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -4365,8 +4365,8 @@ pub const Type = struct { /// Asserts that the type is an integer. pub fn minIntScalar(ty: Type, mod: *Module) !Value { const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return Value.zero; - if (info.bits == 0) return Value.negative_one; + if (info.signedness == .unsigned) return mod.intValue(ty, 0); + if (info.bits == 0) return mod.intValue(ty, -1); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); @@ -4392,17 +4392,17 @@ pub const Type = struct { } /// Asserts that the type is an integer. 
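    /// For example, the maximum of `u8` is 255 and of `i8` is 127; in the
    /// 1-bit case below, `u1` maxes out at 1 while `i1` maxes out at 0.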
-    pub fn maxIntScalar(self: Type, mod: *Module) !Value {
-        const info = self.intInfo(mod);
+    pub fn maxIntScalar(ty: Type, mod: *Module) !Value {
+        const info = ty.intInfo(mod);
         switch (info.bits) {
             0 => return switch (info.signedness) {
-                .signed => Value.negative_one,
-                .unsigned => Value.zero,
+                .signed => mod.intValue(ty, -1),
+                .unsigned => mod.intValue(ty, 0),
             },
             1 => return switch (info.signedness) {
-                .signed => Value.zero,
-                .unsigned => Value.one,
+                .signed => mod.intValue(ty, 0),
+                .unsigned => mod.intValue(ty, 1),
             },
             else => {},
         }
@@ -4662,7 +4662,7 @@ pub const Type = struct {
         }
     }
 
-    pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value {
+    pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
         switch (ty.tag()) {
             .@"struct" => {
                 const struct_obj = ty.castTag(.@"struct").?.data;
diff --git a/src/value.zig b/src/value.zig
index eced9ba345..8268d1dde1 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1022,7 +1022,7 @@ pub const Value = struct {
                 if (opt_val) |some| {
                     return some.writeToMemory(child, mod, buffer);
                 } else {
-                    return writeToMemory(Value.zero, Type.usize, mod, buffer);
+                    return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer);
                 }
             },
             else => return error.Unimplemented,
@@ -1124,7 +1124,7 @@ pub const Value = struct {
             .Packed => {
                 const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
                 const field_type = ty.unionFields().values()[field_index.?].ty;
-                const field_val = val.fieldValue(field_type, mod, field_index.?);
+                const field_val = try val.fieldValue(field_type, mod, field_index.?);
 
                 return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
             },
@@ -1141,7 +1141,7 @@ pub const Value = struct {
                 if (opt_val) |some| {
                     return some.writeToPackedMemory(child, mod, buffer, bit_offset);
                 } else {
-                    return writeToPackedMemory(Value.zero, Type.usize, mod, buffer, bit_offset);
+                    return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset);
                 }
             },
             else => @panic("TODO implement writeToPackedMemory for more types"),
@@ -1173,7 +1173,7 @@ pub const Value = struct {
             const int_info = ty.intInfo(mod);
             const bits = int_info.bits;
             const byte_count = (bits + 7) / 8;
-            if (bits == 0 or buffer.len == 0) return Value.zero;
+            if (bits == 0 or buffer.len == 0) return mod.intValue(ty, 0);
 
             if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                 .signed => {
@@ -1290,12 +1290,12 @@ pub const Value = struct {
                 }
             },
             .Int, .Enum => {
-                if (buffer.len == 0) return Value.zero;
+                if (buffer.len == 0) return mod.intValue(ty, 0);
                 const int_info = ty.intInfo(mod);
                 const abi_size = @intCast(usize, ty.abiSize(mod));
 
                 const bits = int_info.bits;
-                if (bits == 0) return Value.zero;
+                if (bits == 0) return mod.intValue(ty, 0);
 
                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                     .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
                     .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
@@ -2091,11 +2091,11 @@ pub const Value = struct {
             // .the_one_possible_value,
             // .aggregate,
             // Note that we already checked above for matching tags, e.g. both .aggregate.
-            return ty.onePossibleValue(mod) != null;
+            return (try ty.onePossibleValue(mod)) != null;
         },
         .Union => {
             // Here we have to check for value equality, as-if `a` has been coerced to `ty`.
- if (ty.onePossibleValue(mod) != null) { + if ((try ty.onePossibleValue(mod)) != null) { return true; } if (a_ty.castTag(.anon_struct)) |payload| { @@ -2604,7 +2604,7 @@ pub const Value = struct { if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); + const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValue(mod, index); } else unreachable; }, @@ -2723,7 +2723,7 @@ pub const Value = struct { }; } - pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { + pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { @@ -2737,14 +2737,14 @@ pub const Value = struct { return payload.val; }, - .the_only_possible_value => return ty.onePossibleValue(mod).?, + .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, .empty_struct_value => { if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); return tuple.values[index]; } - if (ty.structFieldValueComptime(mod, index)) |some| { + if (try ty.structFieldValueComptime(mod, index)) |some| { return some; } unreachable; @@ -2968,7 +2968,7 @@ pub const Value = struct { switch (val.ip_index) { .undef => return val, .none => switch (val.tag()) { - .the_only_possible_value => return Value.zero, // for i0, u0 + .the_only_possible_value => return Value.float_zero, // for i0, u0 .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -3402,7 +3402,7 @@ pub const Value = struct { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod); + const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } @@ -3803,7 +3803,7 @@ pub const Value = struct { bits: u16, mod: *Module, ) !Value { - if (bits == 0) return Value.zero; + if (bits == 0) return mod.intValue(ty, 0); var val_space: Value.BigIntSpace = undefined; const val_bigint = val.toBigInt(&val_space, mod); @@ -4011,9 +4011,9 @@ pub const Value = struct { // The shift is enough to remove all the bits from the number, which means the // result is 0 or -1 depending on the sign. if (lhs_bigint.positive) { - return Value.zero; + return mod.intValue(ty, 0); } else { - return Value.negative_one; + return mod.intValue(ty, -1); } } @@ -5151,10 +5151,9 @@ pub const Value = struct { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; - pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; - pub const one: Value = .{ .ip_index = .one, .legacy = undefined }; - pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined }; + pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; + pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this! 
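    // With the shared `zero`, `one`, and `negative_one` constants removed,
    // integer constants are created per-type through `mod.intValue`, e.g.
    //     const ten = try mod.intValue(Type.usize, 10);
    // so every integer value carries its type as an InternPool index.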
pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; @@ -5169,7 +5168,9 @@ pub const Value = struct { } pub fn boolToInt(x: bool) Value { - return if (x) Value.one else Value.zero; + const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; + const one: Value = .{ .ip_index = .one, .legacy = undefined }; + return if (x) one else zero; } pub const RuntimeIndex = enum(u32) { -- cgit v1.2.3 From 17882162b3be5542b4e289e5ddc6535a4bb4c6b1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 15 May 2023 20:09:54 -0700 Subject: stage2: move function types to InternPool --- lib/std/builtin.zig | 2 +- src/Air.zig | 7 +- src/InternPool.zig | 244 ++++++++++++++++++++++----- src/Module.zig | 38 ++++- src/Sema.zig | 334 ++++++++++++++++++------------------ src/Zir.zig | 5 +- src/arch/aarch64/CodeGen.zig | 44 +++-- src/arch/arm/CodeGen.zig | 44 +++-- src/arch/riscv64/CodeGen.zig | 22 +-- src/arch/sparc64/CodeGen.zig | 22 +-- src/arch/wasm/CodeGen.zig | 92 +++++----- src/arch/x86_64/CodeGen.zig | 34 ++-- src/codegen.zig | 2 +- src/codegen/c.zig | 13 +- src/codegen/c/type.zig | 34 ++-- src/codegen/llvm.zig | 235 +++++++++++++------------- src/codegen/spirv.zig | 22 +-- src/link/Coff.zig | 2 +- src/link/Dwarf.zig | 2 +- src/link/SpirV.zig | 6 +- src/target.zig | 11 ++ src/type.zig | 392 ++++++++++++------------------------------- src/value.zig | 5 + 23 files changed, 821 insertions(+), 791 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 429654bd4a..3e8970a354 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -143,7 +143,7 @@ pub const Mode = OptimizeMode; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. -pub const CallingConvention = enum { +pub const CallingConvention = enum(u8) { /// This is the default Zig calling convention used when not using `export` on `fn` /// and no other calling convention is specified. 
Unspecified, diff --git a/src/Air.zig b/src/Air.zig index e82a70100f..09f8d6c9e2 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -845,7 +845,6 @@ pub const Inst = struct { pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), - u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -914,8 +913,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), - one_u5 = @enumToInt(InternPool.Index.one_u5), - four_u5 = @enumToInt(InternPool.Index.four_u5), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), @@ -1383,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); - return callee_ty.fnReturnType(); + return callee_ty.fnReturnTypeIp(ip); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { diff --git a/src/InternPool.zig b/src/InternPool.zig index 2435e0ad31..d4bfe5a244 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -148,6 +148,7 @@ pub const Key = union(enum) { union_type: UnionType, opaque_type: OpaqueType, enum_type: EnumType, + func_type: FuncType, /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. @@ -185,6 +186,13 @@ pub const Key = union(enum) { /// If zero use pointee_type.abiAlignment() /// When creating pointer types, if alignment is equal to pointee type /// abi alignment, this value should be set to 0 instead. + /// + /// Please don't change this to u32 or u29. If you want to save bits, + /// migrate the rest of the codebase to use the `Alignment` type rather + /// than using byte units. The LLVM backend can only handle `c_uint` + /// byte units; we can emit a semantic analysis error if alignment that + /// overflows that amount is attempted to be used, but it shouldn't + /// affect the other backends. alignment: u64 = 0, /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this @@ -358,6 +366,44 @@ pub const Key = union(enum) { } }; + pub const FuncType = struct { + param_types: []Index, + return_type: Index, + /// Tells whether a parameter is comptime. See `paramIsComptime` helper + /// method for accessing this. + comptime_bits: u32, + /// Tells whether a parameter is noalias. See `paramIsNoalias` helper + /// method for accessing this. + noalias_bits: u32, + /// If zero use default target function code alignment. + /// + /// Please don't change this to u32 or u29. If you want to save bits, + /// migrate the rest of the codebase to use the `Alignment` type rather + /// than using byte units. The LLVM backend can only handle `c_uint` + /// byte units; we can emit a semantic analysis error if alignment that + /// overflows that amount is attempted to be used, but it shouldn't + /// affect the other backends. 
+ alignment: u64, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + + pub fn paramIsComptime(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.comptime_bits >> i) != 0; + } + + pub fn paramIsNoalias(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.noalias_bits >> i) != 0; + } + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -512,6 +558,18 @@ pub const Key = union(enum) { for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem); }, + + .func_type => |func_type| { + for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type); + std.hash.autoHash(hasher, func_type.return_type); + std.hash.autoHash(hasher, func_type.comptime_bits); + std.hash.autoHash(hasher, func_type.noalias_bits); + std.hash.autoHash(hasher, func_type.alignment); + std.hash.autoHash(hasher, func_type.cc); + std.hash.autoHash(hasher, func_type.is_var_args); + std.hash.autoHash(hasher, func_type.is_generic); + std.hash.autoHash(hasher, func_type.is_noinline); + }, } } @@ -670,6 +728,20 @@ pub const Key = union(enum) { std.mem.eql(Index, a_info.values, b_info.values) and std.mem.eql(NullTerminatedString, a_info.names, b_info.names); }, + + .func_type => |a_info| { + const b_info = b.func_type; + + return std.mem.eql(Index, a_info.param_types, b_info.param_types) and + a_info.return_type == b_info.return_type and + a_info.comptime_bits == b_info.comptime_bits and + a_info.noalias_bits == b_info.noalias_bits and + a_info.alignment == b_info.alignment and + a_info.cc == b_info.cc and + a_info.is_var_args == b_info.is_var_args and + a_info.is_generic == b_info.is_generic and + a_info.is_noinline == b_info.is_noinline; + }, } } @@ -687,6 +759,7 @@ pub const Key = union(enum) { .opaque_type, .enum_type, .anon_struct_type, + .func_type, => .type_type, inline .ptr, @@ -734,7 +807,6 @@ pub const Index = enum(u32) { pub const last_value: Index = .empty_struct; u1_type, - u5_type, u8_type, i8_type, u16_type, @@ -811,10 +883,10 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, - /// `1` (u5) - one_u5, - /// `4` (u5) - four_u5, + /// `1` (u8) + one_u8, + /// `4` (u8) + four_u8, /// `-1` (comptime_int) negative_one, /// `std.builtin.CallingConvention.C` @@ -880,12 +952,6 @@ pub const static_keys = [_]Key{ .bits = 1, } }, - // u5_type - .{ .int_type = .{ - .signedness = .unsigned, - .bits = 5, - } }, - .{ .int_type = .{ .signedness = .unsigned, .bits = 8, @@ -1074,14 +1140,14 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, - // one_u5 + // one_u8 .{ .int = .{ - .ty = .u5_type, + .ty = .u8_type, .storage = .{ .u64 = 1 }, } }, - // four_u5 + // four_u8 .{ .int = .{ - .ty = .u5_type, + .ty = .u8_type, .storage = .{ .u64 = 4 }, } }, // negative_one @@ -1092,12 +1158,12 @@ pub const static_keys = [_]Key{ // calling_convention_c .{ .enum_tag = .{ .ty = .calling_convention_type, - .int = .one_u5, + .int = .one_u8, } }, // calling_convention_inline .{ .enum_tag = .{ .ty = .calling_convention_type, - .int = .four_u5, + .int = .four_u8, } }, .{ .simple_value = .void }, @@ -1181,6 +1247,9 @@ pub const Tag = enum(u8) { /// An untagged union type which has a safety tag. /// `data` is `Module.Union.Index`. type_union_safety, + /// A function body type. 
+ /// `data` is extra index to `TypeFunction`. + type_function, /// Typed `undefined`. /// `data` is `Index` of the type. @@ -1283,6 +1352,29 @@ pub const Tag = enum(u8) { aggregate, }; +/// Trailing: +/// 0. param_type: Index for each params_len +pub const TypeFunction = struct { + params_len: u32, + return_type: Index, + comptime_bits: u32, + noalias_bits: u32, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + _: u11 = 0, + }; +}; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -1371,24 +1463,6 @@ pub const Pointer = struct { flags: Flags, packed_offset: PackedOffset, - /// Stored as a power-of-two, with one special value to indicate none. - pub const Alignment = enum(u6) { - none = std.math.maxInt(u6), - _, - - pub fn toByteUnits(a: Alignment, default: u64) u64 { - return switch (a) { - .none => default, - _ => @as(u64, 1) << @enumToInt(a), - }; - } - - pub fn fromByteUnits(n: u64) Alignment { - if (n == 0) return .none; - return @intToEnum(Alignment, @ctz(n)); - } - }; - pub const Flags = packed struct(u32) { size: Size, alignment: Alignment, @@ -1409,6 +1483,24 @@ pub const Pointer = struct { pub const VectorIndex = Key.PtrType.VectorIndex; }; +/// Stored as a power-of-two, with one special value to indicate none. +pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + return @intToEnum(Alignment, @ctz(n)); + } +}; + /// Used for non-sentineled arrays that have length fitting in u32, as well as /// vectors. pub const Vector = struct { @@ -1765,6 +1857,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }, .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), + .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, .undef => .{ .undef = @intToEnum(Index, data) }, .opt_null => .{ .opt = .{ @@ -1896,6 +1989,29 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; } +fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { + const type_function = ip.extraDataTrail(TypeFunction, data); + const param_types = @ptrCast( + []Index, + ip.extra.items[type_function.end..][0..type_function.data.params_len], + ); + return .{ + .param_types = param_types, + .return_type = type_function.data.return_type, + .comptime_bits = type_function.data.comptime_bits, + .noalias_bits = type_function.data.noalias_bits, + .alignment = type_function.data.flags.alignment.toByteUnits(0), + .cc = type_function.data.flags.cc, + .is_var_args = type_function.data.flags.is_var_args, + .is_generic = type_function.data.flags.is_generic, + .is_noinline = type_function.data.flags.is_noinline, + .align_is_generic = type_function.data.flags.align_is_generic, + .cc_is_generic = type_function.data.flags.cc_is_generic, + .section_is_generic = type_function.data.flags.section_is_generic, + .addrspace_is_generic = type_function.data.flags.addrspace_is_generic, + }; +} + /// Asserts the integer tag type is already present in the InternPool. 
fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index { return ip.getAssumeExists(.{ .int_type = .{ @@ -1977,7 +2093,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment), + .alignment = Alignment.fromByteUnits(ptr_type.alignment), .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, .is_allowzero = ptr_type.is_allowzero, @@ -2163,6 +2279,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, + .func_type => |func_type| { + assert(func_type.return_type != .none); + for (func_type.param_types) |param_type| assert(param_type != .none); + + const params_len = @intCast(u32, func_type.param_types.len); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + + params_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = ip.addExtraAssumeCapacity(TypeFunction{ + .params_len = params_len, + .return_type = func_type.return_type, + .comptime_bits = func_type.comptime_bits, + .noalias_bits = func_type.noalias_bits, + .flags = .{ + .alignment = Alignment.fromByteUnits(func_type.alignment), + .cc = func_type.cc, + .is_var_args = func_type.is_var_args, + .is_generic = func_type.is_generic, + .is_noinline = func_type.is_noinline, + .align_is_generic = func_type.align_is_generic, + .cc_is_generic = func_type.cc_is_generic, + .section_is_generic = func_type.section_is_generic, + .addrspace_is_generic = func_type.addrspace_is_generic, + }, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + }, + .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { @@ -2736,6 +2883,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { OptionalMapIndex => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), + TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), @@ -2797,6 +2945,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), + TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), @@ -2988,17 +3137,17 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } } -pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { +pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { + assert(val != .none); const tags = ip.items.items(.tag); - if (val == .none) return .none; if (tags[@enumToInt(val)] != .type_struct) return .none; const datas = ip.items.items(.data); return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { +pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { + assert(val != .none); const tags = 
ip.items.items(.tag); - if (val == .none) return .none; switch (tags[@enumToInt(val)]) { .type_union_tagged, .type_union_untagged, .type_union_safety => {}, else => return .none, @@ -3007,6 +3156,16 @@ pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); } +pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { + assert(val != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + switch (tags[@enumToInt(val)]) { + .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]), + else => return null, + } +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; @@ -3092,6 +3251,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_union_safety, => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .type_function => b: { + const info = ip.extraData(TypeFunction, data); + break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len); + }, + .undef => 0, .simple_type => 0, .simple_value => 0, diff --git a/src/Module.zig b/src/Module.zig index cf1fea3444..c8e676f813 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -846,7 +846,7 @@ pub const Decl = struct { pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; const ty = (decl.val.castTag(.ty) orelse return .none).data; - return mod.intern_pool.indexToStruct(ty.ip_index); + return mod.intern_pool.indexToStructType(ty.ip_index); } /// If the Decl has a value and it is a union, return it, @@ -4764,7 +4764,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .complete; decl.generation = mod.generation; - const is_inline = decl.ty.fnCallingConvention() == .Inline; + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; if (is_inline) { @@ -5617,6 +5617,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); + const fn_ty = decl.ty; + const fn_ty_info = mod.typeToFunc(fn_ty).?; + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -5626,7 +5629,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { .owner_decl = decl, .owner_decl_index = decl_index, .func = func, - .fn_ret_ty = decl.ty.fnReturnType(), + .fn_ret_ty = fn_ty_info.return_type.toType(), .owner_func = func, .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), }; @@ -5664,8 +5667,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
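    // `fn_ty_info` is now obtained from `mod.typeToFunc` above, since function
    // type information lives in the InternPool rather than in a Type payload.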
- const fn_ty = decl.ty; - const fn_ty_info = fn_ty.fnInfo(); const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` @@ -5692,7 +5693,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index]; + } else fn_ty_info.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -6864,6 +6865,10 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type { + return (try intern(mod, .{ .func_type = info })).toType(); +} + /// Supports optionals in addition to pointers. pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { @@ -6996,6 +7001,16 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { return i.toValue(); } +pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { + const ip = &mod.intern_pool; + assert(ip.isOptionalType(opt_ty.ip_index)); + const result = try ip.get(mod.gpa, .{ .opt = .{ + .ty = opt_ty.ip_index, + .val = .none, + } }); + return result.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } @@ -7201,15 +7216,22 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I /// * A struct which has no fields (`struct {}`). /// * Not a struct. 
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { - const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null; + if (ty.ip_index == .none) return null; + const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null; return mod.structPtr(struct_index); } pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { - const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null; + if (ty.ip_index == .none) return null; + const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null; return mod.unionPtr(union_index); } +pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { + if (ty.ip_index == .none) return null; + return mod.intern_pool.indexToFuncType(ty.ip_index); +} + pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(owner_decl_index); diff --git a/src/Sema.zig b/src/Sema.zig index 74b3cdd114..eb8dc5a633 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5850,6 +5850,7 @@ pub fn analyzeExport( } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const src = LazySrcLoc.nodeOffset(extra.node); @@ -5862,8 +5863,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst const func = sema.func orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); - switch (fn_owner_decl.ty.fnCallingConvention()) { + const fn_owner_decl = mod.declPtr(func.owner_decl); + switch (fn_owner_decl.ty.fnCallingConvention(mod)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => if (block.inlining != null) { @@ -5871,7 +5872,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -6378,7 +6379,7 @@ fn zirCall( var input_is_error = false; const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const parent_comptime = block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. @@ -6393,7 +6394,7 @@ fn zirCall( // Generate args to comptime params in comptime block. 
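            // Comptime-ness of a parameter is now a bit in `comptime_bits`: with
            // comptime_bits == 0b101, paramIsComptime(0) and paramIsComptime(2)
            // return true while paramIsComptime(1) returns false.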
defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { + if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } @@ -6402,10 +6403,10 @@ fn zirCall( if (arg_index >= fn_params_len) break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].isGenericPoison()) + if (func_ty_info.param_types[arg_index] == .generic_poison_type) break :inst Air.Inst.Ref.generic_poison_type; - break :inst try sema.addType(func_ty_info.param_types[arg_index]); + break :inst try sema.addType(func_ty_info.param_types[arg_index].toType()); }); const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); @@ -6506,7 +6507,7 @@ fn checkCallArgumentCount( return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const args_len = total_args - @boolToInt(member_fn); if (func_ty_info.is_var_args) { @@ -6562,7 +6563,7 @@ fn callBuiltin( std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(sema.mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); @@ -6573,7 +6574,7 @@ fn callBuiltin( const GenericCallAdapter = struct { generic_fn: *Module.Fn, precomputed_hash: u64, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, args: []const Arg, module: *Module, @@ -6656,7 +6657,7 @@ fn analyzeCall( const mod = sema.mod; const callee_ty = sema.typeOf(func); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const cc = func_ty_info.cc; if (cc == .Naked) { @@ -6704,7 +6705,7 @@ fn analyzeCall( var comptime_reason_buf: Block.ComptimeReason = undefined; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_comptime_call) { - if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| { + if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| { is_comptime_call = ct; if (ct) { // stage1 can't handle doing this directly @@ -6712,7 +6713,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; } @@ -6750,7 +6751,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; }, @@ -6875,9 +6876,9 @@ fn analyzeCall( // comptime state. 
var should_memoize = true; - var new_fn_info = fn_owner_decl.ty.fnInfo(); - new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len); - new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr; + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; + new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); + new_fn_info.comptime_bits = 0; // This will have return instructions analyzed as break instructions to // the block_inst above. Here we are performing "comptime/inline semantic analysis" @@ -6970,7 +6971,7 @@ fn analyzeCall( } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty; + new_fn_info.return_type = fn_ret_ty.ip_index; const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -6993,7 +6994,7 @@ fn analyzeCall( } } - const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info); + const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); @@ -7081,13 +7082,14 @@ fn analyzeCall( assert(!func_ty_info.is_generic); const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); + const fn_info = mod.typeToFunc(func_ty).?; for (uncasted_args, 0..) |uncasted_arg, i| { if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = func_ty.fnParamType(i); + const param_ty = fn_info.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -7126,8 +7128,8 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) { + try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); + if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7155,7 +7157,7 @@ fn analyzeCall( try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); - } else if (block.wantSafety() and func_ty_info.return_type.isNoReturn()) { + } else if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) { // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
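            // The noreturn check compares the interned index directly
            // (`return_type == .noreturn_type`) instead of going through a
            // Type query.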
check: { @@ -7171,7 +7173,7 @@ fn analyzeCall( try sema.safetyPanic(block, .noreturn_returned); return Air.Inst.Ref.unreachable_value; - } else if (func_ty_info.return_type.isNoReturn()) { + } else if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7208,13 +7210,13 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, memoized_call_key: Module.MemoizedCall.Key, - raw_param_types: []const Type, + raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, ) !void { @@ -7233,13 +7235,14 @@ fn analyzeInlineCallArg( const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const param_ty = param_ty: { const raw_param_ty = raw_param_types[arg_i.*]; - if (!raw_param_ty.isGenericPoison()) break :param_ty raw_param_ty; + if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; const param_ty_inst = try sema.resolveBody(param_block, param_body, inst); - break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst); + const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst); + break :param_ty param_ty.toIntern(); }; new_fn_info.param_types[arg_i.*] = param_ty; const uncasted_arg = uncasted_args[arg_i.*]; - if (try sema.typeRequiresComptime(param_ty)) { + if (try sema.typeRequiresComptime(param_ty.toType())) { _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| { if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; @@ -7247,7 +7250,7 @@ fn analyzeInlineCallArg( } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); } - const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{ + const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, .param_i = @intCast(u32, arg_i.*), } }) catch |err| switch (err) { @@ -7276,7 +7279,7 @@ fn analyzeInlineCallArg( } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); memoized_call_key.args[arg_i.*] = .{ - .ty = param_ty, + .ty = param_ty.toType(), .val = arg_val, }; } else { @@ -7292,7 +7295,7 @@ fn analyzeInlineCallArg( .param_anytype, .param_anytype_comptime => { // No coercion needed. 
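                // The anytype parameter takes on the argument's own type, which
                // is recorded in `param_types` as an InternPool index via
                // `toIntern()`.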
const uncasted_arg = uncasted_args[arg_i.*]; - new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg); + new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); @@ -7357,7 +7360,7 @@ fn analyzeGenericCallArg( uncasted_arg: Air.Inst.Ref, comptime_arg: TypedValue, runtime_args: []Air.Inst.Ref, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, runtime_i: *u32, ) !void { const mod = sema.mod; @@ -7365,7 +7368,7 @@ fn analyzeGenericCallArg( comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { - const param_ty = new_fn_info.param_types[runtime_i.*]; + const param_ty = new_fn_info.param_types[runtime_i.*].toType(); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); try sema.queueFullTypeResolution(param_ty); runtime_args[runtime_i.*] = casted_arg; @@ -7387,7 +7390,7 @@ fn instantiateGenericCall( func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, call_tag: Air.Inst.Tag, @@ -7431,14 +7434,14 @@ fn instantiateGenericCall( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7609,7 +7612,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const new_fn_info = func_ty.fnInfo(); + const new_fn_info = mod.typeToFunc(func_ty).?; const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { @@ -7647,12 +7650,12 @@ fn instantiateGenericCall( total_i += 1; } - try sema.queueFullTypeResolution(new_fn_info.return_type); + try sema.queueFullTypeResolution(new_fn_info.return_type.toType()); } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) { + if (sema.owner_func != null and new_fn_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7677,7 +7680,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (new_fn_info.return_type.isNoReturn()) { + if (new_fn_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7695,7 +7698,7 @@ fn resolveGenericInstantiationType( module_fn: *Module.Fn, new_module_func: *Module.Fn, namespace: Namespace.Index, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, ) !*Module.Fn { @@ -7755,14 +7758,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7822,13 +7825,13 @@ fn resolveGenericInstantiationType( var is_comptime = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_comptime = true; @@ -7868,8 +7871,8 @@ fn resolveGenericInstantiationType( new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. - const new_fn_info = new_decl.ty.fnInfo(); - if (try sema.typeRequiresComptime(new_fn_info.return_type)) { + const new_fn_info = mod.typeToFunc(new_decl.ty).?; + if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) { return error.ComptimeReturn; } // Similarly, if the call evaluated to a generic type we need to instead @@ -8969,19 +8972,19 @@ fn funcCommon( // the instantiation, which can depend on comptime parameters. // Related proposal: https://github.com/ziglang/zig/issues/11834 const cc_resolved = cc orelse .Unspecified; - const param_types = try sema.arena.alloc(Type, block.params.items.len); - const comptime_params = try sema.arena.alloc(bool, block.params.items.len); - for (block.params.items, 0..) 
|param, i| { + const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len); + var comptime_bits: u32 = 0; + for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, noalias_bits >> index) != 0; }; - param_types[i] = param.ty; + dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( block, .unneeded, param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8994,7 +8997,7 @@ fn funcCommon( block, Module.paramSrc(src_node_offset, mod, decl, i), param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -9019,7 +9022,7 @@ fn funcCommon( else => |e| return e, }; - const return_type = if (!inferred_error_set or ret_poison) + const return_type: Type = if (!inferred_error_set or ret_poison) bare_return_type else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); @@ -9047,7 +9050,9 @@ fn funcCommon( }; return sema.failWithOwnedErrorMsg(msg); } - if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) { + if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and + !try sema.validateExternType(return_type, .ret_ty)) + { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ return_type.fmt(sema.mod), @tagName(cc_resolved), @@ -9141,8 +9146,7 @@ fn funcCommon( return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{}); } if (is_generic and sema.no_partial_func_ty) return error.GenericPoison; - for (comptime_params) |ct| is_generic = is_generic or ct; - is_generic = is_generic or ret_ty_requires_comptime; + is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; if (!is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can @@ -9151,10 +9155,11 @@ fn funcCommon( _ = try sema.resolveTypeFields(unresolved_stack_trace_ty); } - break :fn_ty try Type.Tag.function.create(sema.arena, .{ + break :fn_ty try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, - .return_type = return_type, + .noalias_bits = noalias_bits, + .comptime_bits = comptime_bits, + .return_type = return_type.toIntern(), .cc = cc_resolved, .cc_is_generic = cc == null, .alignment = alignment orelse 0, @@ -9164,7 +9169,6 @@ fn funcCommon( .is_var_args = var_args, .is_generic = is_generic, .is_noinline = is_noinline, - .noalias_bits = noalias_bits, }); }; @@ -9203,7 +9207,7 @@ fn funcCommon( return sema.addType(fn_ty); } - const is_inline = fn_ty.fnCallingConvention() == .Inline; + const is_inline = fn_ty.fnCallingConvention(mod) == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: { @@ -9243,7 +9247,7 @@ fn analyzeParameter( block: *Block, param_src: LazySrcLoc, param: Block.Param, - comptime_params: []bool, + comptime_bits: *u32, i: usize, is_generic: *bool, cc: std.builtin.CallingConvention, @@ -9252,14 +9256,16 @@ fn analyzeParameter( ) !void { const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); - comptime_params[i] = param.is_comptime or requires_comptime; + if (param.is_comptime or requires_comptime) { + 
comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + } const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; const target = mod.getTarget(); - if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } if (!param.ty.isValidParamType(mod)) { @@ -9275,7 +9281,7 @@ fn analyzeParameter( }; return sema.failWithOwnedErrorMsg(msg); } - if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { + if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ param.ty.fmt(mod), @tagName(cc), @@ -15986,22 +15992,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ), .Fn => { // TODO: look into memoizing this result. - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); - for (param_vals, 0..) |*param_val, i| { - const param_ty = info.param_types[i]; - const is_generic = param_ty.isGenericPoison(); - const param_ty_val = if (is_generic) - Value.null - else - try Value.Tag.opt_payload.create( - params_anon_decl.arena(), - try Value.Tag.ty.create(params_anon_decl.arena(), try param_ty.copy(params_anon_decl.arena())), - ); + for (param_vals, info.param_types, 0..) 
|*param_val, param_ty, i| { + const is_generic = param_ty == .generic_poison_type; + const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + .val = if (is_generic) .none else param_ty, + } }); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; @@ -16015,7 +16017,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_noalias: bool, Value.makeBool(is_noalias), // type: ?type, - param_ty_val, + param_ty_val.toValue(), }; param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); } @@ -16059,13 +16061,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const ret_ty_opt = if (!info.return_type.isGenericPoison()) - try Value.Tag.opt_payload.create( - sema.arena, - try Value.Tag.ty.create(sema.arena, info.return_type), - ) - else - Value.null; + const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + .val = if (info.return_type == .generic_poison_type) .none else info.return_type, + } }); const callconv_ty = try sema.getBuiltinType("CallingConvention"); @@ -16080,7 +16079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_var_args: bool, Value.makeBool(info.is_var_args), // return_type: ?type, - ret_ty_opt, + ret_ty_opt.toValue(), // args: []const Fn.Param, args_val, }; @@ -17788,7 +17787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; + const fn_align = mod.typeToFunc(elem_ty).?.alignment; if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and abi_align != fn_align) { @@ -18939,7 +18938,7 @@ fn zirReify( if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; + const fn_align = mod.typeToFunc(elem_ty).?.alignment; if (abi_align != 0 and fn_align != 0 and abi_align != fn_align) { @@ -19483,12 +19482,10 @@ fn zirReify( const args_slice_val = args_val.castTag(.slice).?.data; const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); - const param_types = try sema.arena.alloc(Type, args_len); - const comptime_params = try sema.arena.alloc(bool, args_len); + const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; - var i: usize = 0; - while (i < args_len) : (i += 1) { + for (param_types, 0..) 
|*param_type, i| { const arg = try args_slice_val.ptr.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here @@ -19505,25 +19502,22 @@ fn zirReify( const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - const param_type = try param_type_val.toType().copy(sema.arena); + param_type.* = param_type_val.ip_index; if (arg_is_noalias) { - if (!param_type.isPtrAtRuntime(mod)) { + if (!param_type.toType().isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); } - - param_types[i] = param_type; - comptime_params[i] = false; } - var fn_info = Type.Payload.Function.Data{ + const ty = try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, + .comptime_bits = 0, .noalias_bits = noalias_bits, - .return_type = try return_type.toType().copy(sema.arena), + .return_type = return_type.toIntern(), .alignment = alignment, .cc = cc, .is_var_args = is_var_args, @@ -19533,9 +19527,7 @@ fn zirReify( .cc_is_generic = false, .section_is_generic = false, .addrspace_is_generic = false, - }; - - const ty = try Type.Tag.function.create(sema.arena, fn_info); + }); return sema.addType(ty); }, .Frame => return sema.failWithUseOfAsync(block, src), @@ -23435,7 +23427,7 @@ fn explainWhyTypeIsComptimeInner( .Pointer => { const elem_ty = ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Fn) { - const fn_info = elem_ty.fnInfo(); + const fn_info = mod.typeToFunc(elem_ty).?; if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{}); } @@ -23443,7 +23435,7 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly(mod)) { + if (fn_info.return_type.toType().comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; @@ -23543,10 +23535,10 @@ fn validateExternType( const target = sema.mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
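// `fnCallingConvention` now takes the Module handle so the calling convention
// can be read out of the interned function type, e.g. (illustration only):
//   const cc = ty.fnCallingConvention(mod); // was: ty.fnCallingConvention()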
- if (ty.fnCallingConvention() == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { + if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { return true; } - return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); + return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { return sema.validateExternType(try ty.intTagType(mod), position); @@ -23619,7 +23611,7 @@ fn explainWhyTypeIsNotExtern( try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{}); return; } - switch (ty.fnCallingConvention()) { + switch (ty.fnCallingConvention(mod)) { .Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}), .Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}), .Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}), @@ -24548,10 +24540,10 @@ fn fieldCallBind( try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag(mod) == .Fn and - decl_type.fnParamLen() >= 1) - { - const first_param_type = decl_type.fnParamType(0); + if (mod.typeToFunc(decl_type)) |func_type| f: { + if (func_type.param_types.len == 0) break :f; + + const first_param_type = func_type.param_types[0].toType(); // zig fmt: off if (first_param_type.isGenericPoison() or ( first_param_type.zigTypeTag(mod) == .Pointer and @@ -27090,8 +27082,9 @@ fn coerceInMemoryAllowedFns( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const dest_info = dest_ty.fnInfo(); - const src_info = src_ty.fnInfo(); + const mod = sema.mod; + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; if (dest_info.is_var_args != src_info.is_var_args) { return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args }; @@ -27108,13 +27101,13 @@ fn coerceInMemoryAllowedFns( } }; } - if (!src_info.return_type.isNoReturn()) { - const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src); + if (src_info.return_type != .noreturn_type) { + const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src); if (rt != .ok) { return InMemoryCoercionResult{ .fn_return_type = .{ .child = try rt.dupe(sema.arena), - .actual = src_info.return_type, - .wanted = dest_info.return_type, + .actual = src_info.return_type.toType(), + .wanted = dest_info.return_type.toType(), } }; } } @@ -27134,22 +27127,23 @@ fn coerceInMemoryAllowedFns( } for (dest_info.param_types, 0..) |dest_param_ty, i| { - const src_param_ty = src_info.param_types[i]; + const src_param_ty = src_info.param_types[i].toType(); - if (dest_info.comptime_params[i] != src_info.comptime_params[i]) { + const i_small = @intCast(u5, i); + if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ .index = i, - .wanted = dest_info.comptime_params[i], + .wanted = dest_info.paramIsComptime(i_small), } }; } // Note: Cast direction is reversed here. 
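// Parameters are contravariant: each destination parameter type must coerce
// to the corresponding source parameter type, so src and dest swap places in
// the check below.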
- const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); + const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src); if (param != .ok) { return InMemoryCoercionResult{ .fn_param = .{ .child = try param.dupe(sema.arena), .actual = src_param_ty, - .wanted = dest_param_ty, + .wanted = dest_param_ty.toType(), .index = i, } }; } @@ -31205,17 +31199,17 @@ fn resolvePeerTypes( return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void { +pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void { const mod = sema.mod; - try sema.resolveTypeFully(fn_info.return_type); + try sema.resolveTypeFully(fn_info.return_type.toType()); - if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) { + if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } } @@ -31286,16 +31280,16 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. return; } for (info.param_types) |param_ty| { - try sema.resolveTypeLayout(param_ty); + try sema.resolveTypeLayout(param_ty.toType()); } - try sema.resolveTypeLayout(info.return_type); + try sema.resolveTypeLayout(info.return_type.toType()); }, else => {}, } @@ -31615,15 +31609,13 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_merged, => false, - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); } @@ -31644,7 +31636,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); } @@ -31653,6 +31645,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .func_type => true, + .simple_type => |t| switch (t) { .f16, .f32, @@ -31799,16 +31793,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { }, .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } - try sema.resolveTypeFully(info.return_type); + try sema.resolveTypeFully(info.return_type.toType()); }, else => {}, } @@ -31881,7 +31875,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .none => return ty, .u1_type, - .u5_type, .u8_type, .i8_type, .u16_type, @@ -31941,8 +31934,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, - .one_u5 => unreachable, - .four_u5 => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, @@ -32083,14 +32076,14 @@ fn resolveInferredErrorSet( // `*Module.Fn`. Not only is the function not relevant to the inferred error set // in this case, it may be a generic function which would cause an assertion failure // if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl); - const ies_func_info = ies_func_owner_decl.ty.fnInfo(); + const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl); + const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, // because inline function does not create a new declaration, and the ies has been filled with analyzeCall, // so here we can simply skip this case. - if (ies_func_info.return_type.isGenericPoison()) { + if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (ies_func_info.return_type.toType().errorUnionSet().castTag(.error_set_inferred).?.data == ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); @@ -32285,7 +32278,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const prev_field_index = struct_obj.fields.getIndex(field_name).?; const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); - try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); + try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; }; @@ -32387,7 +32380,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field); @@ -32402,7 +32395,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer 
msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty); @@ -32580,7 +32573,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)}); } if (fields_len > 0) { @@ -32590,7 +32583,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(sema.mod), + int_tag_ty.fmt(mod), fields_len - 1, }); break :msg msg; @@ -32605,7 +32598,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { union_obj.tag_ty = provided_ty; const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { .enum_type => |x| x, - else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}), + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. @@ -32705,7 +32698,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { - const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)}); errdefer msg.destroy(gpa); try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -32751,7 +32744,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const prev_field_index = union_obj.fields.getIndex(field_name).?; const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); + try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; @@ -32766,7 +32759,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .range = .type, }).lazy; const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ - field_name, union_obj.tag_ty.fmt(sema.mod), + field_name, union_obj.tag_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -32800,7 +32793,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot 
contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field); @@ -32815,7 +32808,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -33060,7 +33053,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set, .error_set_merged, .error_union, - .function, .error_set_inferred, .anyframe_T, .pointer, @@ -33087,7 +33079,12 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => null, + + .ptr_type, + .error_union_type, + .func_type, + => null, + .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -33102,13 +33099,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; }, .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { return null; } }, - .error_union_type => null, + .simple_type => |t| switch (t) { .f16, .f32, @@ -33674,15 +33671,13 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_merged, => false, - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.typeRequiresComptime(child_ty); } @@ -33703,7 +33698,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.typeRequiresComptime(child_ty); } @@ -33714,6 +33709,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => |error_union_type| { return sema.typeRequiresComptime(error_union_type.payload_type.toType()); }, + .func_type => true, + .simple_type => |t| return switch (t) { .f16, .f32, @@ -33870,7 +33867,8 @@ fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { /// Synchronize logic with `Type.isFnOrHasRuntimeBits`. 
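/// Generic functions, the `Generic` and `Inline` calling conventions, and
/// functions with a comptime-only return type have no runtime bits; var-args
/// functions always do.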
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const fn_info = ty.fnInfo(); + const mod = sema.mod; + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -33878,7 +33876,7 @@ pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { .Inline => return false, else => {}, } - if (try sema.typeRequiresComptime(fn_info.return_type)) { + if (try sema.typeRequiresComptime(fn_info.return_type.toType())) { return false; } return true; diff --git a/src/Zir.zig b/src/Zir.zig index 136920d75d..ec3288620c 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2052,7 +2052,6 @@ pub const Inst = struct { /// and `[]Ref`. pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), - u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -2121,8 +2120,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), - one_u5 = @enumToInt(InternPool.Index.one_u5), - four_u5 = @enumToInt(InternPool.Index.four_u5), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 3e893411fc..dea5b63129 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -472,7 +472,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // stp fp, lr, [sp, #-16]! 
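// The pre-indexed store-pair reserves a 16-byte frame record and saves the
// frame pointer and link register in a single instruction.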
_ = try self.addInst(.{ @@ -1146,7 +1146,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the @@ -4271,7 +4271,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); @@ -4428,10 +4428,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4460,10 +4460,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4483,7 +4484,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ret_ty.abiSize(mod)); const abi_align = ret_ty.abiAlignment(mod); @@ -6226,12 +6226,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -6239,8 +6238,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6271,8 +6269,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size == 0) { result.args[i] = .{ .none = {} }; continue; @@ -6280,14 +6278,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned // values to spread across odd-numbered registers. - if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) { + if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) { // Round up NCRN to the next even number ncrn += ncrn % 2; } if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) { if (param_size <= 8) { - result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) }; + result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) }; ncrn += 1; } else { return self.fail("TODO MCValues with multiple registers", .{}); @@ -6298,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided // that the entire stack space consumed by the arguments is 8-byte aligned. - if (ty.abiAlignment(mod) == 8) { + if (ty.toType().abiAlignment(mod) == 8) { if (nsaa % 8 != 0) { nsaa += 8 - (nsaa % 8); } @@ -6336,10 +6334,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.abiSize(mod)); - const param_alignment = ty.abiAlignment(mod); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5cc165fdfe..e84c4de981 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -478,7 +478,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // push {fp, lr} const push_reloc = try self.addNop(); @@ -1123,7 +1123,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the @@ -4250,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // untouched by the parameter passing code const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); @@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (RegisterManager.indexOfRegIntoTracked(reg) == null) { 
// Save function return value into a tracked register log.debug("airCall: copying {} as it is not tracked", .{reg}); - const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value); + const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value); break :result MCValue{ .register = new_reg }; } }, @@ -4374,10 +4374,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4406,10 +4406,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4429,7 +4430,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ret_ty.abiSize(mod)); const abi_align = ret_ty.abiAlignment(mod); @@ -6171,12 +6171,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -6184,8 +6183,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6219,11 +6217,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - if (ty.abiAlignment(mod) == 8) + for (fn_info.param_types, 0..) 
|ty, i| { + if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(mod)); + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6235,7 +6233,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(mod) == 8) + if (ty.toType().abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6269,10 +6267,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.abiSize(mod)); - const param_alignment = ty.abiAlignment(mod); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5cf621488e..faa2b2b7d0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -347,7 +347,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for riscv64. @@ -1803,7 +1804,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for an instruction, patch this later const index = try self.addInst(.{ @@ -2621,12 +2623,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. 
.return_value = undefined, .stack_byte_count = undefined, @@ -2634,8 +2635,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -2655,8 +2655,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) |ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0677b72f1a..9d58dd9f29 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -363,7 +363,8 @@ pub fn generate( } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparc64. @@ -4458,12 +4459,11 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -4471,8 +4471,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -4495,8 +4494,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) .callee => abi.c_abi_int_param_regs_callee_view, }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4580,7 +4579,8 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for a branch instruction, patch this later diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 6ae5163714..a950264840 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1145,7 +1145,7 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { fn genFunctype( gpa: Allocator, cc: std.builtin.CallingConvention, - params: []const Type, + params: []const InternPool.Index, return_type: Type, mod: *Module, ) !wasm.Type { @@ -1170,7 +1170,8 @@ fn genFunctype( } // param types - for (params) |param_type| { + for (params) |param_type_ip| { + const param_type = param_type_ip.toType(); if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { @@ -1234,9 +1235,9 @@ pub fn generate( } fn genFunc(func: *CodeGen) InnerError!void { - const fn_info = func.decl.ty.fnInfo(); const mod = func.bin_file.base.options.module.?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); + const fn_info = mod.typeToFunc(func.decl.ty).?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1345,10 +1346,8 @@ const CallWValues = struct { fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { const mod = func.bin_file.base.options.module.?; - const cc = fn_ty.fnCallingConvention(); - const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); - defer func.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallWValues = .{ .args = &.{}, .return_value = .none, @@ -1360,8 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
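// (sret: the result is returned through a caller-provided pointer passed as a
// hidden first argument rather than by value.)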
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1370,8 +1368,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { - for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { + for (fn_info.param_types) |ty| { + if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1380,8 +1378,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV } }, .C => { - for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, mod); + for (fn_info.param_types) |ty| { + const ty_classes = abi.classifyType(ty.toType(), mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -2095,11 +2093,11 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { } fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const fn_info = func.decl.ty.fnInfo(); - const ret_ty = fn_info.return_type; - const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(func.decl.ty).?; + const ret_ty = fn_info.return_type.toType(); // result must be stored in the stack and we return a pointer // to the stack instead @@ -2146,8 +2144,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try func.allocStack(Type.usize); // create pointer to void } - const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { break :result func.return_value; } @@ -2163,12 +2161,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const ret_ty = func.typeOf(un_op).childType(mod); - const fn_info = func.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(func.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2191,9 +2189,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif .Pointer => ty.childType(mod), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); + const ret_ty = fn_ty.fnReturnType(mod); + const fn_info = mod.typeToFunc(fn_ty).?; + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod); const callee: ?Decl.Index = blk: { const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; @@ -2203,8 +2201,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif break :blk function.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { const ext_decl = mod.declPtr(extern_fn.data.owner_decl); - const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod); + const ext_info = mod.typeToFunc(ext_decl.ty).?; + var func_type = try 
genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2235,7 +2233,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_ty = func.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); + try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } if (callee) |direct| { @@ -2248,7 +2246,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2264,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (first_param_sret) { break :result_value sret; // TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { + } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); const scalar_type = abi.scalarType(ret_ty, mod); @@ -2528,7 +2526,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const arg_index = func.arg_index; const arg = func.args[arg_index]; - const cc = func.decl.ty.fnInfo().cc; + const cc = mod.typeToFunc(func.decl.ty).?.cc; const arg_ty = func.typeOfIndex(inst); if (cc == .C) { const arg_classes = abi.classifyType(arg_ty, mod); @@ -2647,9 +2645,9 @@ fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) Inner } switch (op) { - .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }), - .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), - .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), + .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), .xor => { const result = try func.allocStack(ty); try func.emitWValue(result); @@ -2839,7 +2837,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In }; // fma requires three operands - var param_types_buffer: [3]Type = .{ ty, ty, ty }; + var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index }; const param_types = param_types_buffer[0..args.len]; return func.callIntrinsic(fn_name, param_types, ty, args); } @@ -5298,7 +5296,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! 
// call __extendhfsf2(f16) f32 const f32_result = try func.callIntrinsic( "__extendhfsf2", - &.{Type.f16}, + &.{.f16_type}, Type.f32, &.{operand}, ); @@ -5316,7 +5314,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5347,7 +5345,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } else operand; // call __truncsfhf2(f32) f16 - return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op}); + return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op}); } var fn_name_buf: [12]u8 = undefined; @@ -5356,7 +5354,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5842,7 +5840,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs, lhs_shifted, rhs, rhs_shifted }, ); @@ -5866,19 +5864,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mul1 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_lsb, zero, rhs_msb, zero }, ); const mul2 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ rhs_lsb, zero, lhs_msb, zero }, ); const mul3 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_msb, zero, rhs_msb, zero }, ); @@ -5977,7 +5975,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // call to compiler-rt `fn fmaf(f32, f32, f32) f32` var result = try func.callIntrinsic( "fmaf", - &.{ Type.f32, Type.f32, Type.f32 }, + &.{ .f32_type, .f32_type, .f32_type }, Type.f32, &.{ rhs_ext, lhs_ext, addend_ext }, ); @@ -6707,7 +6705,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn callIntrinsic( func: *CodeGen, name: []const u8, - param_types: []const Type, + param_types: []const InternPool.Index, return_type: Type, args: []const WValue, ) InnerError!WValue { @@ -6735,8 +6733,8 @@ fn callIntrinsic( // Lower all arguments to the stack before we call our function for (args, 0..) 
|arg, arg_i| { assert(!(want_sret_param and arg == .stack)); - assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod)); - try func.lowerArg(.C, param_types[arg_i], arg); + assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod)); + try func.lowerArg(.C, param_types[arg_i].toType(), arg); } // Actually call our intrinsic @@ -6938,7 +6936,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.end)); const slice_ty = Type.const_slice_u8_sentinel_0; - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod); + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 30c3248360..149f872c9a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig"); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); @@ -697,7 +698,8 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = 1 }), ); - var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) { + const fn_info = mod.typeToFunc(fn_type).?; + var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create( @@ -1566,7 +1568,7 @@ fn asmMemoryRegisterImmediate( fn gen(self: *Self) InnerError!void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { try self.asmRegister(.{ ._, .push }, .rbp); const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); @@ -8042,7 +8044,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier else => unreachable, }; - var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame); + const fn_info = mod.typeToFunc(fn_ty).?; + + var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame); defer info.deinit(self); // We need a properly aligned and sized call frame to be able to call this function. 
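The hunks above and below repeat one mechanical change: function-type queries that went through Type helpers (fnCallingConvention, fnParamLen, fnReturnType, fnInfo) now resolve the interned function-type key once and read its fields. A minimal sketch of the call-site pattern, assuming only what this diff itself shows (mod.typeToFunc returns an optional InternPool.Key.FuncType whose param_types hold InternPool.Index values):

const fn_info = mod.typeToFunc(fn_ty).?;     // null for non-function types
const cc = fn_info.cc;                       // was fn_ty.fnCallingConvention()
const param_count = fn_info.param_types.len; // was fn_ty.fnParamLen()
const ret_ty = fn_info.return_type.toType(); // was fn_ty.fnReturnType()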
@@ -8083,7 +8087,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ret_lock = switch (info.return_value.long) { .none, .unreach => null, .indirect => |reg_off| lock: { - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, @@ -8199,9 +8203,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv.short) { .none => {}, .register => try self.genCopy(ret_ty, self.ret_mcv.short, operand), @@ -11683,18 +11688,23 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( self: *Self, - fn_ty: Type, + fn_info: InternPool.Key.FuncType, var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { const mod = self.bin_file.options.module.?; - const cc = fn_ty.fnCallingConvention(); - const param_len = fn_ty.fnParamLen(); - const param_types = try self.gpa.alloc(Type, param_len + var_args.len); + const cc = fn_info.cc; + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + + for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| { + dest.* = src.toType(); + } // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg); + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| { + param_ty.* = self.typeOf(arg); + } + var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. @@ -11704,7 +11714,7 @@ fn resolveCallingConventionValues( }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); switch (cc) { .Naked => { diff --git a/src/codegen.zig b/src/codegen.zig index 90b6bfccf2..9eb294feac 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1081,7 +1081,7 @@ fn genDeclRef( // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
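// A condensed sketch of the generic-function guard below, assuming the
// behavior shown elsewhere in this diff (generic function bodies are
// comptime-only and are never lowered, so no real address exists for them):
//
//     if (mod.typeToFunc(fn_ty)) |fn_info| {
//         if (fn_info.is_generic) {
//             // no machine code to point at; substitute a stand-in value
//         }
//     }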
if (tv.ty.castPtrToFn(mod)) |fn_ty| { - if (fn_ty.fnInfo().is_generic) { + if (mod.typeToFunc(fn_ty).?.is_generic) { return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } } else if (tv.ty.zigTypeTag(mod) == .Pointer) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f45c178223..601382c1fd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1507,7 +1507,7 @@ pub const DeclGen = struct { const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); - const fn_info = fn_decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_decl.ty).?; if (fn_info.cc == .Naked) { switch (kind) { .forward => try w.writeAll("zig_naked_decl "), @@ -1517,7 +1517,7 @@ pub const DeclGen = struct { } if (fn_decl.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn "); + if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, @@ -3455,7 +3455,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } else { try reap(f, inst, &.{un_op}); // Not even allowed to return void in a naked function. - if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true) + if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true) try writer.writeAll("return;\n"); } return .none; @@ -4094,7 +4094,7 @@ fn airCall( ) !CValue { const mod = f.object.dg.module; // Not even allowed to call panic in a naked function. - if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; const gpa = f.object.dg.gpa; const writer = f.object.writer(); @@ -4143,7 +4143,7 @@ fn airCall( else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); const result_local = result: { @@ -4622,8 +4622,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnreach(f: *Function) !CValue { + const mod = f.object.dg.module; // Not even allowed to call unreachable in a naked function. 
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); return .none; diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index b51d81a30b..a2af395a98 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1720,7 +1720,7 @@ pub const CType = extern union { .Opaque => self.init(.void), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (!info.is_generic) { if (lookup.isMutable()) { const param_kind: Kind = switch (kind) { @@ -1728,10 +1728,10 @@ pub const CType = extern union { .complete, .parameter, .global => .parameter, .payload => unreachable, }; - _ = try lookup.typeToIndex(info.return_type, param_kind); + _ = try lookup.typeToIndex(info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - _ = try lookup.typeToIndex(param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + _ = try lookup.typeToIndex(param_type.toType(), param_kind); } } self.init(if (info.is_var_args) .varargs_function else .function); @@ -2013,7 +2013,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (kind) { .forward, .forward_parameter => .forward_parameter, @@ -2023,21 +2023,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2145,7 +2145,7 @@ pub const CType = extern union { => { if (ty.zigTypeTag(mod) != .Fn) return false; - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const data = cty.cast(Payload.Function).?.data; const param_kind: Kind = switch (self.kind) { @@ -2154,18 +2154,18 @@ pub const CType = extern union { .payload => unreachable, }; - if (!self.eqlRecurse(info.return_type, data.return_type, param_kind)) + if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind)) return false; var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; c_param_i += 1; - if (!self.eqlRecurse(param_type, param_cty, param_kind)) + if (!self.eqlRecurse(param_type.toType(), 
param_cty, param_kind)) return false; } return c_param_i == data.param_types.len; @@ -2258,7 +2258,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (self.kind) { .forward, .forward_parameter => .forward_parameter, @@ -2266,10 +2266,10 @@ pub const CType = extern union { .payload => unreachable, }; - self.updateHasherRecurse(hasher, info.return_type, param_kind); + self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - self.updateHasherRecurse(hasher, param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + self.updateHasherRecurse(hasher, param_type.toType(), param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 3289d389b4..476f73cbe4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -954,17 +954,17 @@ pub const Object = struct { builder.positionBuilderAtEnd(entry_block); // This gets the LLVM values from the function and stores them in `dg.args`. - const fn_info = decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(decl.ty).?; const sret = firstParamSRet(fn_info, mod); const ret_ptr = if (sret) llvm_func.getParam(0) else null; const gpa = dg.gpa; - if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) { + if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) { .signed => dg.addAttr(llvm_func, 0, "signext"), .unsigned => dg.addAttr(llvm_func, 0, "zeroext"), }; - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; const err_ret_trace = if (err_return_tracing) @@ -986,7 +986,7 @@ pub const Object = struct { .byval => { assert(!it.byval_attr); const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); const param = llvm_func.getParam(llvm_arg_i); try args.ensureUnusedCapacity(1); @@ -1005,7 +1005,7 @@ pub const Object = struct { llvm_arg_i += 1; }, .byref => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1024,7 +1024,7 @@ pub const Object = struct { } }, .byref_mut => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1044,7 +1044,7 @@ pub const Object = struct { }, .abi_sized_int => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1071,7 +1071,7 @@ pub const Object = struct { }, .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { @@ -1104,7 +1104,7 @@ pub const Object = struct { 
.multiple_llvm_types => { assert(!it.byval_attr); const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); @@ -1135,7 +1135,7 @@ pub const Object = struct { args.appendAssumeCapacity(casted); }, .float_array => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1153,7 +1153,7 @@ pub const Object = struct { } }, .i32_array, .i64_array => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1182,7 +1182,7 @@ pub const Object = struct { const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and !mod.decl_exports.contains(decl_index); - const noret_bit: c_uint = if (fn_info.return_type.isNoReturn()) + const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn else 0; @@ -2331,26 +2331,26 @@ pub const Object = struct { return full_di_ty; }, .Fn => { - const fn_info = ty.fnInfo(); + const fn_info = mod.typeToFunc(ty).?; var param_di_types = std.ArrayList(*llvm.DIType).init(gpa); defer param_di_types.deinit(); // Return type goes first. - if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { const sret = firstParamSRet(fn_info, mod); - const di_ret_ty = if (sret) Type.void else fn_info.return_type; + const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType(); try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); if (sret) { - const ptr_ty = try mod.singleMutPtrType(fn_info.return_type); + const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } } else { try param_di_types.append(try o.lowerDebugType(Type.void, .full)); } - if (fn_info.return_type.isError(mod) and + if (fn_info.return_type.toType().isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); @@ -2358,13 +2358,13 @@ pub const Object = struct { } for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - if (isByRef(param_ty, mod)) { - const ptr_ty = try mod.singleMutPtrType(param_ty); + if (isByRef(param_ty.toType(), mod)) { + const ptr_ty = try mod.singleMutPtrType(param_ty.toType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { - try param_di_types.append(try o.lowerDebugType(param_ty, .full)); + try param_di_types.append(try o.lowerDebugType(param_ty.toType(), .full)); } } @@ -2565,7 +2565,7 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); - const fn_info = zig_fn_type.fnInfo(); + const fn_info = mod.typeToFunc(zig_fn_type).?; const target = mod.getTarget(); const sret = firstParamSRet(fn_info, 
mod); @@ -2598,11 +2598,11 @@ pub const DeclGen = struct { dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 dg.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type); + const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType()); llvm_fn.addSretAttr(raw_llvm_ret_ty); } - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { @@ -2626,13 +2626,13 @@ pub const DeclGen = struct { } if (fn_info.alignment != 0) { - llvm_fn.setAlignment(fn_info.alignment); + llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment)); } // Function attributes that are independent of analysis results of the function body. dg.addCommonFnAttributes(llvm_fn); - if (fn_info.return_type.isNoReturn()) { + if (fn_info.return_type == .noreturn_type) { dg.addFnAttr(llvm_fn, "noreturn"); } @@ -2645,15 +2645,15 @@ pub const DeclGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); if (!isByRef(param_ty, mod)) { dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const param_llvm_ty = try dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(mod); + const param_llvm_ty = try dg.lowerType(param_ty.toType()); + const alignment = param_ty.toType().abiAlignment(mod); dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -3142,7 +3142,7 @@ pub const DeclGen = struct { fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type { const mod = dg.module; - const fn_info = fn_ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_ty).?; const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa); @@ -3152,7 +3152,7 @@ pub const DeclGen = struct { try llvm_params.append(dg.context.pointerType(0)); } - if (fn_info.return_type.isError(mod) and + if (fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing) { const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); @@ -3163,19 +3163,19 @@ pub const DeclGen = struct { while (it.next()) |lowering| switch (lowering) { .no_bits => continue, .byval => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); try llvm_params.append(try dg.lowerType(param_ty)); }, .byref, .byref_mut => { try llvm_params.append(dg.context.pointerType(0)); }, .abi_sized_int => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); try llvm_params.append(dg.context.intType(abi_size * 8)); }, .slice => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod) @@ -3195,7 +3195,7 @@ pub const DeclGen = struct { try llvm_params.append(dg.context.intType(16)); }, .float_array => |count| { - const param_ty = 
fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @intCast(c_uint, count); const arr_ty = float_ty.arrayType(field_count); @@ -3223,7 +3223,7 @@ pub const DeclGen = struct { const mod = dg.module; const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, - .Fn => !elem_ty.fnInfo().is_generic, + .Fn => !mod.typeToFunc(elem_ty).?.is_generic, .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; @@ -4204,7 +4204,7 @@ pub const DeclGen = struct { const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or - (is_fn_body and decl.ty.fnInfo().is_generic)) + (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) { return self.lowerPtrToVoid(tv.ty); } @@ -4354,7 +4354,7 @@ pub const DeclGen = struct { llvm_fn: *llvm.Value, param_ty: Type, param_index: u32, - fn_info: Type.Payload.Function.Data, + fn_info: InternPool.Key.FuncType, llvm_arg_i: u32, ) void { const mod = dg.module; @@ -4774,8 +4774,8 @@ pub const FuncGen = struct { .Pointer => callee_ty.childType(mod), else => unreachable, }; - const fn_info = zig_fn_ty.fnInfo(); - const return_type = fn_info.return_type; + const fn_info = mod.typeToFunc(zig_fn_ty).?; + const return_type = fn_info.return_type.toType(); const llvm_fn = try self.resolveInst(pl_op.operand); const target = mod.getTarget(); const sret = firstParamSRet(fn_info, mod); @@ -4790,7 +4790,7 @@ pub const FuncGen = struct { break :blk ret_ptr; }; - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = return_type.isError(mod) and self.dg.module.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { try llvm_args.append(self.err_ret_trace.?); @@ -4971,14 +4971,14 @@ pub const FuncGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); if (!isByRef(param_ty, mod)) { self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); const param_llvm_ty = try self.dg.lowerType(param_ty); const alignment = param_ty.abiAlignment(mod); self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); @@ -4998,7 +4998,7 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; @@ -5023,7 +5023,7 @@ pub const FuncGen = struct { }; } - if (return_type.isNoReturn() and attr != .AlwaysTail) { + if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) { return null; } @@ -5088,9 +5088,9 @@ pub const FuncGen = struct { _ = self.builder.buildRetVoid(); return null; } - const fn_info = self.dg.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(self.dg.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { - if (fn_info.return_type.isError(mod)) { + if (fn_info.return_type.toType().isError(mod)) { // Functions with an empty error set are emitted with an 
error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. @@ -5135,9 +5135,9 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); - const fn_info = self.dg.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(self.dg.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { - if (fn_info.return_type.isError(mod)) { + if (fn_info.return_type.toType().isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. @@ -6148,25 +6148,21 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const is_internal_linkage = !mod.decl_exports.contains(decl_index); - var fn_ty_pl: Type.Payload.Function = .{ - .base = .{ .tag = .function }, - .data = .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = Type.void, - .alignment = 0, - .noalias_bits = 0, - .cc = .Unspecified, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - }, - }; - const fn_ty = Type.initPayload(&fn_ty_pl.base); + const fn_ty = try mod.funcType(.{ + .param_types = &.{}, + .return_type = .void_type, + .alignment = 0, + .noalias_bits = 0, + .comptime_bits = 0, + .cc = .Unspecified, + .is_var_args = false, + .is_generic = false, + .is_noinline = false, + .align_is_generic = false, + .cc_is_generic = false, + .section_is_generic = false, + .addrspace_is_generic = false, + }); const subprogram = dib.createFunction( di_file.toScope(), decl.name, @@ -10546,31 +10542,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { } } -fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; +fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool { + if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false; const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return isByRef(fn_info.return_type, mod), + .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type, mod), + .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, + else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return 
riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type, mod), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, - .Stdcall => return !isScalar(mod, fn_info.return_type), + .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), + .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, + .Stdcall => return !isScalar(mod, fn_info.return_type.toType()), else => return false, } } @@ -10585,13 +10581,14 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool { /// In order to support the C calling convention, some return types need to be lowered /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. -fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { +fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type { const mod = dg.module; - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { + const return_type = fn_info.return_type.toType(); + if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. - if (fn_info.return_type.isError(mod)) { + if (return_type.isError(mod)) { return dg.lowerType(Type.anyerror); } else { return dg.context.voidType(); @@ -10600,61 +10597,61 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { const target = mod.getTarget(); switch (fn_info.cc) { .Unspecified, .Inline => { - if (isByRef(fn_info.return_type, mod)) { + if (isByRef(return_type, mod)) { return dg.context.voidType(); } else { - return dg.lowerType(fn_info.return_type); + return dg.lowerType(return_type); } }, .C => { switch (target.cpu.arch) { - .mips, .mipsel => return dg.lowerType(fn_info.return_type), + .mips, .mipsel => return dg.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(dg, fn_info), else => return lowerSystemVFnRetTy(dg, fn_info), }, .wasm32 => { - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } - const classes = wasm_c_abi.classifyType(fn_info.return_type, mod); + const classes = wasm_c_abi.classifyType(return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { return dg.context.voidType(); } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod); + const scalar_type = wasm_c_abi.scalarType(return_type, mod); const abi_size = scalar_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) { + switch (aarch64_c_abi.classifyType(return_type, mod)) { .memory => return dg.context.voidType(), - .float_array => return dg.lowerType(fn_info.return_type), - .byval => return dg.lowerType(fn_info.return_type), + .float_array => return dg.lowerType(return_type), + .byval => return dg.lowerType(return_type), .integer => { - const bit_size = 
fn_info.return_type.bitSize(mod); + const bit_size = return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => return dg.context.intType(64).arrayType(2), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { + switch (arm_c_abi.classifyType(return_type, mod, .ret)) { .memory, .i64_array => return dg.context.voidType(), .i32_array => |len| if (len == 1) { return dg.context.intType(32); } else { return dg.context.voidType(); }, - .byval => return dg.lowerType(fn_info.return_type), + .byval => return dg.lowerType(return_type), } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) { + switch (riscv_c_abi.classifyType(return_type, mod)) { .memory => return dg.context.voidType(), .integer => { - const bit_size = fn_info.return_type.bitSize(mod); + const bit_size = return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => { @@ -10664,50 +10661,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { }; return dg.context.structType(&llvm_types_buffer, 2, .False); }, - .byval => return dg.lowerType(fn_info.return_type), + .byval => return dg.lowerType(return_type), } }, // TODO investigate C ABI for other architectures - else => return dg.lowerType(fn_info.return_type), + else => return dg.lowerType(return_type), } }, .Win64 => return lowerWin64FnRetTy(dg, fn_info), .SysV => return lowerSystemVFnRetTy(dg, fn_info), .Stdcall => { - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } else { return dg.context.voidType(); } }, - else => return dg.lowerType(fn_info.return_type), + else => return dg.lowerType(return_type), } } -fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { +fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type { const mod = dg.module; - switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) { + const return_type = fn_info.return_type.toType(); + switch (x86_64_abi.classifyWindows(return_type, mod)) { .integer => { - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } else { - const abi_size = fn_info.return_type.abiSize(mod); + const abi_size = return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, .win_i128 => return dg.context.intType(64).vectorType(2), .memory => return dg.context.voidType(), - .sse => return dg.lowerType(fn_info.return_type), + .sse => return dg.lowerType(return_type), else => unreachable, } } -fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { +fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type { const mod = dg.module; - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + const return_type = fn_info.return_type.toType(); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } - const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret); + const classes = x86_64_abi.classifySystemV(return_type, mod, .ret); if (classes[0] == .memory) { return dg.context.voidType(); } @@ -10748,7 +10747,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm } } if (classes[0] == .integer and classes[1] == 
.none) { - const abi_size = fn_info.return_type.abiSize(mod); + const abi_size = return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False); @@ -10756,7 +10755,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm const ParamTypeIterator = struct { dg: *DeclGen, - fn_info: Type.Payload.Function.Data, + fn_info: InternPool.Key.FuncType, zig_index: u32, llvm_index: u32, llvm_types_len: u32, @@ -10781,7 +10780,7 @@ const ParamTypeIterator = struct { if (it.zig_index >= it.fn_info.param_types.len) return null; const ty = it.fn_info.param_types[it.zig_index]; it.byval_attr = false; - return nextInner(it, ty); + return nextInner(it, ty.toType()); } /// `airCall` uses this instead of `next` so that it can take into account variadic functions. @@ -10793,7 +10792,7 @@ const ParamTypeIterator = struct { return nextInner(it, fg.typeOf(args[it.zig_index])); } } else { - return nextInner(it, it.fn_info.param_types[it.zig_index]); + return nextInner(it, it.fn_info.param_types[it.zig_index].toType()); } } @@ -11009,7 +11008,7 @@ const ParamTypeIterator = struct { } }; -fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator { +fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator { return .{ .dg = dg, .fn_info = fn_info, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 32ea975b64..777bb1cff9 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1227,8 +1227,9 @@ pub const DeclGen = struct { }, .Fn => switch (repr) { .direct => { + const fn_info = mod.typeToFunc(ty).?; // TODO: Put this somewhere in Sema.zig - if (ty.fnIsVarArgs()) + if (fn_info.is_var_args) return self.fail("VarArgs functions are unsupported for SPIR-V", .{}); const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen()); @@ -1546,18 +1547,17 @@ pub const DeclGen = struct { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ - .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), + .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)), .id_result = decl_id, .function_control = .{}, // TODO: We can set inline here if the type requires it. 
.function_type = prototype_id, }); - const params = decl.ty.fnParamLen(); - var i: usize = 0; + const fn_info = mod.typeToFunc(decl.ty).?; - try self.args.ensureUnusedCapacity(self.gpa, params); - while (i < params) : (i += 1) { - const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i)); + try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len); + for (fn_info.param_types) |param_type| { + const param_type_id = try self.resolveTypeId(param_type.toType()); const arg_result_id = self.spv.allocId(); try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{ .id_result_type = param_type_id, @@ -3338,10 +3338,10 @@ pub const DeclGen = struct { .Pointer => return self.fail("cannot call function pointers", .{}), else => unreachable, }; - const fn_info = zig_fn_ty.fnInfo(); + const fn_info = mod.typeToFunc(zig_fn_ty).?; const return_type = fn_info.return_type; - const result_type_id = try self.resolveTypeId(return_type); + const result_type_id = try self.resolveTypeId(return_type.toType()); const result_id = self.spv.allocId(); const callee_id = try self.resolve(pl_op.operand); @@ -3368,11 +3368,11 @@ pub const DeclGen = struct { .id_ref_3 = params[0..n_params], }); - if (return_type.isNoReturn()) { + if (return_type == .noreturn_type) { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { return null; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 452356de2c..efaeebc62e 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1430,7 +1430,7 @@ pub fn updateDeclExports( .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, }; - const decl_cc = exported_decl.ty.fnCallingConvention(); + const decl_cc = exported_decl.ty.fnCallingConvention(mod); if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and self.base.options.link_libc) { diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index b9722f8c95..92ea2a15dc 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1022,7 +1022,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) const decl_name_with_null = decl_name[0 .. decl_name.len + 1]; try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len); - const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_type = decl.ty.fnReturnType(mod); const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram)); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index fbdcbd5a8e..da25753b95 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -131,12 +131,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index) pub fn updateDeclExports( self: *SpirV, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { - const decl = module.declPtr(decl_index); - if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) { + const decl = mod.declPtr(decl_index); + if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. 
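// The Module-threading pattern from these linker hunks, condensed; the
// receiver and parameter names match the calls shown above:
//
//     const cc = decl.ty.fnCallingConvention(mod); // was fnCallingConvention()
//     const ret_ty = decl.ty.fnReturnType(mod);    // was fnReturnType()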
const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/target.zig b/src/target.zig index c89f8ce92c..ac78d27c1a 100644 --- a/src/target.zig +++ b/src/target.zig @@ -649,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 { else => "o", // Non-standard }; } + +pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool { + return switch (cc) { + .Unspecified, .Async, .Inline => true, + // For now we want to authorize PTX kernel to use zig objects, even if + // we end up exposing the ABI. The goal is to experiment with more + // integrated CPU/GPU code. + .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64, + else => false, + }; +} diff --git a/src/type.zig b/src/type.zig index 32fa64a1ac..daf8b305cc 100644 --- a/src/type.zig +++ b/src/type.zig @@ -42,8 +42,6 @@ pub const Type = struct { .error_set_merged, => return .ErrorSet, - .function => return .Fn, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -66,6 +64,7 @@ pub const Type = struct { .union_type => return .Union, .opaque_type => return .Opaque, .enum_type => return .Enum, + .func_type => return .Fn, .simple_type => |s| switch (s) { .f16, .f32, @@ -344,53 +343,6 @@ pub const Type = struct { return true; }, - .function => { - if (b.zigTypeTag(mod) != .Fn) return false; - - const a_info = a.fnInfo(); - const b_info = b.fnInfo(); - - if (!a_info.return_type.isGenericPoison() and - !b_info.return_type.isGenericPoison() and - !eql(a_info.return_type, b_info.return_type, mod)) - return false; - - if (a_info.is_var_args != b_info.is_var_args) - return false; - - if (a_info.is_generic != b_info.is_generic) - return false; - - if (a_info.is_noinline != b_info.is_noinline) - return false; - - if (a_info.noalias_bits != b_info.noalias_bits) - return false; - - if (!a_info.cc_is_generic and a_info.cc != b_info.cc) - return false; - - if (!a_info.align_is_generic and a_info.alignment != b_info.alignment) - return false; - - if (a_info.param_types.len != b_info.param_types.len) - return false; - - for (a_info.param_types, 0..) |a_param_ty, i| { - const b_param_ty = b_info.param_types[i]; - if (a_info.comptime_params[i] != b_info.comptime_params[i]) - return false; - - if (a_param_ty.isGenericPoison()) continue; - if (b_param_ty.isGenericPoison()) continue; - - if (!eql(a_param_ty, b_param_ty, mod)) - return false; - } - - return true; - }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -501,32 +453,6 @@ pub const Type = struct { std.hash.autoHash(hasher, ies); }, - .function => { - std.hash.autoHash(hasher, std.builtin.TypeId.Fn); - - const fn_info = ty.fnInfo(); - if (!fn_info.return_type.isGenericPoison()) { - hashWithHasher(fn_info.return_type, hasher, mod); - } - if (!fn_info.align_is_generic) { - std.hash.autoHash(hasher, fn_info.alignment); - } - if (!fn_info.cc_is_generic) { - std.hash.autoHash(hasher, fn_info.cc); - } - std.hash.autoHash(hasher, fn_info.is_var_args); - std.hash.autoHash(hasher, fn_info.is_generic); - std.hash.autoHash(hasher, fn_info.is_noinline); - std.hash.autoHash(hasher, fn_info.noalias_bits); - - std.hash.autoHash(hasher, fn_info.param_types.len); - for (fn_info.param_types, 0..) 
|param_ty, i| { - std.hash.autoHash(hasher, fn_info.paramIsComptime(i)); - if (param_ty.isGenericPoison()) continue; - hashWithHasher(param_ty, hasher, mod); - } - }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -631,30 +557,6 @@ pub const Type = struct { }; }, - .function => { - const payload = self.castTag(.function).?.data; - const param_types = try allocator.alloc(Type, payload.param_types.len); - for (payload.param_types, 0..) |param_ty, i| { - param_types[i] = try param_ty.copy(allocator); - } - const other_comptime_params = payload.comptime_params[0..payload.param_types.len]; - const comptime_params = try allocator.dupe(bool, other_comptime_params); - return Tag.function.create(allocator, .{ - .return_type = try payload.return_type.copy(allocator), - .param_types = param_types, - .cc = payload.cc, - .alignment = payload.alignment, - .is_var_args = payload.is_var_args, - .is_generic = payload.is_generic, - .is_noinline = payload.is_noinline, - .comptime_params = comptime_params.ptr, - .align_is_generic = payload.align_is_generic, - .cc_is_generic = payload.cc_is_generic, - .section_is_generic = payload.section_is_generic, - .addrspace_is_generic = payload.addrspace_is_generic, - .noalias_bits = payload.noalias_bits, - }); - }, .pointer => { const payload = self.castTag(.pointer).?.data; const sent: ?Value = if (payload.sentinel) |some| @@ -766,32 +668,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .function => { - const payload = ty.castTag(.function).?.data; - try writer.writeAll("fn("); - for (payload.param_types, 0..) |param_type, i| { - if (i != 0) try writer.writeAll(", "); - try param_type.dump("", .{}, writer); - } - if (payload.is_var_args) { - if (payload.param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (payload.alignment != 0) { - try writer.print("align({d}) ", .{payload.alignment}); - } - if (payload.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(payload.cc)); - try writer.writeAll(") "); - } - ty = payload.return_type; - continue; - }, - .anyframe_T => { const return_type = ty.castTag(.anyframe_T).?.data; try writer.print("anyframe->", .{}); @@ -909,48 +785,6 @@ pub const Type = struct { try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, - .function => { - const fn_info = ty.fnInfo(); - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn("); - for (fn_info.param_types, 0..) 
|param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (fn_info.paramIsComptime(i)) { - try writer.writeAll("comptime "); - } - if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) { - try writer.writeAll("noalias "); - }; - if (param_ty.isGenericPoison()) { - try writer.writeAll("anytype"); - } else { - try print(param_ty, writer, mod); - } - } - if (fn_info.is_var_args) { - if (fn_info.param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.alignment != 0) { - try writer.print("align({d}) ", .{fn_info.alignment}); - } - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); - } - if (fn_info.return_type.isGenericPoison()) { - try writer.writeAll("anytype"); - } else { - try print(fn_info.return_type, writer, mod); - } - }, - .error_union => { const error_union = ty.castTag(.error_union).?.data; try print(error_union.error_set, writer, mod); @@ -1158,6 +992,48 @@ pub const Type = struct { const decl = mod.declPtr(enum_type.decl); try decl.renderFullyQualifiedName(mod, writer); }, + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); + } + try writer.writeAll("fn("); + for (fn_info.param_types, 0..) |param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); + } + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); + } + } + if (param_ty == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(param_ty.toType(), writer, mod); + } + } + if (fn_info.is_var_args) { + if (fn_info.param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.alignment != 0) { + try writer.print("align({d}) ", .{fn_info.alignment}); + } + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(fn_info.return_type.toType(), writer, mod); + } + }, // values, not types .undef => unreachable, @@ -1174,6 +1050,11 @@ pub const Type = struct { } } + pub fn toIntern(ty: Type) InternPool.Index { + assert(ty.ip_index != .none); + return ty.ip_index; + } + pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { @@ -1223,7 +1104,7 @@ pub const Type = struct { if (ignore_comptime_only) { return true; } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { - return !ty.childType(mod).fnInfo().is_generic; + return !mod.typeToFunc(ty.childType(mod)).?.is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { @@ -1231,12 +1112,6 @@ pub const Type = struct { } }, - // These are false because they are comptime-only types. - // These are function *bodies*, not pointers. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .function => return false, - .optional => { const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { @@ -1262,7 +1137,7 @@ pub const Type = struct { // to comptime-only types do not, with the exception of function pointers. 
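// Condensed from the hunks in this file: a function *body* type has no
// runtime bits, while a pointer to a non-generic function does. A minimal
// sketch of the distinction, using only predicates shown in this diff:
//
//     .func_type => false, // bodies are comptime-only
//     // ...but through a pointer:
//     if (child_ty.zigTypeTag(mod) == .Fn)
//         return !mod.typeToFunc(child_ty).?.is_generic;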
if (ignore_comptime_only) return true; const child_ty = ptr_type.elem_type.toType(); - if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; + if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic; if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); return !comptimeOnly(ty, mod); }, @@ -1293,6 +1168,13 @@ pub const Type = struct { } }, .error_union_type => @panic("TODO"), + + // These are function *bodies*, not pointers. + // They return false here because they are comptime-only types. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .func_type => false, + .simple_type => |t| switch (t) { .f16, .f32, @@ -1436,8 +1318,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - // These are function bodies, not function pointers. - .function, .error_union, .anyframe_T, => false, @@ -1448,12 +1328,21 @@ pub const Type = struct { .optional => ty.isPtrLikeOptional(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => true, - .ptr_type => true, + .int_type, + .ptr_type, + .vector_type, + => true, + + .error_union_type, + .anon_struct_type, + .opaque_type, + // These are function bodies, not function pointers. + .func_type, + => false, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), - .vector_type => true, .opt_type => |child| child.toType().isPtrLikeOptional(mod), - .error_union_type => false, + .simple_type => |t| switch (t) { .f16, .f32, @@ -1509,12 +1398,10 @@ pub const Type = struct { }; return struct_obj.layout != .Auto; }, - .anon_struct_type => false, .union_type => |union_type| switch (union_type.runtime_tag) { .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, .tagged => false, }, - .opaque_type => false, .enum_type => |enum_type| switch (enum_type.tag_mode) { .auto => false, .explicit, .nonexhaustive => true, @@ -1546,7 +1433,7 @@ pub const Type = struct { pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Fn => { - const fn_info = ty.fnInfo(); + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -1555,7 +1442,7 @@ pub const Type = struct { .Inline => return false, else => {}, } - if (fn_info.return_type.comptimeOnly(mod)) return false; + if (fn_info.return_type.toType().comptimeOnly(mod)) return false; return true; }, else => return ty.hasRuntimeBits(mod), @@ -1707,13 +1594,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - // represents machine code; not a pointer - .function => { - const alignment = ty.castTag(.function).?.data.alignment; - if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; - return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; - }, - .pointer, .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, @@ -1753,6 +1633,13 @@ pub const Type = struct { .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), + // represents machine code; not a pointer + .func_type => |func_type| { + const alignment = @intCast(u32, func_type.alignment); + if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; + return AbiAlignmentAdvanced{ .scalar = 
target_util.defaultFunctionAlignment(target) }; + }, + .simple_type => |t| switch (t) { .bool, .atomic_order, @@ -2086,7 +1973,6 @@ pub const Type = struct { .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -2187,6 +2073,7 @@ pub const Type = struct { .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), .error_union_type => @panic("TODO"), + .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { .bool, .atomic_order, @@ -2408,7 +2295,6 @@ pub const Type = struct { switch (ty.ip_index) { .none => switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -2453,6 +2339,7 @@ pub const Type = struct { }, .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), + .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { .f16 => return 16, .f32 => return 32, @@ -3271,6 +3158,7 @@ pub const Type = struct { .opt_type => unreachable, .error_union_type => unreachable, + .func_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above .union_type => unreachable, @@ -3356,54 +3244,22 @@ pub const Type = struct { }; } - /// Asserts the type is a function. - pub fn fnParamLen(self: Type) usize { - return self.castTag(.function).?.data.param_types.len; - } - - /// Asserts the type is a function. The length of the slice must be at least the length - /// given by `fnParamLen`. - pub fn fnParamTypes(self: Type, types: []Type) void { - const payload = self.castTag(.function).?.data; - @memcpy(types[0..payload.param_types.len], payload.param_types); - } - - /// Asserts the type is a function. - pub fn fnParamType(self: Type, index: usize) Type { - switch (self.tag()) { - .function => { - const payload = self.castTag(.function).?.data; - return payload.param_types[index]; - }, - - else => unreachable, - } - } - /// Asserts the type is a function or a function pointer. - pub fn fnReturnType(ty: Type) Type { - const fn_ty = switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.pointee_type, - .function => ty, - else => unreachable, - }; - return fn_ty.castTag(.function).?.data.return_type; + pub fn fnReturnType(ty: Type, mod: *Module) Type { + return fnReturnTypeIp(ty, mod.intern_pool); } - /// Asserts the type is a function. - pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { - return self.castTag(.function).?.data.cc; + pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type { + return switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type, + .func_type => |func_type| func_type.return_type, + else => unreachable, + }.toType(); } /// Asserts the type is a function. - pub fn fnCallingConventionAllowsZigTypes(target: Target, cc: std.builtin.CallingConvention) bool { - return switch (cc) { - .Unspecified, .Async, .Inline => true, - // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. - // The goal is to experiment with more integrated CPU/GPU code. 
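The recurring call-site migration in these hunks replaces `Type` payload lookups with InternPool key lookups. A minimal before/after sketch, assuming a `mod: *Module` in scope (`typeToFunc` and `toType` are the accessors used in the hunks above; the surrounding code is illustrative only):

    // before: const fn_info = ty.fnInfo(); // Type payload; fields were Type values
    const fn_info = mod.typeToFunc(ty).?; // InternPool key; null when not a fn type
    const ret_ty = fn_info.return_type.toType(); // fields are InternPool.Index now
    const generic = fn_info.is_generic; // flags read the same as before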
- .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64, - else => false, - }; + pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { + return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc; } pub fn isValidParamType(self: Type, mod: *const Module) bool { @@ -3421,12 +3277,8 @@ pub const Type = struct { } /// Asserts the type is a function. - pub fn fnIsVarArgs(self: Type) bool { - return self.castTag(.function).?.data.is_var_args; - } - - pub fn fnInfo(ty: Type) Payload.Function.Data { - return ty.castTag(.function).?.data; + pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { + return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args; } pub fn isNumeric(ty: Type, mod: *const Module) bool { @@ -3474,7 +3326,6 @@ pub const Type = struct { .error_set_single, .error_set, .error_set_merged, - .function, .error_set_inferred, .anyframe_T, .pointer, @@ -3500,7 +3351,12 @@ pub const Type = struct { return null; } }, - .ptr_type => return null, + + .ptr_type, + .error_union_type, + .func_type, + => return null, + .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -3514,13 +3370,13 @@ pub const Type = struct { return null; }, .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { return null; } }, - .error_union_type => return null, + .simple_type => |t| switch (t) { .f16, .f32, @@ -3682,9 +3538,6 @@ pub const Type = struct { .error_set_merged, => false, - // These are function bodies, not function pointers. - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, @@ -3721,6 +3574,9 @@ pub const Type = struct { .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), .opt_type => |child| child.toType().comptimeOnly(mod), .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), + // These are function bodies, not function pointers. + .func_type => true, + .simple_type => |t| switch (t) { .f16, .f32, @@ -4367,6 +4223,10 @@ pub const Type = struct { return ty.ip_index == .generic_poison_type; } + pub fn isBoundFn(ty: Type) bool { + return ty.ip_index == .none and ty.tag() == .bound_fn; + } + /// This enum does not directly correspond to `std.builtin.TypeId` because /// it has extra enum tags in it, as a way of using less memory. For example, /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types @@ -4383,7 +4243,6 @@ pub const Type = struct { // After this, the tag requires a payload. pointer, - function, optional, error_union, anyframe_T, @@ -4411,7 +4270,6 @@ pub const Type = struct { .error_set_merged => Payload.ErrorSetMerged, .pointer => Payload.Pointer, - .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, }; @@ -4508,36 +4366,6 @@ pub const Type = struct { data: u16, }; - pub const Function = struct { - pub const base_tag = Tag.function; - - base: Payload = Payload{ .tag = base_tag }, - data: Data, - - // TODO look into optimizing this memory to take fewer bytes - pub const Data = struct { - param_types: []Type, - comptime_params: [*]bool, - return_type: Type, - /// If zero use default target function code alignment. 
- alignment: u32, - noalias_bits: u32, - cc: std.builtin.CallingConvention, - is_var_args: bool, - is_generic: bool, - is_noinline: bool, - align_is_generic: bool, - cc_is_generic: bool, - section_is_generic: bool, - addrspace_is_generic: bool, - - pub fn paramIsComptime(self: @This(), i: usize) bool { - assert(i < self.param_types.len); - return self.comptime_params[i]; - } - }; - }; - pub const ErrorSet = struct { pub const base_tag = Tag.error_set; diff --git a/src/value.zig b/src/value.zig index 50e3fc8061..35d144f912 100644 --- a/src/value.zig +++ b/src/value.zig @@ -602,6 +602,11 @@ pub const Value = struct { return result; } + pub fn toIntern(val: Value) InternPool.Index { + assert(val.ip_index != .none); + return val.ip_index; + } + /// Asserts that the value is representable as a type. pub fn toType(self: Value) Type { if (self.ip_index != .none) return self.ip_index.toType(); -- cgit v1.2.3 From 6e0de1d11694a58745da76d601ebab7562feed09 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 22 May 2023 07:58:02 -0400 Subject: InternPool: port most of value tags --- lib/std/array_list.zig | 44 + src/Air.zig | 6 +- src/AstGen.zig | 30 +- src/Compilation.zig | 5 +- src/InternPool.zig | 827 ++++++++-- src/Module.zig | 409 +++-- src/Sema.zig | 2882 ++++++++++++++++------------------- src/TypedValue.zig | 245 +-- src/Zir.zig | 4 +- src/arch/aarch64/CodeGen.zig | 17 +- src/arch/arm/CodeGen.zig | 12 +- src/arch/riscv64/CodeGen.zig | 11 +- src/arch/sparc64/CodeGen.zig | 11 +- src/arch/wasm/CodeGen.zig | 351 +++-- src/arch/x86_64/CodeGen.zig | 65 +- src/codegen.zig | 1035 ++++++------- src/codegen/c.zig | 957 ++++++------ src/codegen/llvm.zig | 1608 +++++++++---------- src/codegen/spirv.zig | 312 ++-- src/link.zig | 19 +- src/link/C.zig | 10 +- src/link/Coff.zig | 18 +- src/link/Dwarf.zig | 8 +- src/link/Elf.zig | 18 +- src/link/MachO.zig | 28 +- src/link/NvPtx.zig | 4 +- src/link/Plan9.zig | 14 +- src/link/SpirV.zig | 6 +- src/link/Wasm.zig | 40 +- src/print_air.zig | 4 +- src/type.zig | 463 +++--- src/value.zig | 1775 +++++---------------- tools/lldb_pretty_printers.py | 6 +- tools/stage2_gdb_pretty_printers.py | 2 +- 34 files changed, 5236 insertions(+), 6010 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index bbfa588d6d..c2a2486dfa 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// The returned pointer becomes invalid when the list is resized. + /// Resizes the list if `self.capacity` is not large enough. + pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T { + const prev_len = self.items.len; + try self.resize(self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// Asserts that there is already space for the new items without allocating more. + /// **Does not** invalidate element pointers. + /// The returned pointer becomes invalid when the list is resized.
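Usage sketch for the new allocating `addManyAsSlice` shown above (illustrative only; `gpa` is an assumed allocator, and the `AssumeCapacity` variant documented just above continues below):

    var list = std.ArrayList(u8).init(gpa);
    defer list.deinit();
    const dest = try list.addManyAsSlice(4); // may reallocate; len grows by 4
    @memcpy(dest, "abcd"); // elements start undefined; the caller fills them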
+ pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to the removed element. @@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// The returned pointer becomes invalid when the list is resized. + /// Resizes the list if `self.capacity` is not large enough. + pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T { + const prev_len = self.items.len; + try self.resize(allocator, self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// Asserts that there is already space for the new items without allocating more. + /// **Does not** invalidate element pointers. + /// The returned pointer becomes invalid when the list is resized. + pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to the last element. diff --git a/src/Air.zig b/src/Air.zig index 070cf7dc72..9dcbe174ec 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -901,8 +901,8 @@ pub const Inst = struct { manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), - const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), - const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), @@ -1382,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.const_slice_u8_sentinel_0, + .tag_name, .error_name => return Type.slice_const_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); diff --git a/src/AstGen.zig b/src/AstGen.zig index 998e08ba04..6956a58ae4 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -3934,7 +3934,7 @@ fn fnDecl( var section_gz = decl_gz.makeSubBlock(params_scope); defer section_gz.unstack(); const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty =
.const_slice_u8_type } }, fn_proto.ast.section_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); if (section_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. break :inst inst; @@ -4137,7 +4137,7 @@ fn globalVarDecl( break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { - break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node); }; const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); @@ -7878,7 +7878,7 @@ fn unionInit( params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, @@ -8100,12 +8100,12 @@ fn builtinCall( if (ri.rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); return rvalue(gz, ri, result, node); }, @@ -8271,11 +8271,11 @@ fn builtinCall( .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error), .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file), .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name), .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, 
params[0], .set_runtime_safety), .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), @@ -8334,7 +8334,7 @@ fn builtinCall( }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic); }, .trap => { try emitDbgNode(gz, node); @@ -8450,7 +8450,7 @@ fn builtinCall( }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]); const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), @@ -8546,7 +8546,7 @@ fn builtinCall( }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, @@ -8701,7 +8701,7 @@ fn hasDeclOrField( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, @@ -8851,7 +8851,7 @@ fn simpleCBuiltin( ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, @@ -8869,7 +8869,7 @@ fn offsetOf( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, @@ -10317,8 +10317,8 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), diff --git 
a/src/Compilation.zig b/src/Compilation.zig index 43b16241fc..30ac499955 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -226,7 +226,7 @@ const Job = union(enum) { /// Write the constant value for a Decl to the output file. codegen_decl: Module.Decl.Index, /// Write the machine code for a function to the output file. - codegen_func: *Module.Fn, + codegen_func: Module.Fn.Index, /// Render the .h file snippet for the Decl. emit_h_decl: Module.Decl.Index, /// The Decl needs to be analyzed and possibly export itself. @@ -3208,7 +3208,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v // Tests are always emitted in test binaries. The decl_refs are created by // Module.populateTestFunctions, but this will not queue body analysis, so do // that now. - try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data); + const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?; + try module.ensureFuncBodyAnalysisQueued(func_index); } }, .update_embed_file => |embed_file| { diff --git a/src/InternPool.zig b/src/InternPool.zig index d19cc3d647..ec4d1df45f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -34,6 +34,12 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, /// When a Union object is freed from `allocated_unions`, it is pushed into this stack. unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, +/// Fn objects are stored in this data structure because: +/// * They need to be mutated after creation. +allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{}, +/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack. +funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{}, + /// InferredErrorSet objects are stored in this data structure because: /// * They contain pointers such as the errors map and the set of other inferred error sets. /// * They need to be mutated after creation. @@ -66,18 +72,18 @@ const Limb = std.math.big.Limb; const InternPool = @This(); const Module = @import("Module.zig"); +const Sema = @import("Sema.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { _ = b_void; - return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a); + return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a, ctx.intern_pool); } pub fn hash(ctx: @This(), a: Key) u32 { - _ = ctx; - return a.hash32(); + return a.hash32(ctx.intern_pool); } }; @@ -111,10 +117,19 @@ pub const RuntimeIndex = enum(u32) { } }; +/// An index into `string_bytes`. +pub const String = enum(u32) { + _, +}; + /// An index into `string_bytes`. pub const NullTerminatedString = enum(u32) { _, + pub fn toString(self: NullTerminatedString) String { + return @intToEnum(String, @enumToInt(self)); + } + pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { return @intToEnum(OptionalNullTerminatedString, @enumToInt(self)); } @@ -180,23 +195,20 @@ pub const Key = union(enum) { /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. undef: Index, + runtime_value: TypeValue, simple_value: SimpleValue, - extern_func: struct { - ty: Index, - /// The Decl that corresponds to the function itself. - decl: Module.Decl.Index, - /// Library name if specified. - /// For example `extern "c" fn write(...) usize` would have 'c' as library name. 
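Worth noting in the hunks above: `codegen_func` jobs now carry a `Module.Fn.Index` rather than a `*Module.Fn`, because `Fn` objects are owned by the InternPool's `allocated_funcs` segmented list and recycled through `funcs_free_list`. A minimal lookup sketch, assuming an accessor analogous to the `declPtr`/`unionPtr` calls used elsewhere in this patch (the `funcPtr` name here is an assumption, not shown in these hunks):

    // Resolve the function value stored on a Decl to its stable index, then to a pointer.
    const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?;
    const func = module.funcPtr(func_index); // assumed accessor returning *Module.Fn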
- /// Index into the string table bytes. - lib_name: u32, - }, + variable: Key.Variable, + extern_func: ExternFunc, + func: Func, int: Key.Int, + err: Error, + error_union: ErrorUnion, + enum_literal: NullTerminatedString, /// A specific enum tag, indicated by the integer tag value. enum_tag: Key.EnumTag, float: Key.Float, ptr: Ptr, opt: Opt, - /// An instance of a struct, array, or vector. /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -261,7 +273,7 @@ pub const Key = union(enum) { pub const ArrayType = struct { len: u64, child: Index, - sentinel: Index, + sentinel: Index = .none, }; pub const VectorType = struct { @@ -369,6 +381,7 @@ pub const Key = union(enum) { return @intCast(u32, x); }, .i64, .big_int => return null, // out of range + .lazy_align, .lazy_size => unreachable, } } }; @@ -441,6 +454,32 @@ pub const Key = union(enum) { } }; + pub const Variable = struct { + ty: Index, + init: Index, + decl: Module.Decl.Index, + lib_name: OptionalNullTerminatedString = .none, + is_extern: bool = false, + is_const: bool = false, + is_threadlocal: bool = false, + is_weak_linkage: bool = false, + }; + + pub const ExternFunc = struct { + ty: Index, + /// The Decl that corresponds to the function itself. + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. + lib_name: OptionalNullTerminatedString, + }; + + pub const Func = struct { + ty: Index, + index: Module.Fn.Index, + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -449,6 +488,8 @@ pub const Key = union(enum) { u64: u64, i64: i64, big_int: BigIntConst, + lazy_align: Index, + lazy_size: Index, /// Big enough to fit any non-BigInt value pub const BigIntSpace = struct { @@ -460,13 +501,26 @@ pub const Key = union(enum) { pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { return switch (storage) { .big_int => |x| x, - .u64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), - .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + .lazy_align, .lazy_size => unreachable, }; } }; }; + pub const Error = struct { + ty: Index, + name: NullTerminatedString, + }; + + pub const ErrorUnion = struct { + ty: Index, + val: union(enum) { + err_name: NullTerminatedString, + payload: Index, + }, + }; + pub const EnumTag = struct { /// The enum type. 
ty: Index, @@ -497,19 +551,8 @@ pub const Key = union(enum) { len: Index = .none, pub const Addr = union(enum) { - @"var": struct { - init: Index, - owner_decl: Module.Decl.Index, - lib_name: OptionalNullTerminatedString, - is_const: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - }, decl: Module.Decl.Index, - mut_decl: struct { - decl: Module.Decl.Index, - runtime_index: RuntimeIndex, - }, + mut_decl: MutDecl, int: Index, eu_payload: Index, opt_payload: Index, @@ -517,6 +560,10 @@ pub const Key = union(enum) { elem: BaseIndex, field: BaseIndex, + pub const MutDecl = struct { + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, + }; pub const BaseIndex = struct { base: Index, index: u64, @@ -546,22 +593,31 @@ pub const Key = union(enum) { storage: Storage, pub const Storage = union(enum) { + bytes: []const u8, elems: []const Index, repeated_elem: Index, + + pub fn values(self: *const Storage) []const Index { + return switch (self.*) { + .bytes => &.{}, + .elems => |elems| elems, + .repeated_elem => |*elem| @as(*const [1]Index, elem), + }; + } }; }; - pub fn hash32(key: Key) u32 { - return @truncate(u32, key.hash64()); + pub fn hash32(key: Key, ip: *const InternPool) u32 { + return @truncate(u32, key.hash64(ip)); } - pub fn hash64(key: Key) u64 { + pub fn hash64(key: Key, ip: *const InternPool) u64 { var hasher = std.hash.Wyhash.init(0); - key.hashWithHasher(&hasher); + key.hashWithHasher(&hasher, ip); return hasher.final(); } - pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { + pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void { const KeyTag = @typeInfo(Key).Union.tag_type.?; const key_tag: KeyTag = key; std.hash.autoHash(hasher, key_tag); @@ -575,27 +631,45 @@ pub const Key = union(enum) { .error_union_type, .simple_type, .simple_value, - .extern_func, .opt, .struct_type, .union_type, .un, .undef, + .err, + .error_union, + .enum_literal, .enum_tag, .inferred_error_set_type, => |info| std.hash.autoHash(hasher, info), + .runtime_value => |runtime_value| std.hash.autoHash(hasher, runtime_value.val), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), .enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl), + .variable => |variable| std.hash.autoHash(hasher, variable.decl), + .extern_func => |extern_func| std.hash.autoHash(hasher, extern_func.decl), + .func => |func| std.hash.autoHash(hasher, func.index), + .int => |int| { // Canonicalize all integers by converting them to BigIntConst. - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - - std.hash.autoHash(hasher, int.ty); - std.hash.autoHash(hasher, big_int.positive); - for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + + std.hash.autoHash(hasher, int.ty); + std.hash.autoHash(hasher, big_int.positive); + for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + }, + .lazy_align, .lazy_size => |lazy_ty| { + std.hash.autoHash( + hasher, + @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage), + ); + std.hash.autoHash(hasher, lazy_ty); + }, + } }, .float => |float| { @@ -615,7 +689,6 @@ pub const Key = union(enum) { // This is sound due to pointer provenance rules. 
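Hashing canonicalizes every eagerly-known integer through `BigIntConst` (the `.int` branch above), and `eql` compares across storage variants, so the storage representation a caller picks does not affect interning identity; only `lazy_align`/`lazy_size` stay distinct, since their numeric value is not yet known. A sketch of the consequence, assuming `ip: *InternPool` and a `gpa` allocator in scope:

    const a = try ip.get(gpa, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = 7 } } });
    const b = try ip.get(gpa, .{ .int = .{ .ty = .usize_type, .storage = .{ .i64 = 7 } } });
    std.debug.assert(a == b); // identical Key identity, so the second get() is a hit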
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); switch (ptr.addr) { - .@"var" => |@"var"| std.hash.autoHash(hasher, @"var".owner_decl), .decl => |decl| std.hash.autoHash(hasher, decl), .mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl), .int => |int| std.hash.autoHash(hasher, int), @@ -629,13 +702,47 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); - std.hash.autoHash(hasher, @as( - @typeInfo(Key.Aggregate.Storage).Union.tag_type.?, - aggregate.storage, - )); + switch (ip.indexToKey(aggregate.ty)) { + .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, + }, + else => {}, + } + switch (aggregate.storage) { - .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem), - .repeated_elem => |elem| std.hash.autoHash(hasher, elem), + .bytes => unreachable, + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, } }, @@ -663,7 +770,7 @@ pub const Key = union(enum) { } } - pub fn eql(a: Key, b: Key) bool { + pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { const KeyTag = @typeInfo(Key).Union.tag_type.?; const a_tag: KeyTag = a; const b_tag: KeyTag = b; @@ -709,9 +816,9 @@ pub const Key = union(enum) { const b_info = b.undef; return a_info == b_info; }, - .extern_func => |a_info| { - const b_info = b.extern_func; - return std.meta.eql(a_info, b_info); + .runtime_value => |a_info| { + const b_info = b.runtime_value; + return a_info.val == b_info.val; }, .opt => |a_info| { const b_info = b.opt; @@ -729,11 +836,36 @@ pub const Key = union(enum) { const b_info = b.un; return std.meta.eql(a_info, b_info); }, + .err => |a_info| { + const b_info = b.err; + return std.meta.eql(a_info, b_info); + }, + .error_union => |a_info| { + const b_info = b.error_union; + return std.meta.eql(a_info, b_info); + }, + .enum_literal => |a_info| { + const b_info = b.enum_literal; + return a_info == b_info; + }, .enum_tag => |a_info| { const b_info = b.enum_tag; return std.meta.eql(a_info, b_info); }, + .variable => |a_info| { + const b_info = b.variable; + return a_info.decl == b_info.decl; + }, + .extern_func => |a_info| { + const b_info = b.extern_func; + return a_info.decl == b_info.decl; + }, + .func => |a_info| { + const b_info = b.func; + return a_info.index == b_info.index; + }, + .ptr => |a_info| { const b_info = b.ptr; if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false; @@ -742,7 +874,6 @@ pub 
const Key = union(enum) { if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; return switch (a_info.addr) { - .@"var" => |a_var| a_var.owner_decl == b_info.addr.@"var".owner_decl, .decl => |a_decl| a_decl == b_info.addr.decl, .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), .int => |a_int| a_int == b_info.addr.int, @@ -765,16 +896,27 @@ pub const Key = union(enum) { .u64 => |bb| aa == bb, .i64 => |bb| aa == bb, .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, }, .i64 => |aa| switch (b_info.storage) { .u64 => |bb| aa == bb, .i64 => |bb| aa == bb, .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, }, .big_int => |aa| switch (b_info.storage) { .u64 => |bb| aa.orderAgainstScalar(bb) == .eq, .i64 => |bb| aa.orderAgainstScalar(bb) == .eq, .big_int => |bb| aa.eq(bb), + .lazy_align, .lazy_size => false, + }, + .lazy_align => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_size => false, + .lazy_align => |bb| aa == bb, + }, + .lazy_size => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_align => false, + .lazy_size => |bb| aa == bb, }, }; }, @@ -818,12 +960,43 @@ pub const Key = union(enum) { if (a_info.ty != b_info.ty) return false; const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; - if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) return false; + if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { + for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| { + const a_elem = switch (a_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + const b_elem = switch (b_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + if (a_elem != b_elem) return false; + } + return true; + } - return switch (a_info.storage) { - .elems => |a_elems| std.mem.eql(Index, a_elems, b_info.storage.elems), - .repeated_elem => |a_elem| a_elem == b_info.storage.repeated_elem, - }; + switch (a_info.storage) { + .bytes => |a_bytes| { + const b_bytes = b_info.storage.bytes; + return std.mem.eql(u8, a_bytes, b_bytes); + }, + .elems => |a_elems| { + const b_elems = b_info.storage.elems; + return std.mem.eql(Index, a_elems, b_elems); + }, + .repeated_elem => |a_elem| { + const b_elem = b_info.storage.repeated_elem; + return a_elem == b_elem; + }, + } }, .anon_struct_type => |a_info| { const b_info = b.anon_struct_type; @@ -876,16 +1049,23 @@ pub const Key = union(enum) { .func_type, => .type_type, - inline .ptr, + inline .runtime_value, + .ptr, .int, .float, .opt, + .variable, .extern_func, + .func, + .err, + .error_union, .enum_tag, .aggregate, .un, => |x| x.ty, + .enum_literal => .enum_literal_type, + .undef => |x| x, .simple_value => |s| switch (s) { @@ -977,8 +1157,8 @@ pub const Index = enum(u32) { manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - const_slice_u8_sentinel_0_type, + slice_const_u8_type, + slice_const_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, inferred_alloc_const_type, @@ -1128,11 +1308,11 @@ pub const Index = enum(u32) { }, undef: 
DataIsIndex, + runtime_value: DataIsIndex, simple_value: struct { data: SimpleValue }, - ptr_var: struct { data: *PtrVar }, ptr_mut_decl: struct { data: *PtrMutDecl }, ptr_decl: struct { data: *PtrDecl }, - ptr_int: struct { data: *PtrInt }, + ptr_int: struct { data: *PtrAddr }, ptr_eu_payload: DataIsIndex, ptr_opt_payload: DataIsIndex, ptr_comptime_field: struct { data: *PtrComptimeField }, @@ -1151,6 +1331,12 @@ pub const Index = enum(u32) { int_small: struct { data: *IntSmall }, int_positive: struct { data: u32 }, int_negative: struct { data: u32 }, + int_lazy_align: struct { data: *IntLazy }, + int_lazy_size: struct { data: *IntLazy }, + error_set_error: struct { data: *Key.Error }, + error_union_error: struct { data: *Key.Error }, + error_union_payload: struct { data: *TypeValue }, + enum_literal: struct { data: NullTerminatedString }, enum_tag: struct { data: *Key.EnumTag }, float_f16: struct { data: f16 }, float_f32: struct { data: f32 }, @@ -1160,18 +1346,21 @@ pub const Index = enum(u32) { float_c_longdouble_f80: struct { data: *Float80 }, float_c_longdouble_f128: struct { data: *Float128 }, float_comptime_float: struct { data: *Float128 }, + variable: struct { data: *Variable }, extern_func: struct { data: void }, func: struct { data: void }, only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, + bytes: struct { data: *Bytes }, aggregate: struct { data: *Aggregate }, repeated: struct { data: *Repeated }, }) void { _ = self; - @setEvalBranchQuota(10_000); - inline for (@typeInfo(Tag).Enum.fields) |tag| { - inline for (@typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields) |entry| { - if (comptime std.mem.eql(u8, tag.name, entry.name)) break; + const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; + @setEvalBranchQuota(2_000); + inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| { + inline for (0..map_fields.len) |offset| { + if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; } else { @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); } @@ -1318,14 +1507,14 @@ pub const static_keys = [_]Key{ .is_const = true, } }, - // const_slice_u8_type + // slice_const_u8_type .{ .ptr_type = .{ .elem_type = .u8_type, .size = .Slice, .is_const = true, } }, - // const_slice_u8_sentinel_0_type + // slice_const_u8_sentinel_0_type .{ .ptr_type = .{ .elem_type = .u8_type, .sentinel = .zero_u8, @@ -1505,12 +1694,13 @@ pub const Tag = enum(u8) { /// `data` is `Index` of the type. /// Untyped `undefined` is stored instead via `simple_value`. undef, + /// A wrapper for values which are comptime-known but should + /// semantically be runtime-known. + /// `data` is `Index` of the value. + runtime_value, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// A pointer to a var. - /// data is extra index of PtrVal, which contains the type and address. - ptr_var, /// A pointer to a decl that can be mutated at comptime. /// data is extra index of PtrMutDecl, which contains the type and address. ptr_mut_decl, @@ -1518,7 +1708,7 @@ pub const Tag = enum(u8) { /// data is extra index of PtrDecl, which contains the type and address. ptr_decl, /// A pointer with an integer value. - /// data is extra index of PtrInt, which contains the type and address. + /// data is extra index of PtrAddr, which contains the type and address. 
/// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. ptr_int, @@ -1585,6 +1775,24 @@ pub const Tag = enum(u8) { /// A negative integer value. /// data is a limbs index to `Int`. int_negative, + /// The ABI alignment of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_align, + /// The ABI size of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_size, + /// An error value. + /// data is extra index of `Key.Error`. + error_set_error, + /// An error union error. + /// data is extra index of `Key.Error`. + error_union_error, + /// An error union payload. + /// data is extra index of `TypeValue`. + error_union_payload, + /// An enum literal value. + /// data is `NullTerminatedString` of the enum literal name. + enum_literal, /// An enum tag value. /// data is extra index of `Key.EnumTag`. enum_tag, @@ -1617,9 +1825,14 @@ pub const Tag = enum(u8) { /// A comptime_float value. /// data is extra index to Float128. float_comptime_float, + /// A global variable. + /// data is extra index to Variable. + variable, /// An extern function. + /// data is extra index to Key.ExternFunc. extern_func, /// A regular function. + /// data is extra index to Key.Func. func, /// This represents the only possible value for *some* types which have /// only one possible value. Not all only-possible-values are encoded this way; @@ -1631,6 +1844,9 @@ pub const Tag = enum(u8) { only_possible_value, /// data is extra index to Key.Union. union_value, + /// An array of bytes. + /// data is extra index to `Bytes`. + bytes, /// An instance of a struct, array, or vector. /// data is extra index to `Aggregate`. aggregate, @@ -1670,6 +1886,13 @@ pub const TypeFunction = struct { }; }; +pub const Bytes = struct { + /// The type of the aggregate + ty: Index, + /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + bytes: String, +}; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -1843,6 +2066,11 @@ pub const Array = struct { } }; +pub const TypeValue = struct { + ty: Index, + val: Index, +}; + /// Trailing: /// 0. field name: NullTerminatedString for each fields_len; declaration order /// 1. tag value: Index for each fields_len; declaration order @@ -1888,21 +2116,22 @@ pub const PackedU64 = packed struct(u64) { } }; -pub const PtrVar = struct { - ty: Index, - /// If flags.is_extern == true this is `none`. +pub const Variable = struct { + /// This is a value if has_init is true, otherwise a type. init: Index, - owner_decl: Module.Decl.Index, + decl: Module.Decl.Index, /// Library name if specified. /// For example `extern "c" var stderrp = ...` would have 'c' as library name. lib_name: OptionalNullTerminatedString, flags: Flags, pub const Flags = packed struct(u32) { + has_init: bool, + is_extern: bool, is_const: bool, is_threadlocal: bool, is_weak_linkage: bool, - _: u29 = 0, + _: u27 = 0, }; }; @@ -1917,7 +2146,7 @@ pub const PtrMutDecl = struct { runtime_index: RuntimeIndex, }; -pub const PtrInt = struct { +pub const PtrAddr = struct { ty: Index, addr: Index, }; @@ -1949,6 +2178,11 @@ pub const IntSmall = struct { value: u32, }; +pub const IntLazy = struct { + ty: Index, + lazy_ty: Index, +}; + /// An f64 value, broken up into 2 u32 parts.
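The `Bytes` encoding above lets `[N]u8` aggregates live directly in `string_bytes` instead of costing one interned `Index` per element, and `hash`/`eql` treat the `Key.Aggregate.Storage` variants as interchangeable views of the same elements. A sketch of the consequence, assuming `ip` and `gpa` in scope (all-equal elements additionally collapse to the `repeated` encoding):

    const ty = try ip.get(gpa, .{ .array_type = .{ .len = 3, .child = .u8_type } });
    const elem = try ip.get(gpa, .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = 'a' } } });
    const v1 = try ip.get(gpa, .{ .aggregate = .{ .ty = ty, .storage = .{ .bytes = "aaa" } } });
    const v2 = try ip.get(gpa, .{ .aggregate = .{ .ty = ty, .storage = .{ .repeated_elem = elem } } });
    std.debug.assert(v1 == v2); // one interned value regardless of storage variant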
pub const Float64 = struct { piece0: u32, @@ -2063,6 +2297,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + ip.funcs_free_list.deinit(gpa); + ip.allocated_funcs.deinit(gpa); + ip.inferred_error_sets_free_list.deinit(gpa); ip.allocated_inferred_error_sets.deinit(gpa); @@ -2235,6 +2472,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, .undef => .{ .undef = @intToEnum(Index, data) }, + .runtime_value => { + const val = @intToEnum(Index, data); + return .{ .runtime_value = .{ + .ty = ip.typeOf(val), + .val = val, + } }; + }, .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -2251,18 +2495,11 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .val = payload_val, } }; }, - .ptr_var => { - const info = ip.extraData(PtrVar, data); + .ptr_decl => { + const info = ip.extraData(PtrDecl, data); return .{ .ptr = .{ .ty = info.ty, - .addr = .{ .@"var" = .{ - .init = info.init, - .owner_decl = info.owner_decl, - .lib_name = info.lib_name, - .is_const = info.flags.is_const, - .is_threadlocal = info.flags.is_threadlocal, - .is_weak_linkage = info.flags.is_weak_linkage, - } }, + .addr = .{ .decl = info.decl }, } }; }, .ptr_mut_decl => { @@ -2275,15 +2512,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }, } }; }, - .ptr_decl => { - const info = ip.extraData(PtrDecl, data); - return .{ .ptr = .{ - .ty = info.ty, - .addr = .{ .decl = info.decl }, - } }; - }, .ptr_int => { - const info = ip.extraData(PtrInt, data); + const info = ip.extraData(PtrAddr, data); return .{ .ptr = .{ .ty = info.ty, .addr = .{ .int = info.addr }, @@ -2383,6 +2613,17 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .storage = .{ .u64 = info.value }, } }; }, + .int_lazy_align, .int_lazy_size => |tag| { + const info = ip.extraData(IntLazy, data); + return .{ .int = .{ + .ty = info.ty, + .storage = switch (tag) { + .int_lazy_align => .{ .lazy_align = info.lazy_ty }, + .int_lazy_size => .{ .lazy_size = info.lazy_ty }, + else => unreachable, + }, + } }; + }, .float_f16 => .{ .float = .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, @@ -2415,8 +2656,21 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .ty = .comptime_float_type, .storage = .{ .f128 = ip.extraData(Float128, data).get() }, } }, - .extern_func => @panic("TODO"), - .func => @panic("TODO"), + .variable => { + const extra = ip.extraData(Variable, data); + return .{ .variable = .{ + .ty = if (extra.flags.has_init) ip.typeOf(extra.init) else extra.init, + .init = if (extra.flags.has_init) extra.init else .none, + .decl = extra.decl, + .lib_name = extra.lib_name, + .is_extern = extra.flags.is_extern, + .is_const = extra.flags.is_const, + .is_threadlocal = extra.flags.is_threadlocal, + .is_weak_linkage = extra.flags.is_weak_linkage, + } }; + }, + .extern_func => .{ .extern_func = ip.extraData(Key.ExternFunc, data) }, + .func => .{ .func = ip.extraData(Key.Func, data) }, .only_possible_value => { const ty = @intToEnum(Index, data); return switch (ip.indexToKey(ty)) { @@ -2438,6 +2692,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { else => unreachable, }; }, + .bytes => { + const extra = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLen(extra.ty)); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] }, + } }; + }, .aggregate => { const 
extra = ip.extraDataTrail(Aggregate, data); const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty)); @@ -2455,6 +2717,22 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .error_union_error => { + const extra = ip.extraData(Key.Error, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .err_name = extra.name }, + } }; + }, + .error_union_payload => { + const extra = ip.extraData(TypeValue, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .payload = extra.val }, + } }; + }, + .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; } @@ -2547,7 +2825,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { _ = ip.map.pop(); var new_key = key; new_key.ptr_type.size = .Many; - const ptr_type_index = try get(ip, gpa, new_key); + const ptr_type_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ @@ -2677,6 +2955,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(ty), }); }, + .runtime_value => |runtime_value| { + assert(runtime_value.ty == ip.typeOf(runtime_value.val)); + ip.items.appendAssumeCapacity(.{ + .tag = .runtime_value, + .data = @enumToInt(runtime_value.val), + }); + }, .struct_type => |struct_type| { ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ @@ -2809,7 +3094,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); }, - .extern_func => @panic("TODO"), + .variable => |variable| { + const has_init = variable.init != .none; + if (has_init) assert(variable.ty == ip.typeOf(variable.init)); + ip.items.appendAssumeCapacity(.{ + .tag = .variable, + .data = try ip.addExtra(gpa, Variable{ + .init = if (has_init) variable.init else variable.ty, + .decl = variable.decl, + .lib_name = variable.lib_name, + .flags = .{ + .has_init = has_init, + .is_extern = variable.is_extern, + .is_const = variable.is_const, + .is_threadlocal = variable.is_threadlocal, + .is_weak_linkage = variable.is_weak_linkage, + }, + }), + }); + }, + + .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ + .tag = .extern_func, + .data = try ip.addExtra(gpa, extern_func), + }), + + .func => |func| ip.items.appendAssumeCapacity(.{ + .tag = .func, + .data = try ip.addExtra(gpa, func), + }), .ptr => |ptr| { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; @@ -2817,20 +3130,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .none => { assert(ptr_type.size != .Slice); switch (ptr.addr) { - .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_var, - .data = try ip.addExtra(gpa, PtrVar{ - .ty = ptr.ty, - .init = @"var".init, - .owner_decl = @"var".owner_decl, - .lib_name = @"var".lib_name, - .flags = .{ - .is_const = @"var".is_const, - .is_threadlocal = @"var".is_threadlocal, - .is_weak_linkage = @"var".is_weak_linkage, - }, - }), - }), .decl => |decl| ip.items.appendAssumeCapacity(.{ .tag = .ptr_decl, .data = try ip.addExtra(gpa, PtrDecl{ @@ -2846,31 +3145,41 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .runtime_index = mut_decl.runtime_index, 
                            }),
                        }),
-                        .int => |int| ip.items.appendAssumeCapacity(.{
-                            .tag = .ptr_int,
-                            .data = try ip.addExtra(gpa, PtrInt{
-                                .ty = ptr.ty,
-                                .addr = int,
-                            }),
-                        }),
-                        .eu_payload, .opt_payload => |data| ip.items.appendAssumeCapacity(.{
-                            .tag = switch (ptr.addr) {
-                                .eu_payload => .ptr_eu_payload,
-                                .opt_payload => .ptr_opt_payload,
-                                else => unreachable,
-                            },
-                            .data = @enumToInt(data),
-                        }),
-                        .comptime_field => |field_val| ip.items.appendAssumeCapacity(.{
-                            .tag = .ptr_comptime_field,
-                            .data = try ip.addExtra(gpa, PtrComptimeField{
-                                .ty = ptr.ty,
-                                .field_val = field_val,
-                            }),
-                        }),
+                        .int => |int| {
+                            assert(int != .none);
+                            ip.items.appendAssumeCapacity(.{
+                                .tag = .ptr_int,
+                                .data = try ip.addExtra(gpa, PtrAddr{
+                                    .ty = ptr.ty,
+                                    .addr = int,
+                                }),
+                            });
+                        },
+                        .eu_payload, .opt_payload => |data| {
+                            assert(data != .none);
+                            ip.items.appendAssumeCapacity(.{
+                                .tag = switch (ptr.addr) {
+                                    .eu_payload => .ptr_eu_payload,
+                                    .opt_payload => .ptr_opt_payload,
+                                    else => unreachable,
+                                },
+                                .data = @enumToInt(data),
+                            });
+                        },
+                        .comptime_field => |field_val| {
+                            assert(field_val != .none);
+                            ip.items.appendAssumeCapacity(.{
+                                .tag = .ptr_comptime_field,
+                                .data = try ip.addExtra(gpa, PtrComptimeField{
+                                    .ty = ptr.ty,
+                                    .field_val = field_val,
+                                }),
+                            });
+                        },
                        .elem, .field => |base_index| {
+                            assert(base_index.base != .none);
                            _ = ip.map.pop();
-                            const index_index = try get(ip, gpa, .{ .int = .{
+                            const index_index = try ip.get(gpa, .{ .int = .{
                                .ty = .usize_type,
                                .storage = .{ .u64 = base_index.index },
                            } });
@@ -2894,7 +3203,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                    new_key.ptr.ty = ip.slicePtrType(ptr.ty);
                    new_key.ptr.len = .none;
                    assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many);
-                    const ptr_index = try get(ip, gpa, new_key);
+                    const ptr_index = try ip.get(gpa, new_key);
                    assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
                    try ip.items.ensureUnusedCapacity(gpa, 1);
                    ip.items.appendAssumeCapacity(.{
@@ -2921,8 +3230,25 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
        },

        .int => |int| b: {
+            assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type);
+            switch (int.storage) {
+                .u64, .i64, .big_int => {},
+                .lazy_align, .lazy_size => |lazy_ty| {
+                    ip.items.appendAssumeCapacity(.{
+                        .tag = switch (int.storage) {
+                            else => unreachable,
+                            .lazy_align => .int_lazy_align,
+                            .lazy_size => .int_lazy_size,
+                        },
+                        .data = try ip.addExtra(gpa, IntLazy{
+                            .ty = int.ty,
+                            .lazy_ty = lazy_ty,
+                        }),
+                    });
+                    return @intToEnum(Index, ip.items.len - 1);
+                },
+            }
            switch (int.ty) {
-                .none => unreachable,
                .u8_type => switch (int.storage) {
                    .big_int => |big_int| {
                        ip.items.appendAssumeCapacity(.{
@@ -2938,6 +3264,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                        });
                        break :b;
                    },
+                    .lazy_align, .lazy_size => unreachable,
                },
                .u16_type => switch (int.storage) {
                    .big_int => |big_int| {
                        ip.items.appendAssumeCapacity(.{
@@ -2954,6 +3281,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                        });
                        break :b;
                    },
+                    .lazy_align, .lazy_size => unreachable,
                },
                .u32_type => switch (int.storage) {
                    .big_int => |big_int| {
                        ip.items.appendAssumeCapacity(.{
@@ -2970,6 +3298,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                        });
                        break :b;
                    },
+                    .lazy_align, .lazy_size => unreachable,
                },
                .i32_type => switch (int.storage) {
                    .big_int => |big_int| {
                        ip.items.appendAssumeCapacity(.{
@@ -2987,6 +3316,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                        });
                        break :b;
                    },
+                    .lazy_align, .lazy_size => unreachable,
                },
                .usize_type => switch (int.storage) {
                    .big_int => |big_int| {
@@ -3007,6 +3337,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                            break :b;
                        }
                    },
+                    .lazy_align, .lazy_size => unreachable,
                },
                .comptime_int_type => switch (int.storage) {
                    .big_int => |big_int| {
@@ -3041,6 +3372,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                            break :b;
                        }
                    },
+                    .lazy_align, .lazy_size => unreachable,
                },
                else => {},
            }
@@ -3077,9 +3409,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                    const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
                    try addInt(ip, gpa, int.ty, tag, big_int.limbs);
                },
+                .lazy_align, .lazy_size => unreachable,
            }
        },

+        .err => |err| ip.items.appendAssumeCapacity(.{
+            .tag = .error_set_error,
+            .data = try ip.addExtra(gpa, err),
+        }),
+
+        .error_union => |error_union| ip.items.appendAssumeCapacity(switch (error_union.val) {
+            .err_name => |err_name| .{
+                .tag = .error_union_error,
+                .data = try ip.addExtra(gpa, Key.Error{
+                    .ty = error_union.ty,
+                    .name = err_name,
+                }),
+            },
+            .payload => |payload| .{
+                .tag = .error_union_payload,
+                .data = try ip.addExtra(gpa, TypeValue{
+                    .ty = error_union.ty,
+                    .val = payload,
+                }),
+            },
+        }),
+
+        .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{
+            .tag = .enum_literal,
+            .data = @enumToInt(enum_literal),
+        }),
+
        .enum_tag => |enum_tag| {
            assert(enum_tag.ty != .none);
            assert(enum_tag.int != .none);
@@ -3131,9 +3491,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
        },

        .aggregate => |aggregate| {
-            assert(aggregate.ty != .none);
+            const ty_key = ip.indexToKey(aggregate.ty);
            const aggregate_len = ip.aggregateTypeLen(aggregate.ty);
            switch (aggregate.storage) {
+                .bytes => {
+                    assert(ty_key.array_type.child == .u8_type);
+                },
                .elems => |elems| {
                    assert(elems.len == aggregate_len);
                    for (elems) |elem| assert(elem != .none);
@@ -3151,9 +3514,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                return @intToEnum(Index, ip.items.len - 1);
            }

-            switch (ip.indexToKey(aggregate.ty)) {
+            switch (ty_key) {
                .anon_struct_type => |anon_struct_type| {
                    if (switch (aggregate.storage) {
+                        .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| {
+                            if (value != ip.getIfExists(.{ .int = .{
+                                .ty = .u8_type,
+                                .storage = .{ .u64 = byte },
+                            } })) break false;
+                        } else true,
                        .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems),
                        .repeated_elem => |elem| for (anon_struct_type.values) |value| {
                            if (value != elem) break false;
@@ -3173,34 +3542,80 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
            }

            if (switch (aggregate.storage) {
+                .bytes => |bytes| for (bytes[1..]) |byte| {
+                    if (byte != bytes[0]) break false;
+                } else true,
                .elems => |elems| for (elems[1..]) |elem| {
                    if (elem != elems[0]) break false;
                } else true,
                .repeated_elem => true,
            }) {
+                const elem = switch (aggregate.storage) {
+                    .bytes => |bytes| elem: {
+                        _ = ip.map.pop();
+                        const elem = try ip.get(gpa, .{ .int = .{
+                            .ty = .u8_type,
+                            .storage = .{ .u64 = bytes[0] },
+                        } });
+                        assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
+                        try ip.items.ensureUnusedCapacity(gpa, 1);
+                        break :elem elem;
+                    },
+                    .elems => |elems| elems[0],
+                    .repeated_elem => |elem| elem,
+                };
+
                try ip.extra.ensureUnusedCapacity(
                    gpa,
                    @typeInfo(Repeated).Struct.fields.len,
                );
-
                ip.items.appendAssumeCapacity(.{
                    .tag = .repeated,
                    .data = ip.addExtraAssumeCapacity(Repeated{
                        .ty = aggregate.ty,
-                        .elem_val = switch (aggregate.storage) {
-                            .elems => |elems| elems[0],
-                            .repeated_elem => |elem| elem,
-                        },
+                        .elem_val = elem,
                    }),
                });
                return @intToEnum(Index, ip.items.len - 1);
            }

+            switch (ty_key) {
+                .array_type => |array_type| if (array_type.child == .u8_type) {
+                    const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none);
+                    try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1);
+                    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
+                    var buffer: Key.Int.Storage.BigIntSpace = undefined;
+                    switch (aggregate.storage) {
+                        .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
+                        .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity(
+                            ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
+                        ),
+                        .repeated_elem => |elem| @memset(
+                            ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len),
+                            ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
+                        ),
+                    }
+                    if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity(
+                        ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch
+                            unreachable,
+                    );
+                    const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel);
+                    ip.items.appendAssumeCapacity(.{
+                        .tag = .bytes,
+                        .data = ip.addExtraAssumeCapacity(Bytes{
+                            .ty = aggregate.ty,
+                            .bytes = bytes.toString(),
+                        }),
+                    });
+                    return @intToEnum(Index, ip.items.len - 1);
+                },
+                else => {},
+            }
+
            try ip.extra.ensureUnusedCapacity(
                gpa,
                @typeInfo(Aggregate).Struct.fields.len + aggregate_len,
            );
-
            ip.items.appendAssumeCapacity(.{
                .tag = .aggregate,
                .data = ip.addExtraAssumeCapacity(Aggregate{
@@ -3423,12 +3838,16 @@ pub fn finishGetEnum(
    return @intToEnum(Index, ip.items.len - 1);
}

-pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
+pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
    const adapter: KeyAdapter = .{ .intern_pool = ip };
-    const index = ip.map.getIndexAdapted(key, adapter).?;
+    const index = ip.map.getIndexAdapted(key, adapter) orelse return null;
    return @intToEnum(Index, index);
}

+pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
+    return ip.getIfExists(key).?;
+}
+
fn addStringsToMap(
    ip: *InternPool,
    gpa: Allocator,
@@ -3500,9 +3919,11 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
            Module.Decl.Index => @enumToInt(@field(extra, field.name)),
            Module.Namespace.Index => @enumToInt(@field(extra, field.name)),
            Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)),
+            Module.Fn.Index => @enumToInt(@field(extra, field.name)),
            MapIndex => @enumToInt(@field(extra, field.name)),
            OptionalMapIndex => @enumToInt(@field(extra, field.name)),
            RuntimeIndex => @enumToInt(@field(extra, field.name)),
+            String => @enumToInt(@field(extra, field.name)),
            NullTerminatedString => @enumToInt(@field(extra, field.name)),
            OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)),
            i32 => @bitCast(u32, @field(extra, field.name)),
@@ -3510,7 +3931,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
            TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
            Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
            Pointer.VectorIndex => @enumToInt(@field(extra, field.name)),
-            PtrVar.Flags => @bitCast(u32, @field(extra, field.name)),
+            Variable.Flags => @bitCast(u32, @field(extra, field.name)),
            else => @compileError("bad field type: " ++
                @typeName(field.type)),
        });
    }
@@ -3566,9 +3987,11 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
            Module.Decl.Index => @intToEnum(Module.Decl.Index, int32),
            Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32),
            Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32),
+            Module.Fn.Index => @intToEnum(Module.Fn.Index, int32),
            MapIndex => @intToEnum(MapIndex, int32),
            OptionalMapIndex => @intToEnum(OptionalMapIndex, int32),
            RuntimeIndex => @intToEnum(RuntimeIndex, int32),
+            String => @intToEnum(String, int32),
            NullTerminatedString => @intToEnum(NullTerminatedString, int32),
            OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32),
            i32 => @bitCast(i32, int32),
@@ -3576,7 +3999,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
            TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
            Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
            Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32),
-            PtrVar.Flags => @bitCast(PtrVar.Flags, int32),
+            Variable.Flags => @bitCast(Variable.Flags, int32),
            else => @compileError("bad field type: " ++
                @typeName(field.type)),
        };
    }
@@ -3700,8 +4123,8 @@ pub fn childType(ip: InternPool, i: Index) Index {
/// Given a slice type, returns the type of the ptr field.
pub fn slicePtrType(ip: InternPool, i: Index) Index {
    switch (i) {
-        .const_slice_u8_type => return .manyptr_const_u8_type,
-        .const_slice_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type,
+        .slice_const_u8_type => return .manyptr_const_u8_type,
+        .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type,
        else => {},
    }
    const item = ip.items.get(@enumToInt(i));
@@ -3830,6 +4253,8 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
                } },
            } });
        },
+
+        .lazy_align, .lazy_size => unreachable,
    }
}
@@ -3862,6 +4287,14 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType {
    }
}

+pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex {
+    assert(val != .none);
+    const tags = ip.items.items(.tag);
+    if (tags[@enumToInt(val)] != .func) return .none;
+    const datas = ip.items.items(.data);
+    return ip.extraData(Key.Func, datas[@enumToInt(val)]).index.toOptional();
+}
+
pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex {
    assert(val != .none);
    const tags = ip.items.items(.tag);
@@ -3891,6 +4324,15 @@ pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool {
    return tags[@enumToInt(ty)] == .type_inferred_error_set;
}

+/// This is only legal because the initializer is not part of the hash.
+pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
+    assert(ip.items.items(.tag)[@enumToInt(index)] == .variable);
+    const field_index = inline for (@typeInfo(Variable).Struct.fields, 0..) |field, field_index| {
+        if (comptime std.mem.eql(u8, field.name, "init")) break field_index;
+    } else unreachable;
+    ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index);
+}
+
pub fn dump(ip: InternPool) void {
    dumpFallible(ip, std.heap.page_allocator) catch return;
}
@@ -3903,10 +4345,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
        (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
    const unions_size = ip.allocated_unions.len *
        (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
+    const funcs_size = ip.allocated_funcs.len *
+        (@sizeOf(Module.Fn) + @sizeOf(Module.Decl));

    // TODO: map overhead size is not taken into account
    const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size +
-        structs_size + unions_size;
+        structs_size + unions_size + funcs_size;

    std.debug.print(
        \\InternPool size: {d} bytes
        \\  {d} items: {d} bytes
        \\  {d} extra: {d} bytes
        \\  {d} limbs: {d} bytes
        \\  {d} structs: {d} bytes
        \\  {d} unions: {d} bytes
+        \\  {d} funcs: {d} bytes
        \\
    , .{
        total_size,
@@ -3928,6 +4373,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
        structs_size,
        ip.allocated_unions.len,
        unions_size,
+        ip.allocated_funcs.len,
+        funcs_size,
    });

    const tags = ip.items.items(.tag);
@@ -3982,12 +4429,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
            },

            .undef => 0,
+            .runtime_value => 0,
            .simple_type => 0,
            .simple_value => 0,
-            .ptr_var => @sizeOf(PtrVar),
            .ptr_decl => @sizeOf(PtrDecl),
            .ptr_mut_decl => @sizeOf(PtrMutDecl),
-            .ptr_int => @sizeOf(PtrInt),
+            .ptr_int => @sizeOf(PtrAddr),
            .ptr_eu_payload => 0,
            .ptr_opt_payload => 0,
            .ptr_comptime_field => @sizeOf(PtrComptimeField),
@@ -4011,8 +4458,20 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
                const int = ip.limbData(Int, data);
                break :b @sizeOf(Int) + int.limbs_len * 8;
            },
+
+            .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy),
+
+            .error_set_error, .error_union_error => @sizeOf(Key.Error),
+            .error_union_payload => @sizeOf(TypeValue),
+            .enum_literal => 0,
            .enum_tag => @sizeOf(Key.EnumTag),
+            .bytes => b: {
+                const info = ip.extraData(Bytes, data);
+                const len = @intCast(u32, ip.aggregateTypeLen(info.ty));
+                break :b @sizeOf(Bytes) + len +
+                    @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0);
+            },
            .aggregate => b: {
                const info = ip.extraData(Aggregate, data);
                const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty));
@@ -4028,8 +4487,9 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
            .float_c_longdouble_f80 => @sizeOf(Float80),
            .float_c_longdouble_f128 => @sizeOf(Float128),
            .float_comptime_float => @sizeOf(Float128),
-            .extern_func => @panic("TODO"),
-            .func => @panic("TODO"),
+            .variable => @sizeOf(Variable) + @sizeOf(Module.Decl),
+            .extern_func => @sizeOf(Key.ExternFunc) + @sizeOf(Module.Decl),
+            .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl),
            .only_possible_value => 0,
            .union_value => @sizeOf(Key.Union),
        });
@@ -4071,6 +4531,14 @@ pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Un
    return ip.allocated_unions.at(@enumToInt(index));
}

+pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn {
+    return ip.allocated_funcs.at(@enumToInt(index));
+}
+
+pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn {
+    return ip.allocated_funcs.at(@enumToInt(index));
+}
+
pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet {
    return ip.allocated_inferred_error_sets.at(@enumToInt(index));
}
@@ -4117,6 +4585,27 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index)
    };
}

+pub fn createFunc(
+    ip: *InternPool,
+    gpa: Allocator,
+    initialization: Module.Fn,
+) Allocator.Error!Module.Fn.Index {
+    if (ip.funcs_free_list.popOrNull()) |index| {
+        ip.funcPtr(index).* = initialization;
+        return index;
+    }
+    const ptr = try ip.allocated_funcs.addOne(gpa);
+    ptr.* = initialization;
+    return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1);
+}
+
+pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void {
+    ip.funcPtr(index).* = undefined;
+    ip.funcs_free_list.append(gpa, index) catch {
+        // In order to keep `destroyFunc` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Fn until garbage collection.
+    };
+}
+
pub fn createInferredErrorSet(
    ip: *InternPool,
    gpa: Allocator,
@@ -4142,9 +4629,25 @@ pub fn getOrPutString(
    s: []const u8,
) Allocator.Error!NullTerminatedString {
    const string_bytes = &ip.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len);
    try string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
    string_bytes.appendSliceAssumeCapacity(s);
+    string_bytes.appendAssumeCapacity(0);
+    return ip.getOrPutTrailingString(gpa, s.len + 1);
+}
+
+/// Uses the last len bytes of ip.string_bytes as the key.
+pub fn getOrPutTrailingString(
+    ip: *InternPool,
+    gpa: Allocator,
+    len: usize,
+) Allocator.Error!NullTerminatedString {
+    const string_bytes = &ip.string_bytes;
+    const str_index = @intCast(u32, string_bytes.items.len - len);
+    if (len > 0 and string_bytes.getLast() == 0) {
+        _ = string_bytes.pop();
+    } else {
+        try string_bytes.ensureUnusedCapacity(gpa, 1);
+    }
    const key: []const u8 = string_bytes.items[str_index..];
    const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{
        .bytes = string_bytes,
@@ -4179,6 +4682,10 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 {
    return string_bytes[start..end :0];
}

+pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 {
+    return ip.stringToSlice(s.unwrap() orelse return null);
+}
+
pub fn typeOf(ip: InternPool, index: Index) Index {
    return ip.indexToKey(index).typeOf();
}
@@ -4199,7 +4706,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
    };
}

-pub fn isNoReturn(ip: InternPool, ty: InternPool.Index) bool {
+pub fn isNoReturn(ip: InternPool, ty: Index) bool {
    return switch (ty) {
        .noreturn_type => true,
        else => switch (ip.indexToKey(ty)) {
diff --git a/src/Module.zig b/src/Module.zig
index 8174778f48..fa24c237b4 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -109,7 +109,7 @@ memoized_calls: MemoizedCallSet = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
-align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
+align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{},
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
@@ -242,22 +242,23 @@ pub const StringLiteralAdapter = struct {
};

const MonomorphedFuncsSet = std.HashMapUnmanaged(
-    *Fn,
+    Fn.Index,
    void,
    MonomorphedFuncsContext,
    std.hash_map.default_max_load_percentage,
);

const MonomorphedFuncsContext = struct {
-    pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool {
+    mod: *Module,
+
+    pub fn eql(ctx: @This(), a: Fn.Index, b: Fn.Index) bool {
        _ = ctx;
        return a == b;
    }

    /// Must match `Sema.GenericCallAdapter.hash`.
-    pub fn hash(ctx: @This(), key: *Fn) u64 {
-        _ = ctx;
-        return key.hash;
+    pub fn hash(ctx: @This(), key: Fn.Index) u64 {
+        return ctx.mod.funcPtr(key).hash;
    }
};

@@ -272,7 +273,7 @@ pub const MemoizedCall = struct {
    module: *Module,

    pub const Key = struct {
-        func: *Fn,
+        func: Fn.Index,
        args: []TypedValue,
    };

@@ -652,21 +653,12 @@ pub const Decl = struct {
    pub fn clearValues(decl: *Decl, mod: *Module) void {
        const gpa = mod.gpa;
-        if (decl.getExternFn()) |extern_fn| {
-            extern_fn.deinit(gpa);
-            gpa.destroy(extern_fn);
-        }
-        if (decl.getFunction()) |func| {
+        if (decl.getFunctionIndex(mod).unwrap()) |func| {
            _ = mod.align_stack_fns.remove(func);
-            if (func.comptime_args != null) {
-                _ = mod.monomorphed_funcs.remove(func);
+            if (mod.funcPtr(func).comptime_args != null) {
+                _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod });
            }
-            func.deinit(gpa);
-            gpa.destroy(func);
-        }
-        if (decl.getVariable()) |variable| {
-            variable.deinit(gpa);
-            gpa.destroy(variable);
+            mod.destroyFunc(func);
        }
        if (decl.value_arena) |value_arena| {
            if (decl.owns_tv) {
@@ -835,11 +827,11 @@ pub const Decl = struct {

    /// If the Decl has a value and it is a struct, return it,
    /// otherwise null.
-    pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct {
-        return mod.structPtrUnwrap(getStructIndex(decl, mod));
+    pub fn getStruct(decl: Decl, mod: *Module) ?*Struct {
+        return mod.structPtrUnwrap(decl.getStructIndex(mod));
    }

-    pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
+    pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
        if (!decl.owns_tv) return .none;
        if (decl.val.ip_index == .none) return .none;
        return mod.intern_pool.indexToStructType(decl.val.ip_index);
@@ -847,7 +839,7 @@ pub const Decl = struct {

    /// If the Decl has a value and it is a union, return it,
    /// otherwise null.
-    pub fn getUnion(decl: *Decl, mod: *Module) ?*Union {
+    pub fn getUnion(decl: Decl, mod: *Module) ?*Union {
        if (!decl.owns_tv) return null;
        if (decl.val.ip_index == .none) return null;
        return mod.typeToUnion(decl.val.toType());
@@ -855,32 +847,30 @@ pub const Decl = struct {

    /// If the Decl has a value and it is a function, return it,
    /// otherwise null.
-    pub fn getFunction(decl: *const Decl) ?*Fn {
-        if (!decl.owns_tv) return null;
-        const func = (decl.val.castTag(.function) orelse return null).data;
-        return func;
+    pub fn getFunction(decl: Decl, mod: *Module) ?*Fn {
+        return mod.funcPtrUnwrap(decl.getFunctionIndex(mod));
+    }
+
+    pub fn getFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex {
+        return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none;
    }

    /// If the Decl has a value and it is an extern function, returns it,
    /// otherwise null.
-    pub fn getExternFn(decl: *const Decl) ?*ExternFn {
-        if (!decl.owns_tv) return null;
-        const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
-        return extern_fn;
+    pub fn getExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc {
+        return if (decl.owns_tv) decl.val.getExternFunc(mod) else null;
    }

    /// If the Decl has a value and it is a variable, returns it,
    /// otherwise null.
-    pub fn getVariable(decl: *const Decl) ?*Var {
-        if (!decl.owns_tv) return null;
-        const variable = (decl.val.castTag(.variable) orelse return null).data;
-        return variable;
+    pub fn getVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable {
+        return if (decl.owns_tv) decl.val.getVariable(mod) else null;
    }

    /// Gets the namespace that this Decl creates by being a struct, union,
    /// enum, or opaque.
    /// Only returns it if the Decl is the owner.
-    pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex {
+    pub fn getInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex {
        if (!decl.owns_tv) return .none;
        return switch (decl.val.ip_index) {
            .empty_struct_type => .none,
@@ -896,8 +886,8 @@ pub const Decl = struct {
    }

    /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer.
-    pub fn getInnerNamespace(decl: *Decl, mod: *Module) ?*Namespace {
-        return if (getInnerNamespaceIndex(decl, mod).unwrap()) |i| mod.namespacePtr(i) else null;
+    pub fn getInnerNamespace(decl: Decl, mod: *Module) ?*Namespace {
+        return if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| mod.namespacePtr(i) else null;
    }

    pub fn dump(decl: *Decl) void {
@@ -927,14 +917,11 @@ pub const Decl = struct {
        assert(decl.dependencies.swapRemove(other));
    }

-    pub fn isExtern(decl: Decl) bool {
+    pub fn isExtern(decl: Decl, mod: *Module) bool {
        assert(decl.has_tv);
-        return switch (decl.val.ip_index) {
-            .none => switch (decl.val.tag()) {
-                .extern_fn => true,
-                .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value,
-                else => false,
-            },
+        return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+            .variable => |variable| variable.is_extern,
+            .extern_func => true,
            else => false,
        };
    }
@@ -1494,6 +1481,28 @@ pub const Fn = struct {
    is_noinline: bool,
    calls_or_awaits_errorable_fn: bool = false,

+    pub const Index = enum(u32) {
+        _,
+
+        pub fn toOptional(i: Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(i));
+        }
+    };
+
+    pub const OptionalIndex = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn init(oi: ?Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+        }
+
+        pub fn unwrap(oi: OptionalIndex) ?Index {
+            if (oi == .none) return null;
+            return @intToEnum(Index, @enumToInt(oi));
+        }
+    };
+
    pub const Analysis = enum {
        /// This function has not yet undergone analysis, because we have not
        /// seen a potential runtime call. It may be analyzed in future.
@@ -1519,7 +1528,7 @@ pub const Fn = struct {
    /// or comptime functions.
    pub const InferredErrorSet = struct {
        /// The function from which this error set originates.
-        func: *Fn,
+        func: Fn.Index,

        /// All currently known errors that this error set contains. This includes
        /// direct additions via `return error.Foo;`, and possibly also errors that
@@ -1543,8 +1552,8 @@ pub const Fn = struct {
        pub const Index = enum(u32) {
            _,

-            pub fn toOptional(i: Index) OptionalIndex {
-                return @intToEnum(OptionalIndex, @enumToInt(i));
+            pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+                return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i));
            }
        };

@@ -1552,13 +1561,13 @@ pub const Fn = struct {
            none = std.math.maxInt(u32),
            _,

-            pub fn init(oi: ?Index) OptionalIndex {
-                return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+            pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+                return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none));
            }

-            pub fn unwrap(oi: OptionalIndex) ?Index {
+            pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
                if (oi == .none) return null;
-                return @intToEnum(Index, @enumToInt(oi));
+                return @intToEnum(InferredErrorSet.Index, @enumToInt(oi));
            }
        };

@@ -1587,12 +1596,6 @@ pub const Fn = struct {
        }
    };

-    /// TODO: remove this function
-    pub fn deinit(func: *Fn, gpa: Allocator) void {
-        _ = func;
-        _ = gpa;
-    }
-
    pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool {
        const file = mod.declPtr(func.owner_decl).getFileScope(mod);
@@ -1647,28 +1650,6 @@ pub const Fn = struct {
    }
};

-pub const Var = struct {
-    /// if is_extern == true this is undefined
-    init: Value,
-    owner_decl: Decl.Index,
-
-    /// Library name if specified.
-    /// For example `extern "c" var stderrp = ...` would have 'c' as library name.
-    /// Allocated with Module's allocator; outlives the ZIR code.
-    lib_name: ?[*:0]const u8,
-
-    is_extern: bool,
-    is_mutable: bool,
-    is_threadlocal: bool,
-    is_weak_linkage: bool,
-
-    pub fn deinit(variable: *Var, gpa: Allocator) void {
-        if (variable.lib_name) |lib_name| {
-            gpa.free(mem.sliceTo(lib_name, 0));
-        }
-    }
-};
-
pub const DeclAdapter = struct {
    mod: *Module,

@@ -3472,6 +3453,10 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
    return mod.intern_pool.structPtr(index);
}

+pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn {
+    return mod.intern_pool.funcPtr(index);
+}
+
pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet {
    return mod.intern_pool.inferredErrorSetPtr(index);
}
@@ -3479,7 +3464,11 @@ pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.I
/// This one accepts an index from the InternPool and asserts that it is not
/// the anonymous empty struct type.
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
-    return structPtr(mod, index.unwrap() orelse return null);
+    return mod.structPtr(index.unwrap() orelse return null);
+}
+
+pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn {
+    return mod.funcPtr(index.unwrap() orelse return null);
}

/// Returns true if and only if the Decl is the top level struct associated with a File.
@@ -3952,7 +3941,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
        };
    }

-    if (decl.getFunction()) |func| {
+    if (decl.getFunction(mod)) |func| {
        func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse {
            try file.deleted_decls.append(gpa, decl_index);
            continue;
@@ -4139,7 +4128,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
    try mod.deleteDeclExports(decl_index);

    // Similarly, `@setAlignStack` invocations will be re-discovered.
-    if (decl.getFunction()) |func| {
+    if (decl.getFunctionIndex(mod).unwrap()) |func| {
        _ = mod.align_stack_fns.remove(func);
    }
@@ -4229,10 +4218,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
    }
}

-pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
+pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void {
    const tracy = trace(@src());
    defer tracy.end();

+    const func = mod.funcPtr(func_index);
    const decl_index = func.owner_decl;
    const decl = mod.declPtr(decl_index);

@@ -4264,7 +4254,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
    defer tmp_arena.deinit();
    const sema_arena = tmp_arena.allocator();

-    var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) {
+    var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
        error.AnalysisFail => {
            if (func.state == .in_progress) {
                // If this decl caused the compile error, the analysis field would
@@ -4333,7 +4323,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void

    if (no_bin_file and !dump_llvm_ir) return;

-    comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) {
+    comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.AnalysisFail => {
            decl.analysis = .codegen_failure;
@@ -4363,7 +4353,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void
/// analyzed, and for ensuring it can exist at runtime (see
/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
-pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void {
+pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {
+    const func = mod.funcPtr(func_index);
    const decl_index = func.owner_decl;
    const decl = mod.declPtr(decl_index);

@@ -4401,7 +4392,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {

    // Decl itself is safely analyzed, and body analysis is not yet queued

-    try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
+    try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index });
    if (mod.emit_h != null) {
        // TODO: we ideally only want to do this if the function's type changed
        // since the last update
@@ -4532,8 +4523,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
        .owner_decl = new_decl,
        .owner_decl_index = new_decl_index,
        .func = null,
+        .func_index = .none,
        .fn_ret_ty = Type.void,
        .owner_func = null,
+        .owner_func_index = .none,
    };
    defer sema.deinit();

@@ -4628,8 +4621,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func = null,
+        .func_index = .none,
        .fn_ret_ty = Type.void,
        .owner_func = null,
+        .owner_func_index = .none,
    };
    defer sema.deinit();

@@ -4707,8 +4702,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
        return true;
    }

-    if (decl_tv.val.castTag(.function)) |fn_payload| {
-        const func = fn_payload.data;
+    if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| {
+        const func = mod.funcPtr(func_index);
        const owns_tv = func.owner_decl == decl_index;
        if (owns_tv) {
            var prev_type_has_bits = false;
@@ -4718,7 +4713,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
            if (decl.has_tv) {
                prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod);
                type_changed = !decl.ty.eql(decl_tv.ty, mod);
-                if (decl.getFunction()) |prev_func| {
+                if (decl.getFunction(mod)) |prev_func| {
                    prev_is_inline = prev_func.state == .inline_only;
                }
            }
@@ -4757,38 +4752,25 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
    switch (decl_tv.val.ip_index) {
        .generic_poison => unreachable,
        .unreachable_value => unreachable,
-
-        .none => switch (decl_tv.val.tag()) {
-            .variable => {
-                const variable = decl_tv.val.castTag(.variable).?.data;
-                if (variable.owner_decl == decl_index) {
-                    decl.owns_tv = true;
-                    queue_linker_work = true;
-
-                    const copied_init = try variable.init.copy(decl_arena_allocator);
-                    variable.init = copied_init;
-                }
+        else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) {
+            .variable => |variable| if (variable.decl == decl_index) {
+                decl.owns_tv = true;
+                queue_linker_work = true;
            },
-            .extern_fn => {
-                const extern_fn = decl_tv.val.castTag(.extern_fn).?.data;
-                if (extern_fn.owner_decl == decl_index) {
-                    decl.owns_tv = true;
-                    queue_linker_work = true;
-                    is_extern = true;
-                }
+
+            .extern_func => |extern_fn| if (extern_fn.decl == decl_index) {
+                decl.owns_tv = true;
+                queue_linker_work = true;
+                is_extern = true;
            },
-            .function => {},
+            .func => {},

            else => {
                log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name });
                queue_linker_work = true;
            },
        },
-        else => {
-            log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name });
-            queue_linker_work = true;
-        },
    }

    decl.ty = decl_tv.ty;
@@ -4810,12 +4792,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
        break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr;
    };
    decl.@"addrspace" = blk: {
-        const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.ip_index) {
-            .none => switch (decl_tv.val.tag()) {
-                .function, .extern_fn => .function,
-                .variable => .variable,
-                else => .constant,
-            },
+        const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) {
+            .variable => .variable,
+            .extern_func, .func => .function,
            else => .constant,
        };

@@ -5388,7 +5367,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
    decl.has_align = has_align;
    decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
    decl.zir_decl_index = @intCast(u32, decl_sub_index);
-    if (decl.getFunction()) |_| {
+    if (decl.getFunctionIndex(mod) != .none) {
        switch (comp.bin_file.tag) {
            .coff, .elf, .macho, .plan9 => {
                // TODO Look into detecting when this would be unnecessary by storing enough state
@@ -5572,11 +5551,12 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
    export_owners.deinit(mod.gpa);
}

-pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = mod.gpa;
+    const func = mod.funcPtr(func_index);
    const decl_index = func.owner_decl;
    const decl = mod.declPtr(decl_index);

@@ -5597,8 +5577,10 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
        .owner_decl = decl,
        .owner_decl_index = decl_index,
        .func = func,
+        .func_index = func_index.toOptional(),
        .fn_ret_ty = fn_ty_info.return_type.toType(),
        .owner_func = func,
+        .owner_func_index = func_index.toOptional(),
        .branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
    };
    defer sema.deinit();
@@ -5807,8 +5789,7 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
        for (kv.value) |err| err.deinit(mod.gpa);
    }
    if (decl.has_tv and decl.owns_tv) {
-        if (decl.val.castTag(.function)) |payload| {
-            const func = payload.data;
+        if (decl.getFunctionIndex(mod).unwrap()) |func| {
            _ = mod.align_stack_fns.remove(func);
        }
    }
@@ -5852,6 +5833,14 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void {
    return mod.intern_pool.destroyUnion(mod.gpa, index);
}

+pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index {
+    return mod.intern_pool.createFunc(mod.gpa, initialization);
+}
+
+pub fn destroyFunc(mod: *Module, index: Fn.Index) void {
+    return mod.intern_pool.destroyFunc(mod.gpa, index);
+}
+
pub fn allocateNewDecl(
    mod: *Module,
    namespace: Namespace.Index,
@@ -6499,7 +6488,11 @@ pub fn populateTestFunctions(
        try mod.ensureDeclAnalyzed(decl_index);
    }
    const decl = mod.declPtr(decl_index);
-    const tmp_test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
+    const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
+    const null_usize = try mod.intern(.{ .opt = .{
+        .ty = try mod.intern(.{ .opt_type = .usize_type }),
+        .val = .none,
+    } });

    const array_decl_index = d: {
        // Add mod.test_functions to an array decl then make the test_functions
@@ -6512,7 +6505,7 @@ pub fn populateTestFunctions(
        const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
            .ty = try mod.arrayType(.{
                .len = test_fn_vals.len,
-                .child = tmp_test_fn_ty.ip_index,
+                .child = test_fn_ty.ip_index,
                .sentinel = .none,
            }),
            .val = try Value.Tag.aggregate.create(arena, test_fn_vals),
@@ -6530,7 +6523,7 @@ pub fn populateTestFunctions(
                errdefer name_decl_arena.deinit();
                const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice);
                const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
-                    .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod),
+                    .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }),
                    .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes),
                });
                try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena);
@@ -6540,16 +6533,24 @@ pub fn populateTestFunctions(
            array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal);
            try mod.linkerUpdateDecl(test_name_decl_index);

-            const field_vals = try arena.create([3]Value);
-            field_vals.* = .{
-                try Value.Tag.slice.create(arena, .{
-                    .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index),
-                    .len = try mod.intValue(Type.usize, test_name_slice.len),
-                }), // name
-                try Value.Tag.decl_ref.create(arena, test_decl_index), // func
-                Value.null, // async_frame_size
+            const test_fn_fields = .{
+                // name
+                try mod.intern(.{ .ptr = .{
+                    .ty = .slice_const_u8_type,
+                    .addr = .{ .decl = test_name_decl_index },
+                } }),
+                // func
+                try mod.intern(.{ .ptr = .{
+                    .ty = test_decl.ty.ip_index,
+                    .addr = .{ .decl = test_decl_index },
+                } }),
+                // async_frame_size
+                null_usize,
            };
-            test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals);
+            test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{
+                .ty = test_fn_ty.ip_index,
+                .storage = .{ .elems = &test_fn_fields },
+            } })).toValue();
        }

        try array_decl.finalizeNewArena(&new_decl_arena);
@@ -6558,36 +6559,25 @@ pub fn populateTestFunctions(
    try mod.linkerUpdateDecl(array_decl_index);

    {
-        var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
-        errdefer new_decl_arena.deinit();
-        const arena = new_decl_arena.allocator();
-
-        {
-            // This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
-            const new_ty = try Type.ptr(arena, mod, .{
-                .size = .Slice,
-                .pointee_type = tmp_test_fn_ty,
-                .mutable = false,
-                .@"addrspace" = .generic,
-            });
-            const new_var = try gpa.create(Var);
-            errdefer gpa.destroy(new_var);
-            new_var.* = decl.val.castTag(.variable).?.data.*;
-            new_var.init = try Value.Tag.slice.create(arena, .{
-                .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index),
-                .len = try mod.intValue(Type.usize, mod.test_functions.count()),
-            });
-            const new_val = try Value.Tag.variable.create(arena, new_var);
-
-            // Since we are replacing the Decl's value we must perform cleanup on the
-            // previous value.
-            decl.clearValues(mod);
-            decl.ty = new_ty;
-            decl.val = new_val;
-            decl.has_tv = true;
-        }
+        const new_ty = try mod.ptrType(.{
+            .elem_type = test_fn_ty.ip_index,
+            .is_const = true,
+            .size = .Slice,
+        });
+        const new_val = decl.val;
+        const new_init = try mod.intern(.{ .ptr = .{
+            .ty = new_ty.ip_index,
+            .addr = .{ .decl = array_decl_index },
+            .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index,
+        } });
+        mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init);

-        try decl.finalizeNewArena(&new_decl_arena);
+        // Since we are replacing the Decl's value we must perform cleanup on the
+        // previous value.
+        decl.clearValues(mod);
+        decl.ty = new_ty;
+        decl.val = new_val;
+        decl.has_tv = true;
    }
    try mod.linkerUpdateDecl(decl_index);
}
@@ -6660,50 +6650,47 @@ fn reportRetryableFileError(
}

pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void {
-    if (val.ip_index != .none) return;
-    switch (val.tag()) {
-        .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index),
-        .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl),
-        .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl),
-        .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl),
-        .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data),
-
-        .repeated,
-        .eu_payload,
-        .opt_payload,
-        .empty_array_sentinel,
-        => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data),
-
-        .eu_payload_ptr,
-        .opt_payload_ptr,
-        => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr),
-
-        .slice => {
-            const slice = val.cast(Value.Payload.Slice).?.data;
-            mod.markReferencedDeclsAlive(slice.ptr);
-            mod.markReferencedDeclsAlive(slice.len);
-        },
-
-        .elem_ptr => {
-            const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data;
-            return mod.markReferencedDeclsAlive(elem_ptr.array_ptr);
-        },
-        .field_ptr => {
-            const field_ptr = val.cast(Value.Payload.FieldPtr).?.data;
-            return mod.markReferencedDeclsAlive(field_ptr.container_ptr);
-        },
-        .aggregate => {
-            for (val.castTag(.aggregate).?.data) |field_val| {
-                mod.markReferencedDeclsAlive(field_val);
-            }
+    switch (val.ip_index) {
+        .none => switch (val.tag()) {
+            .aggregate => {
+                for (val.castTag(.aggregate).?.data) |field_val| {
+                    mod.markReferencedDeclsAlive(field_val);
+                }
+            },
+            .@"union" => {
+                const data = val.castTag(.@"union").?.data;
+                mod.markReferencedDeclsAlive(data.tag);
+                mod.markReferencedDeclsAlive(data.val);
+            },
+            else => {},
        },
-        .@"union" => {
-            const data = val.cast(Value.Payload.Union).?.data;
-            mod.markReferencedDeclsAlive(data.tag);
-            mod.markReferencedDeclsAlive(data.val);
+        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            .variable => |variable| mod.markDeclIndexAlive(variable.decl),
+            .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl),
+            .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
+            .error_union => |error_union| switch (error_union.val) {
+                .err_name => {},
+                .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()),
+            },
+            .ptr => |ptr| {
+                switch (ptr.addr) {
+                    .decl => |decl| mod.markDeclIndexAlive(decl),
+                    .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl),
+                    .int, .comptime_field => {},
+                    .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()),
+                    .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()),
+                }
+                if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue());
+            },
+            .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()),
+            .aggregate => |aggregate| for (aggregate.storage.values()) |elem|
+                mod.markReferencedDeclsAlive(elem.toValue()),
+            .un => |un| {
+                mod.markReferencedDeclsAlive(un.tag.toValue());
+                mod.markReferencedDeclsAlive(un.val.toValue());
+            },
+            else => {},
        },
-
-        else => {},
    }
}
@@ -7075,6 +7062,12 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
            return @intCast(u16, big.bitCountTwosComp());
        },
+        .lazy_align => |lazy_ty| {
+            return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign);
+        },
+        .lazy_size => |lazy_ty| {
+            return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign);
+        },
    }
}
diff --git a/src/Sema.zig b/src/Sema.zig
index 7df6e44898..d9b346e638 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -28,10 +28,12 @@ owner_decl_index: Decl.Index,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
owner_func: ?*Module.Fn,
+owner_func_index: Module.Fn.OptionalIndex,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func` and then diverges in the case of
/// an inline or comptime function call.
func: ?*Module.Fn,
+func_index: Module.Fn.OptionalIndex,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// When semantic analysis needs to know the return type of the function whose body
@@ -65,7 +67,7 @@ comptime_args_fn_inst: Zir.Inst.Index = 0,
/// to use this instead of allocating a fresh one. This avoids an unnecessary
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
-preallocated_new_func: ?*Module.Fn = null,
+preallocated_new_func: Module.Fn.OptionalIndex = .none,
/// The key is types that must be fully resolved prior to machine code
/// generation pass. Types are added to this set when resolving them
/// immediately could cause a dependency loop, but they do need to be resolved
@@ -92,7 +94,7 @@ unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}
const std = @import("std");
const math = std.math;
const mem = std.mem;
-const Allocator = std.mem.Allocator;
+const Allocator = mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);

@@ -1777,7 +1779,7 @@ pub fn resolveConstString(
    reason: []const u8,
) ![]u8 {
    const air_inst = try sema.resolveInst(zir_ref);
-    const wanted_type = Type.const_slice_u8;
+    const wanted_type = Type.slice_const_u8;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
    return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod);
@@ -1866,11 +1868,10 @@ fn resolveConstMaybeUndefVal(
    if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| {
        switch (val.ip_index) {
            .generic_poison => return error.GenericPoison,
-            .none => switch (val.tag()) {
+            else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
                .variable => return sema.failWithNeededComptime(block, src, reason),
                else => return val,
            },
-            else => return val,
        }
    }
    return sema.failWithNeededComptime(block, src, reason);
@@ -1889,11 +1890,11 @@ fn resolveConstValue(
        switch (val.ip_index) {
            .generic_poison => return error.GenericPoison,
            .undef => return sema.failWithUseOfUndef(block, src),
-            .none => switch (val.tag()) {
+            else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
+                .undef => return sema.failWithUseOfUndef(block, src),
                .variable => return sema.failWithNeededComptime(block, src, reason),
                else => return val,
            },
-            else => return val,
        }
    }
    return sema.failWithNeededComptime(block, src, reason);
@@ -1928,11 +1929,11 @@ fn resolveMaybeUndefVal(
    const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null;
    switch (val.ip_index) {
        .generic_poison => return error.GenericPoison,
-        .none => switch (val.tag()) {
+        .none => return val,
+        else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
            .variable => return null,
            else => return val,
        },
-        else => return val,
    }
}

@@ -1948,21 +1949,20 @@ fn resolveMaybeUndefValIntable(
    var check = val;
    while (true) switch (check.ip_index) {
        .generic_poison => return error.GenericPoison,
-        .none => switch (check.tag()) {
-            .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null,
-            .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr,
-            .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
-            .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
-            else => {
-                try sema.resolveLazyValue(val);
-                return val;
+        .none => break,
+        else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) {
+            .variable => return null,
+            .ptr => |ptr| switch (ptr.addr) {
+                .decl, .mut_decl, .comptime_field => return null,
+                .int => break,
+                .eu_payload, .opt_payload => |base| check = base.toValue(),
+                .elem, .field => |base_index| check = base_index.base.toValue(),
            },
-        },
-        else => {
-            try sema.resolveLazyValue(val);
-            return val;
+            else => break,
        },
    };
+    try sema.resolveLazyValue(val);
+    return val;
}

/// Returns all Value tags including `variable` and `undef`.
@@ -1994,7 +1994,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
        if (air_tags[i] == .constant) {
            const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
            const val = sema.air_values.items[ty_pl.payload];
-            if (val.tagIsVariable()) return val;
+            if (val.getVariable(sema.mod) != null) return val;
        }
        return opv;
    }
@@ -2003,7 +2003,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
        .constant => {
            const ty_pl = air_datas[i].ty_pl;
            const val = sema.air_values.items[ty_pl.payload];
-            if (val.isRuntimeValue()) make_runtime.* = true;
+            if (val.isRuntimeValue(sema.mod)) make_runtime.* = true;
            if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true;
            return val;
        },
@@ -2489,13 +2489,13 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
                    .@"addrspace" = addr_space,
                });
                try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index);
-                return sema.addConstant(
-                    ptr_ty,
-                    try Value.Tag.decl_ref_mut.create(sema.arena, .{
-                        .decl_index = iac.data.decl_index,
+                return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{
+                    .ty = ptr_ty.ip_index,
+                    .addr = .{ .mut_decl = .{
+                        .decl = iac.data.decl_index,
                        .runtime_index = block.runtime_index,
-                    }),
-                );
+                    } },
+                } })).toValue());
            },
            else => {},
        }
@@ -2949,12 +2949,18 @@ fn zirEnumDecl(
    }

    const prev_owner_func = sema.owner_func;
+    const prev_owner_func_index = sema.owner_func_index;
    sema.owner_func = null;
+    sema.owner_func_index = .none;
    defer sema.owner_func = prev_owner_func;
+    defer sema.owner_func_index = prev_owner_func_index;

    const prev_func = sema.func;
+    const prev_func_index = sema.func_index;
    sema.func = null;
+    sema.func_index = .none;
    defer sema.func = prev_func;
+    defer sema.func_index = prev_func_index;

    var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
    defer wip_captures.deinit();
@@ -3735,14 +3741,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
            sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst;

            try sema.maybeQueueFuncBodyAnalysis(decl_index);
-            if (var_is_mut) {
-                sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{
-                    .decl_index = decl_index,
+            sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{
+                .ty = final_ptr_ty.ip_index,
+                .addr = if (var_is_mut) .{ .mut_decl = .{
+                    .decl = decl_index,
                    .runtime_index = block.runtime_index,
-                });
-            } else {
-                sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index);
-            }
+                } } else .{ .decl = decl_index },
+            } })).toValue();
        },
        .inferred_alloc => {
            assert(sema.unresolved_inferred_allocs.remove(ptr_inst));
@@ -3836,7 +3841,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
            // block so that codegen does not see it.
            block.instructions.shrinkRetainingCapacity(search_index);
            try sema.maybeQueueFuncBodyAnalysis(new_decl_index);
-            sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index);
+            sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{
+                .ty = final_elem_ty.ip_index,
+                .addr = .{ .decl = new_decl_index },
+            } })).toValue();
            // if bitcast ty ref needs to be made const, make_ptr_const
            // ZIR handles it later, so we can just use the ty ref here.
            air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty;
@@ -4332,12 +4340,16 @@ fn validateUnionInit(
        // instead a single `store` to the result ptr with a comptime union value.
        block.instructions.shrinkRetainingCapacity(first_block_index);

-        var union_val = try Value.Tag.@"union".create(sema.arena, .{
-            .tag = tag_val,
-            .val = val,
-        });
-        if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val);
-        const union_init = try sema.addConstant(union_ty, union_val);
+        var union_val = try mod.intern(.{ .un = .{
+            .ty = union_ty.ip_index,
+            .tag = tag_val.ip_index,
+            .val = val.ip_index,
+        } });
+        if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{
+            .ty = union_ty.ip_index,
+            .val = union_val,
+        } });
+        const union_init = try sema.addConstant(union_ty, union_val.toValue());
        try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
        return;
    } else if (try sema.typeRequiresComptime(union_ty)) {
@@ -4464,14 +4476,15 @@ fn validateStructInit(

    // We collect the comptime field values in case the struct initialization
    // ends up being comptime-known.
-    const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod));
+    const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod));
+    defer sema.gpa.free(field_values);

    field: for (found_fields, 0..) |field_ptr, i| {
        if (field_ptr != 0) {
            // Determine whether the value stored to this pointer is comptime-known.
            const field_ty = struct_ty.structFieldType(i, mod);
            if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
-                field_values[i] = opv;
+                field_values[i] = opv.ip_index;
                continue;
            }

@@ -4536,7 +4549,7 @@ fn validateStructInit(
                first_block_index = @min(first_block_index, block_index);
            }
            if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
-                field_values[i] = val;
+                field_values[i] = val.ip_index;
            } else if (require_comptime) {
                const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
                return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known");
@@ -4570,7 +4583,7 @@ fn validateStructInit(
            }
            continue;
        }
-        field_values[i] = default_val;
+        field_values[i] = default_val.ip_index;
    }

    if (root_msg) |msg| {
@@ -4593,9 +4606,15 @@ fn validateStructInit(
        // instead a single `store` to the struct_ptr with a comptime struct value.
        block.instructions.shrinkRetainingCapacity(first_block_index);

-        var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values);
-        if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val);
-        const struct_init = try sema.addConstant(struct_ty, struct_val);
+        var struct_val = try mod.intern(.{ .aggregate = .{
+            .ty = struct_ty.ip_index,
+            .storage = .{ .elems = field_values },
+        } });
+        if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{
+            .ty = struct_ty.ip_index,
+            .val = struct_val,
+        } });
+        const struct_init = try sema.addConstant(struct_ty, struct_val.toValue());
        try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
        return;
    }
@@ -4611,7 +4630,7 @@ fn validateStructInit(
        else
            try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
        const field_ty = sema.typeOf(default_field_ptr).childType(mod);
-        const init = try sema.addConstant(field_ty, field_values[i]);
+        const init = try sema.addConstant(field_ty, field_values[i].toValue());
        try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
    }
}
@@ -4691,7 +4710,8 @@ fn zirValidateArrayInit(
    // Collect the comptime element values in case the array literal ends up
    // being comptime-known.
    const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod));
-    const element_vals = try sema.arena.alloc(Value, array_len_s);
+    const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s);
+    defer sema.gpa.free(element_vals);
    const opt_opv = try sema.typeHasOnePossibleValue(array_ty);
    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);
@@ -4701,13 +4721,13 @@ fn zirValidateArrayInit(

        if (array_ty.isTuple(mod)) {
            if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
-                element_vals[i] = opv;
+                element_vals[i] = opv.ip_index;
                continue;
            }
        } else {
            // Array has one possible value, so value is always comptime-known
            if (opt_opv) |opv| {
-                element_vals[i] = opv;
+                element_vals[i] = opv.ip_index;
                continue;
            }
        }
@@ -4768,7 +4788,7 @@ fn zirValidateArrayInit(
            first_block_index = @min(first_block_index, block_index);
        }
        if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
-            element_vals[i] = val;
+            element_vals[i] = val.ip_index;
        } else {
            array_is_comptime = false;
        }
@@ -4780,9 +4800,12 @@ fn zirValidateArrayInit(

    if (array_is_comptime) {
        if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
-            if (ptr_val.tag() == .comptime_field_ptr) {
-                // This store was validated by the individual elem ptrs.
-                return;
+            switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+                .ptr => |ptr| switch (ptr.addr) {
+                    .comptime_field => return, // This store was validated by the individual elem ptrs.
+                    else => {},
+                },
+                else => {},
            }
        }

        // Instead of performing the individual stores, we are going to perform
        // instead a single `store` to the array_ptr with a comptime struct value.
        // Also to populate the sentinel value, if any.
if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val; + element_vals[instrs.len] = sentinel_val.ip_index; } block.instructions.shrinkRetainingCapacity(first_block_index); - var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); - if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val); - const array_init = try sema.addConstant(array_ty, array_val); + var array_val = try mod.intern(.{ .aggregate = .{ + .ty = array_ty.ip_index, + .storage = .{ .elems = element_vals }, + } }); + if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ + .ty = array_ty.ip_index, + .val = array_val, + } }); + const array_init = try sema.addConstant(array_ty, array_val.toValue()); try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); } } @@ -5029,7 +5058,7 @@ fn storeToInferredAllocComptime( // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: { - if (operand_val.tagIsVariable()) break :store; + if (operand_val.getVariable(sema.mod) != null) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( @@ -5717,8 +5746,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { try mod.ensureDeclAnalyzed(decl_index); const exported_decl = mod.declPtr(decl_index); - if (exported_decl.val.castTag(.function)) |some| { - return sema.analyzeExport(block, src, options, some.data.owner_decl); + if (exported_decl.getFunction(mod)) |function| { + return sema.analyzeExport(block, src, options, function.owner_decl); } } try sema.analyzeExport(block, src, options, decl_index); @@ -5741,17 +5770,14 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, else => |e| return e, }; - const decl_index = switch (operand.val.tag()) { - .function => operand.val.castTag(.function).?.data.owner_decl, - else => blk: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - break :blk try anon_decl.finish( - operand.ty, - try operand.val.copy(anon_decl.arena()), - 0, - ); - }, + const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + break :blk try anon_decl.finish( + operand.ty, + try operand.val.copy(anon_decl.arena()), + 0, + ); }; try sema.analyzeExport(block, src, options, decl_index); } @@ -5788,7 +5814,7 @@ pub fn analyzeExport( } // TODO: some backends might support re-exporting extern decls - if (exported_decl.isExtern()) { + if (exported_decl.isExtern(mod)) { return sema.fail(block, src, "export target cannot be extern", .{}); } @@ -5796,7 +5822,7 @@ pub fn analyzeExport( mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); - const gpa = mod.gpa; + const gpa = sema.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); @@ -5852,8 +5878,9 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst alignment, }); } - const func = sema.func orelse + const func_index = sema.func_index.unwrap() orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); + const func = mod.funcPtr(func_index); const fn_owner_decl = mod.declPtr(func.owner_decl); switch 
(fn_owner_decl.ty.fnCallingConvention(mod)) { @@ -5864,7 +5891,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -6191,10 +6218,13 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (func_val.tag()) { - .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, - .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => return null, + }, else => return null, }; return mod.declPtr(owner_decl_index); @@ -6576,20 +6606,22 @@ const GenericCallAdapter = struct { is_anytype: bool, }; - pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { + pub fn eql(ctx: @This(), adapted_key: void, other_key: Module.Fn.Index) bool { _ = adapted_key; + const other_func = ctx.module.funcPtr(other_key); + // Checking for equality may happen on an item that has been inserted // into the map but is not yet fully initialized. In such case, the // two initialized fields are `hash` and `generic_owner_decl`. - if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false; + if (ctx.generic_fn.owner_decl != other_func.generic_owner_decl.unwrap().?) return false; - const other_comptime_args = other_key.comptime_args.?; + const other_comptime_args = other_func.comptime_args.?; for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) 
|other_arg, i| { const this_arg = ctx.args[i]; const this_is_comptime = !this_arg.val.isGenericPoison(); const other_is_comptime = !other_arg.val.isGenericPoison(); const this_is_anytype = this_arg.is_anytype; - const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i)); + const other_is_anytype = other_func.isAnytypeParam(ctx.module, @intCast(u32, i)); if (other_is_anytype != this_is_anytype) return false; if (other_is_comptime != this_is_comptime) return false; @@ -6663,7 +6695,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6760,18 +6792,21 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn = switch (func_val.tag()) { - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, - .function => func_val.castTag(.function).?.data, - .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), - else => { - assert(callee_ty.isPtrAtRuntime(mod)); - return sema.fail(block, call_src, "{s} call of function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), - }); + .func => |function| function.index, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).getFunctionIndex(mod).unwrap().?, + else => { + assert(callee_ty.isPtrAtRuntime(mod)); + return sema.fail(block, call_src, "{s} call of function pointer", .{ + @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + }); + }, }, + else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ @@ -6804,6 +6839,7 @@ fn analyzeCall( // In order to save a bit of stack space, directly modify Sema rather // than create a child one. 
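
With `Module.Fn` referenced by index instead of by pointer, hash-map adapters like `GenericCallAdapter` above must carry the `Module` so they can dereference keys (`ctx.module.funcPtr(other_key)`) before comparing fields. The same index-plus-context shape in miniature, with hypothetical `Item`/`Ctx` names (an analogy to the adapter above, not compiler code):

    const std = @import("std");

    const Item = struct { key: u32, payload: u32 };

    // Map keys are indices into `items`; the context resolves them before
    // hashing or comparing, mirroring Module.Fn.Index + Module.funcPtr.
    const Ctx = struct {
        items: []const Item,
        pub fn hash(ctx: @This(), index: u32) u64 {
            return std.hash.Wyhash.hash(0, std.mem.asBytes(&ctx.items[index].key));
        }
        pub fn eql(ctx: @This(), a: u32, b: u32) bool {
            return ctx.items[a].key == ctx.items[b].key;
        }
    };
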
const parent_zir = sema.code; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; @@ -6819,8 +6855,11 @@ fn analyzeCall( } const parent_func = sema.func; + const parent_func_index = sema.func_index; sema.func = module_fn; + sema.func_index = module_fn_index.toOptional(); defer sema.func = parent_func; + defer sema.func_index = parent_func_index; const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; @@ -6856,7 +6895,7 @@ fn analyzeCall( defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); if (is_comptime_call) { memoized_call_key = .{ - .func = module_fn, + .func = module_fn_index, .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), }; delete_memoized_call_key = true; @@ -6889,7 +6928,7 @@ fn analyzeCall( &child_block, .unneeded, inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, @@ -6907,7 +6946,7 @@ fn analyzeCall( &child_block, mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, @@ -6950,7 +6989,7 @@ fn analyzeCall( const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = module_fn, + .func = module_fn_index, }); const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); @@ -6982,7 +7021,7 @@ fn analyzeCall( const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { - try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); + try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[param]) { @@ -7014,7 +7053,7 @@ fn analyzeCall( error.ComptimeReturn => break :result inlining.comptime_result, error.AnalysisFail => { const err_msg = sema.err orelse return err; - if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err; + if (mem.eql(u8, err_msg.msg, recursive_msg)) return err; try sema.errNote(block, call_src, err_msg, "called from here", .{}); err_msg.clearTrace(sema.gpa); return err; @@ -7027,8 +7066,8 @@ fn analyzeCall( if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, - module_fn, - parent_func.?, + module_fn_index, + parent_func_index.unwrap().?, mod.declPtr(parent_func.?.owner_decl).ty, .dbg_inline_end, ); @@ -7120,8 +7159,8 @@ fn analyzeCall( } if (try sema.resolveMaybeUndefVal(func)) |func_val| { - if (func_val.castTag(.function)) |func_obj| { - try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data); + if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| { + try sema.mod.ensureFuncBodyAnalysisQueued(func_index); } } @@ -7147,9 +7186,9 @@ fn analyzeCall( // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
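
During the transition both the legacy pointer (`sema.func`) and the new index (`sema.func_index`) are live, so the hunk above saves and restores them together around the inlined body; otherwise the two fields could disagree after an error path. The idiom in isolation, with a hypothetical `Self`/`state` standing in for the Sema fields:

    // Save/restore with defer: restoration runs on every exit path,
    // including error returns, keeping paired fields consistent.
    fn withTemporaryState(self: *Self, new_state: State) !void {
        const saved = self.state;
        self.state = new_state;
        defer self.state = saved;
        try self.doWork(); // even if this fails, `state` is restored
    }
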
check: { - var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; - switch (func_val.tag()) { - .function, .decl_ref => { + const func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; + switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func, .extern_func, .ptr => { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; }, @@ -7196,7 +7235,7 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: InternPool.Key.FuncType, + new_fn_info: *InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, @@ -7263,7 +7302,7 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); memoized_call_key.args[arg_i.*] = .{ .ty = param_ty.toType(), .val = arg_val, @@ -7302,7 +7341,7 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); memoized_call_key.args[arg_i.*] = .{ .ty = sema.typeOf(uncasted_arg), .val = arg_val, @@ -7387,11 +7426,11 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = switch (func_val.tag()) { - .function => func_val.castTag(.function).?.data, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, + const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .func => |function| function.index, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, else => unreachable, - }; + }); // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. @@ -7496,16 +7535,17 @@ fn instantiateGenericCall( .args = generic_args, .module = mod, }; - const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); - const callee = if (!gop.found_existing) callee: { - const new_module_func = try gpa.create(Module.Fn); + const gop = try mod.monomorphed_funcs.getOrPutContextAdapted(gpa, {}, adapter, .{ .mod = mod }); + const callee_index = if (!gop.found_existing) callee: { + const new_module_func_index = try mod.createFunc(undefined); + const new_module_func = mod.funcPtr(new_module_func_index); // This ensures that we can operate on the hash map before the Module.Fn // struct is fully initialized. 
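
Because `monomorphed_funcs` now stores `Module.Fn.Index` keys that cannot hash or compare themselves, the plain hash-map calls become the `*Context` variants from `std.hash_map`, threading `.{ .mod = mod }` through every operation on the map. The two entry points as they are used in these hunks:

    // Context-carrying variants: the context is passed per call rather than
    // stored in the map, since it borrows `mod`.
    const gop = try mod.monomorphed_funcs.getOrPutContextAdapted(gpa, {}, adapter, .{ .mod = mod });
    assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod }));
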
new_module_func.hash = precomputed_hash; new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional(); new_module_func.comptime_args = null; - gop.key_ptr.* = new_module_func; + gop.key_ptr.* = new_module_func_index; try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); @@ -7549,7 +7589,7 @@ fn instantiateGenericCall( new_decl_index, uncasted_args, module_fn, - new_module_func, + new_module_func_index, namespace_index, func_ty_info, call_src, @@ -7565,12 +7605,12 @@ fn instantiateGenericCall( } assert(namespace.anon_decls.orderedRemove(new_decl_index)); mod.destroyDecl(new_decl_index); - assert(mod.monomorphed_funcs.remove(new_module_func)); - gpa.destroy(new_module_func); + assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); + mod.destroyFunc(new_module_func_index); return err; }, else => { - assert(mod.monomorphed_funcs.remove(new_module_func)); + assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); { errdefer new_decl_arena.deinit(); try new_decl.finalizeNewArena(&new_decl_arena); @@ -7590,6 +7630,7 @@ fn instantiateGenericCall( try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; } else gop.key_ptr.*; + const callee = mod.funcPtr(callee_index); callee.branch_quota = @max(callee.branch_quota, sema.branch_quota); @@ -7645,7 +7686,7 @@ fn instantiateGenericCall( sema.owner_func.?.calls_or_awaits_errorable_fn = true; } - try sema.mod.ensureFuncBodyAnalysisQueued(callee); + try sema.mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -7682,12 +7723,12 @@ fn resolveGenericInstantiationType( new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, module_fn: *Module.Fn, - new_module_func: *Module.Fn, + new_module_func: Module.Fn.Index, namespace: Namespace.Index, func_ty_info: InternPool.Key.FuncType, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, -) !*Module.Fn { +) !Module.Fn.Index { const mod = sema.mod; const gpa = sema.gpa; @@ -7707,11 +7748,13 @@ fn resolveGenericInstantiationType( .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, - .preallocated_new_func = new_module_func, + .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, @@ -7802,8 +7845,8 @@ fn resolveGenericInstantiationType( const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable; - const new_func = new_func_val.castTag(.function).?.data; - errdefer new_func.deinit(gpa); + const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; + errdefer mod.destroyFunc(new_func); assert(new_func == new_module_func); arg_i = 0; @@ -7867,7 +7910,10 @@ fn resolveGenericInstantiationType( return error.GenericPoison; } - new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); + new_decl.val = (try mod.intern(.{ .func = .{ + .ty = new_decl.ty.ip_index, + .index = new_func, + } })).toValue(); new_decl.@"align" = 0; new_decl.has_tv = true; new_decl.owns_tv = true; @@ -7900,8 +7946,8 
@@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) fn emitDbgInline( sema: *Sema, block: *Block, - old_func: *Module.Fn, - new_func: *Module.Fn, + old_func: Module.Fn.Index, + new_func: Module.Fn.Index, new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { @@ -7910,7 +7956,10 @@ fn emitDbgInline( // Recursive inline call; no dbg_inline needed. if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func)); + try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ + .ty = new_func_ty.ip_index, + .index = new_func, + } })).toValue()); _ = try block.addInst(.{ .tag = tag, .data = .{ .ty_pl = .{ @@ -8078,12 +8127,11 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const name = inst_data.get(sema.code); // Create an error set type with only this error value, and return the value. const kv = try sema.mod.getErrorValue(name); - return sema.addConstant( - try mod.singleErrorSetType(kv.key), - try Value.Tag.@"error".create(sema.arena, .{ - .name = kv.key, - }), - ); + const error_set_type = try mod.singleErrorSetType(kv.key); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + } })).toValue()); } fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -8101,23 +8149,11 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - switch (val.tag()) { - .@"error" => { - return sema.addConstant( - Type.err_int, - try mod.intValue( - Type.err_int, - (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - ), - ); - }, - - // This is not a valid combination with the type `anyerror`. - .the_only_possible_value => unreachable, - - // Assume it's already encoded as an integer. 
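
Error values follow the same scheme: the name becomes an interned string and the value an interned `.err` key, replacing the arena-allocated `Value.Tag.@"error"` payload. A sketch of the two string entry points as used in these hunks ("Foo" is a placeholder name, not from the patch):

    // getOrPutString interns, allocating on first sight; getString only
    // looks up and returns an optional, so `.unwrap().?` asserts existence.
    const new_name = try mod.intern_pool.getOrPutString(sema.gpa, "Foo");
    const known_name = mod.intern_pool.getString("Foo").unwrap().?;
    const err_val = (try mod.intern(.{ .err = .{
        .ty = error_set_type.ip_index,
        .name = new_name,
    } })).toValue();
    _ = known_name;
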
- else => return sema.addConstant(Type.err_int, val), - } + const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + return sema.addConstant(Type.err_int, try mod.intValue( + Type.err_int, + (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, + )); } const op_ty = sema.typeOf(uncasted_operand); @@ -8142,23 +8178,21 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const mod = sema.mod; if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); - const payload = try sema.arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = sema.mod.error_name_list.items[int] }, - }; - return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); + return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.intern_pool.getString(sema.mod.error_name_list.items[int]).unwrap().?, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { @@ -8234,12 +8268,12 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.addConstant( - .{ .ip_index = .enum_literal_type }, - try Value.Tag.enum_literal.create(sema.arena, duped_name), - ); + const name = inst_data.get(sema.code); + return sema.addConstant(.{ .ip_index = .enum_literal_type }, (try mod.intern(.{ + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name), + })).toValue()); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -8404,32 +8438,26 @@ fn analyzeOptionalPayloadPtr( if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the optional non-null bit would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
_ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(mod), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.ip_index, + .addr = .{ .opt_payload = ptr_val.ip_index }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(mod), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.ip_index, + .addr = .{ .opt_payload = ptr_val.ip_index }, + } })).toValue()); } } @@ -8532,11 +8560,13 @@ fn analyzeErrUnionPayload( const mod = sema.mod; const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - if (val.getError()) |name| { + if (val.getError(mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } - const data = val.castTag(.eu_payload).?.data; - return sema.addConstant(payload_ty, data); + return sema.addConstant( + payload_ty, + mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(), + ); } try sema.requireRuntimeBlock(block, src, null); @@ -8595,33 +8625,26 @@ fn analyzeErrUnionPayloadPtr( if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the error union error code would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
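
The special pointer tags (`opt_payload_ptr`, `eu_payload_ptr`, `field_ptr`, `decl_ref`) all collapse into a single interned `.ptr` key whose `addr` union says what the pointer points at. The shape, as it appears across these hunks:

    // One pointer key, many address kinds; each old Value.Tag.*_ptr payload
    // becomes an `addr` variant referencing other interned values by index.
    const payload_ptr = (try mod.intern(.{ .ptr = .{
        .ty = child_pointer.ip_index,
        .addr = .{ .opt_payload = ptr_val.ip_index },
    } })).toValue();
    const field_ptr = (try mod.intern(.{ .ptr = .{
        .ty = ptr_field_ty.ip_index,
        .addr = .{ .field = .{ .base = union_val.ip_index, .index = field_index } },
    } })).toValue();
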
try sema.requireRuntimeBlock(block, src, null); _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.childType(mod), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.ip_index, + .addr = .{ .eu_payload = ptr_val.ip_index }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { - if (val.getError()) |name| { + if (val.getError(mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } - - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.childType(mod), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.ip_index, + .addr = .{ .eu_payload = ptr_val.ip_index }, + } })).toValue()); } } @@ -8664,7 +8687,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - assert(val.getError() != null); + assert(val.getError(mod) != null); return sema.addConstant(result_ty, val); } @@ -8694,7 +8717,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { - assert(val.getError() != null); + assert(val.getError(mod) != null); return sema.addConstant(result_ty, val); } } @@ -8931,20 +8954,21 @@ fn funcCommon( } var destroy_fn_on_error = false; - const new_func: *Module.Fn = new_func: { + const new_func_index = new_func: { if (!has_body) break :new_func undefined; if (sema.comptime_args_fn_inst == func_inst) { - const new_func = sema.preallocated_new_func.?; - sema.preallocated_new_func = null; // take ownership - break :new_func new_func; + const new_func_index = sema.preallocated_new_func.unwrap().?; + sema.preallocated_new_func = .none; // take ownership + break :new_func new_func_index; } destroy_fn_on_error = true; - const new_func = try gpa.create(Module.Fn); + var new_func: Module.Fn = undefined; // Set this here so that the inferred return type can be printed correctly if it appears in an error. 
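
In `funcCommon`, as in `instantiateGenericCall` earlier, `Module.Fn` storage is now owned by the Module (`createFunc`/`destroyFunc`/`funcPtr`), so the `gpa.create`/`gpa.destroy` pairs disappear. The lifecycle as used in the surrounding hunks; treating the returned pointer as transient is an inference from the index-based design, not something the patch states:

    const new_func_index = try mod.createFunc(undefined); // reserve a slot, init later
    errdefer mod.destroyFunc(new_func_index); // replaces gpa.destroy(new_func)
    const new_func = mod.funcPtr(new_func_index); // dereference on demand;
    // presumably not safe to cache across calls that may create more funcs
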
new_func.owner_decl = sema.owner_decl_index; - break :new_func new_func; + const new_func_index = try mod.createFunc(new_func); + break :new_func new_func_index; }; - errdefer if (destroy_fn_on_error) gpa.destroy(new_func); + errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); const target = sema.mod.getTarget(); const fn_ty: Type = fn_ty: { @@ -9008,7 +9032,7 @@ fn funcCommon( else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = new_func, + .func = new_func_index, }); const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); @@ -9158,26 +9182,16 @@ fn funcCommon( sema.owner_decl.@"addrspace" = address_space orelse .generic; if (is_extern) { - const new_extern_fn = try gpa.create(Module.ExternFn); - errdefer gpa.destroy(new_extern_fn); - - new_extern_fn.* = Module.ExternFn{ - .owner_decl = sema.owner_decl_index, - .lib_name = null, - }; - - if (opt_lib_name) |lib_name| { - new_extern_fn.lib_name = try sema.handleExternLibName(block, .{ - .node_offset_lib_name = src_node_offset, - }, lib_name); - } - - const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn); - extern_fn_payload.* = .{ - .base = .{ .tag = .extern_fn }, - .data = new_extern_fn, - }; - return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ + .ty = fn_ty.ip_index, + .decl = sema.owner_decl_index, + .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( + gpa, + try sema.handleExternLibName(block, .{ + .node_offset_lib_name = src_node_offset, + }, lib_name), + )).toOptional() else .none, + } })).toValue()); } if (!has_body) { @@ -9191,9 +9205,9 @@ fn funcCommon( break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; + const new_func = mod.funcPtr(new_func_index); const hash = new_func.hash; const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl; - const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = func_inst, @@ -9208,11 +9222,10 @@ fn funcCommon( .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; - fn_payload.* = .{ - .base = .{ .tag = .function }, - .data = new_func, - }; - return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ + .ty = fn_ty.ip_index, + .index = new_func_index, + } })).toValue()); } fn analyzeParameter( @@ -9312,7 +9325,7 @@ fn zirParam( const prev_preallocated_new_func = sema.preallocated_new_func; const prev_no_partial_func_type = sema.no_partial_func_ty; block.params = .{}; - sema.preallocated_new_func = null; + sema.preallocated_new_func = .none; sema.no_partial_func_ty = true; defer { block.params.deinit(sema.gpa); @@ -9369,7 +9382,7 @@ fn zirParam( else => |e| return e, } or comptime_syntax; if (sema.inst_map.get(inst)) |arg| { - if (is_comptime and sema.preallocated_new_func != null) { + if (is_comptime and sema.preallocated_new_func != .none) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. 
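
`preallocated_new_func` changes type here from an optional pointer (checked against `null`) to an optional index checked against `.none` and consumed with `.unwrap().?`. A self-contained sketch of that optional-index convention, with assumed names (the real declarations live in InternPool/Module, outside this diff):

    const std = @import("std");

    const Index = enum(u32) { _ };

    // `.none` steals one sentinel bit pattern, so the optional costs no
    // extra space compared with a bare index.
    const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,
        fn unwrap(oi: OptionalIndex) ?Index {
            return if (oi == .none) null else @intToEnum(Index, @enumToInt(oi));
        }
    };
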
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) { @@ -9392,7 +9405,7 @@ fn zirParam( assert(sema.inst_map.remove(inst)); } - if (sema.preallocated_new_func != null) { + if (sema.preallocated_new_func != .none) { if (try sema.typeHasOnePossibleValue(param_ty)) |opv| { // In this case we are instantiating a generic function call with a non-comptime // non-anytype parameter that ended up being a one-possible-type. @@ -9640,8 +9653,8 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); - const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); + const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ .tag = .reduce, @@ -9649,7 +9662,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9673,10 +9686,7 @@ fn intCast( // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); - const dest_max_val = if (is_vector) - try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) - else - dest_max_val_scalar; + const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); @@ -9732,7 +9742,8 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); + const scalar_zero = try mod.intValue(operand_scalar_ty, 0); + const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -10139,17 +10150,18 @@ fn zirSwitchCapture( .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = union_val, - .container_ty = operand_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = union_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } - const tag_and_val = union_val.castTag(.@"union").?.data; - return sema.addConstant(field_ty, tag_and_val.val); + return sema.addConstant( + field_ty, + mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(), + ); } if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ @@ -10243,14 +10255,13 @@ fn zirSwitchCapture( }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { - return sema.addConstant( - field_ty_ptr, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = op_ptr_val, - .container_ty = operand_ty, 
- .field_index = first_field_index, - }), - ); + return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ + .ty = field_ty_ptr.ip_index, + .addr = .{ .field = .{ + .base = op_ptr_val.ip_index, + .index = first_field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr); @@ -10273,7 +10284,7 @@ fn zirSwitchCapture( const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?); + const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError(mod).?); names.putAssumeCapacityNoClobber(name_ip, {}); } const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); @@ -10284,7 +10295,7 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const item_ty = try mod.singleErrorSetType(item_val.getError().?); + const item_ty = try mod.singleErrorSetType(item_val.getError(mod).?); return sema.bitCast(block, item_ty, operand, operand_src, null); } }, @@ -10809,10 +10820,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { - var arena = std.heap.ArenaAllocator.init(gpa); - defer arena.deinit(); - - const min_int = try operand_ty.minInt(arena.allocator(), mod); + const min_int = try operand_ty.minInt(mod); const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { @@ -11493,8 +11501,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (seen_errors.contains(error_name)) continue; cases_len += 1; - const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name }); - const item_ref = try sema.addConstant(operand_ty, item_val); + const item_val = try mod.intern(.{ .err = .{ + .ty = operand_ty.ip_index, + .name = error_name_ip, + } }); + const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); case_block.inline_case_capture = item_ref; case_block.instructions.shrinkRetainingCapacity(0); @@ -11665,7 +11676,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; - const min = try ty.minInt(sema.arena, mod); + const min = try ty.minInt(mod); const max = try ty.maxIntScalar(mod, Type.comptime_int); return RangeSetUnhandledIterator{ @@ -11788,9 +11799,10 @@ fn validateSwitchItemError( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { + const ip = &sema.mod.intern_pool; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do i need to typecheck here? 
- const error_name = item_tv.val.castTag(.@"error").?.data.name; + const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name); const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -11983,7 +11995,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { if (!operand_ty.isError(mod)) return; - if (val.getError() == null) return; + if (val.getError(mod) == null) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } } @@ -12005,7 +12017,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I const src = inst_data.src(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.getError()) |name| { + if (val.getError(sema.mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } } @@ -12172,11 +12184,11 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R // Return the error code from the function. const kv = try mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try mod.singleErrorSetType(kv.key), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); - return result_inst; + const error_set_type = try mod.singleErrorSetType(kv.key); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = mod.intern_pool.getString(kv.key).unwrap().?, + } })).toValue()); } fn zirShl( @@ -12301,7 +12313,7 @@ fn zirShl( { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, mod, lhs_ty), + try lhs_ty.maxInt(mod, lhs_ty), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -12316,7 +12328,7 @@ fn zirShl( if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { - const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); + const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, @@ -12466,7 +12478,7 @@ fn zirShr( const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { - const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); + const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, @@ -13179,11 +13191,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); } - const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) - else - try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); - + const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13203,11 +13211,7 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}), } - const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) - else - try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); - + const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13254,8 +13258,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13325,9 +13327,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13427,8 +13427,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13469,9 +13467,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13555,7 +13551,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else => unreachable, }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13600,8 +13596,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try 
sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13644,9 +13638,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13721,8 +13713,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13765,9 +13755,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13843,12 +13831,9 @@ fn addDivIntOverflowSafety( return; } - const min_int = try resolved_type.minInt(sema.arena, mod); + const min_int = try resolved_type.minInt(mod); const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); - const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector) - try Value.Tag.repeated.create(sema.arena, neg_one_scalar) - else - neg_one_scalar; + const neg_one = try sema.splat(resolved_type, neg_one_scalar); // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. @@ -13924,7 +13909,7 @@ fn addDivByZeroSafety( else try mod.floatValue(resolved_type.scalarType(mod), 0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -14012,9 +13997,10 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ + .ty = resolved_type.ip_index, + .storage = .{ .repeated_elem = scalar_zero.ip_index }, + } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -14399,12 +14385,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the argument is undefined, undefined is returned. 
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
-                    break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
+                    break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs };
                }
            }
            if (maybe_rhs_val) |rhs_val| {
                if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
-                    break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+                    break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
                }
            }
            if (maybe_lhs_val) |lhs_val| {
@@ -14425,7 +14411,7 @@ fn zirOverflowArithmetic(
                if (rhs_val.isUndef(mod)) {
                    break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                    break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+                    break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
                } else if (maybe_lhs_val) |lhs_val| {
                    if (lhs_val.isUndef(mod)) {
                        break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
@@ -14444,9 +14430,9 @@ fn zirOverflowArithmetic(
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod)) {
                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
-                    } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
+                        break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
+                    } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
+                        break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs };
                    }
                }
            }
@@ -14454,9 +14440,9 @@ fn zirOverflowArithmetic(
            if (maybe_rhs_val) |rhs_val| {
                if (!rhs_val.isUndef(mod)) {
                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
-                    } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+                        break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs };
+                    } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
+                        break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
                    }
                }
            }
@@ -14478,12 +14464,12 @@ fn zirOverflowArithmetic(
                // Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14544,10 +14530,14 @@ fn zirOverflowArithmetic( return block.addAggregateInit(tuple_ty, element_refs); } -fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { +fn splat(sema: *Sema, ty: Type, val: Value) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) != .Vector) return val; - return Value.Tag.repeated.create(sema.arena, val); + const repeated = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = val.ip_index }, + } }); + return repeated.toValue(); } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { @@ -14603,8 +14593,6 @@ fn analyzeArithmetic( .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -14853,9 +14841,7 @@ fn analyzeArithmetic( } else if (resolved_type.isAnyFloat()) { break :lz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14886,9 +14872,7 @@ fn analyzeArithmetic( } else if (resolved_type.isAnyFloat()) { break :rz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14931,9 +14915,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14947,9 +14929,7 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14979,9 +14959,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b 
try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14994,9 +14972,7 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -15138,7 +15114,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15184,7 +15160,7 @@ fn zirAsm( const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; - const is_global_assembly = sema.func == null; + const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source); @@ -15387,12 +15363,7 @@ fn zirCmpEq( if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, - // or calling to Module.getErrorValue to get the values and then compare them is - // faster. - const lhs_name = lval.castTag(.@"error").?.data.name; - const rhs_name = rval.castTag(.@"error").?.data.name; - if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + if (lval.toIntern() == rval.toIntern()) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15650,8 +15621,8 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .AnyFrame, => {}, } - const val = try ty.lazyAbiSize(mod, sema.arena); - if (val.isLazySize()) { + const val = try ty.lazyAbiSize(mod); + if (val.isLazySize(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -15760,11 +15731,11 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
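
Two consequences of interning show up in the hunks above: `maybeRepeated` becomes `sema.splat`, building a vector constant as one interned aggregate with `repeated_elem` storage instead of an arena `Value.Tag.repeated`; and `zirCmpEq` can now compare error values by index (`lval.toIntern() == rval.toIntern()`), since deduplication makes equal values share an index, settling the removed TODO about name comparison versus `getErrorValue`. The call site, now uniform for scalars and vectors:

    // Scalars pass through splat unchanged; vectors intern a single
    // repeated element rather than allocating one Value per lane.
    const zero_val = try sema.splat(resolved_type, scalar_zero);
    return sema.addConstant(resolved_type, zero_val);
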
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15788,11 +15759,11 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15868,14 +15839,17 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = std.mem.span(fn_owner_decl.name); + const name = mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + } }); }; const file_name_val = blk: { @@ -15888,27 +15862,35 @@ fn zirBuiltinSrc( try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + } }); }; - const field_values = try sema.arena.alloc(Value, 4); - // file: [:0]const u8, - field_values[0] = file_name_val; - // fn_name: [:0]const u8, - field_values[1] = func_name_val; - // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1)); - // column: u32, - field_values[3] = try mod.intValue(Type.u32, extra.column + 1); - - return sema.addConstant( - try sema.getBuiltinType("SourceLocation"), - try Value.Tag.aggregate.create(sema.arena, field_values), - ); + const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const fields = .{ + // file: [:0]const u8, + file_name_val, + // fn_name: [:0]const u8, + func_name_val, + // line: u32, + try mod.intern(.{ .runtime_value = .{ + .ty = .u32_type, + .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index, + } }), + // column: u32, + (try mod.intValue(Type.u32, extra.column + 1)).ip_index, + }; + return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ + .ty = src_loc_ty.ip_index, + .storage = .{ .elems = &fields }, + } })).toValue()); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -15916,69 +15898,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_info_tag_ty = type_info_ty.unionTagType(mod).?; switch (ty.zigTypeTag(mod)) { - .Type => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, 
@enumToInt(std.builtin.TypeId.Type)), - .val = Value.void, - }), - ), - .Void => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)), - .val = Value.void, - }), - ), - .Bool => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)), - .val = Value.void, - }), - ), - .NoReturn => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)), - .val = Value.void, - }), - ), - .ComptimeFloat => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)), - .val = Value.void, - }), - ), - .ComptimeInt => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)), - .val = Value.void, - }), - ), - .Undefined => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)), - .val = Value.void, - }), - ), - .Null => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)), - .val = Value.void, - }), - ), - .EnumLiteral => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)), - .val = Value.void, - }), - ), + .Type, + .Void, + .Bool, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index, + .val = .void_value, + } })).toValue()), .Fn => { // TODO: look into memoizing this result. 
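
The nine per-tag `addConstant` calls collapse above into one multi-prong switch prong: `=> |type_info_tag|` captures whichever enum tag matched, so a single interned `.un` (union) value covers all payload-less `TypeId` cases. The capture mechanism in a standalone, runnable form (hypothetical `TypeId` subset, not the real `std.builtin.TypeId`):

    const std = @import("std");

    const TypeId = enum { Type, Void, Bool };

    fn idByte(t: TypeId) u8 {
        return switch (t) {
            // One body for three prongs; |tag| is the matched variant.
            .Type, .Void, .Bool => |tag| @enumToInt(tag),
        };
    }

    test "multi-prong capture" {
        try std.testing.expectEqual(@as(u8, 1), idByte(.Void));
    }
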
const info = mod.typeToFunc(ty).?; @@ -15986,11 +15919,34 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); - const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); + const fn_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Fn", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try sema.ensureDeclAnalyzed(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); + const fn_info_ty = fn_info_decl.val.toType(); + + const param_info_decl_index = (try sema.namespaceLookup( + block, + src, + fn_info_ty.getNamespaceIndex(mod).unwrap().?, + "Param", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try sema.ensureDeclAnalyzed(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); + const param_info_ty = param_info_decl.val.toType(); + + const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len); + defer gpa.free(param_vals); for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| { const is_generic = param_ty == .generic_poison_type; - const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); @@ -15999,87 +15955,74 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :blk @truncate(u1, info.noalias_bits >> index) != 0; }; - const param_fields = try params_anon_decl.arena().create([3]Value); - param_fields.* = .{ + const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic), + Value.makeBool(is_generic).ip_index, // is_noalias: bool, - Value.makeBool(is_noalias), + Value.makeBool(is_noalias).ip_index, // type: ?type, - param_ty_val.toValue(), + param_ty_val, }; - param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); + param_val.* = try mod.intern(.{ .aggregate = .{ + .ty = param_info_ty.ip_index, + .storage = .{ .elems = ¶m_fields }, + } }); } const args_val = v: { - const fn_info_decl_index = (try sema.namespaceLookup( - block, - src, - type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Fn", - )).?; - try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); - try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = mod.declPtr(fn_info_decl_index); - const fn_ty = fn_info_decl.val.toType(); - const param_info_decl_index = (try sema.namespaceLookup( - block, - src, - fn_ty.getNamespaceIndex(mod).unwrap().?, - "Param", - )).?; - try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); - try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = mod.declPtr(param_info_decl_index); - const param_ty = param_info_decl.val.toType(); + const args_slice_ty = try mod.ptrType(.{ + .elem_type = param_info_ty.ip_index, + .size = .Slice, + .is_const = true, + }); const new_decl = try params_anon_decl.finish( try mod.arrayType(.{ .len = param_vals.len, - .child = param_ty.ip_index, + .child = param_info_ty.ip_index, .sentinel = .none, }), - try Value.Tag.aggregate.create( - params_anon_decl.arena(), - param_vals, - ), + (try mod.intern(.{ .aggregate 
= .{ + .ty = args_slice_ty.ip_index, + .storage = .{ .elems = param_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, param_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = args_slice_ty.ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index, + } }); }; - const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + const ret_ty_opt = try mod.intern(.{ .opt = .{ + .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (info.return_type == .generic_poison_type) .none else info.return_type, } }); const callconv_ty = try sema.getBuiltinType("CallingConvention"); - const field_values = try sema.arena.create([6]Value); - field_values.* = .{ + const field_values = .{ // calling_convention: CallingConvention, - try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)), + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index, // is_generic: bool, - Value.makeBool(info.is_generic), + Value.makeBool(info.is_generic).ip_index, // is_var_args: bool, - Value.makeBool(info.is_var_args), + Value.makeBool(info.is_var_args).ip_index, // return_type: ?type, - ret_ty_opt.toValue(), + ret_ty_opt, // args: []const Fn.Param, args_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = fn_info_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Int => { const signedness_ty = try sema.getBuiltinType("Signedness"); @@ -16099,24 +16042,36 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Float => { - const field_values = try sema.arena.alloc(Value, 1); - // bits: u16, - field_values[0] = try mod.intValue(Type.u16, ty.bitSize(mod)); - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const float_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Float", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); + try sema.ensureDeclAnalyzed(float_info_decl_index); + const float_info_decl = mod.declPtr(float_info_decl_index); + const float_ty = float_info_decl.val.toType(); + + const field_vals = .{ + // bits: u16, + (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index, + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index, + .val = try 
mod.intern(.{ .aggregate = .{ + .ty = float_ty.ip_index, + .storage = .{ .elems = &field_vals }, + } }), + } })).toValue()); }, .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) try mod.intValue(Type.comptime_int, info.@"align") else - try info.pointee_type.lazyAbiAlignment(mod, sema.arena); + try info.pointee_type.lazyAbiAlignment(mod); const addrspace_ty = try sema.getBuiltinType("AddressSpace"); const pointer_ty = t: { @@ -16245,9 +16200,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: { + const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const names = ty.errorSetNames(mod); - const vals = try fields_anon_decl.arena().alloc(Value, names.len); + const vals = try gpa.alloc(InternPool.Index, names.len); + defer gpa.free(vals); for (vals, names) |*field_val, name_ip| { const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { @@ -16259,70 +16215,91 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const error_field_fields = try fields_anon_decl.arena().create([1]Value); - error_field_fields.* = .{ + const error_field_fields = .{ // name: []const u8, name_val, }; - - field_val.* = try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - error_field_fields, - ); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = error_field_ty.ip_index, + .storage = .{ .elems = &error_field_fields }, + } }); } break :blk vals; }; // Build our ?[]const Error value - const errors_val = if (error_field_vals) |vals| v: { + const slice_errors_ty = try mod.ptrType(.{ + .elem_type = error_field_ty.ip_index, + .size = .Slice, + .is_const = true, + }); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index); + const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { + const array_errors_ty = try mod.arrayType(.{ + .len = vals.len, + .child = error_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = vals.len, - .child = error_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - vals, - ), + array_errors_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_errors_ty.ip_index, + .storage = .{ .elems = vals }, + } })).toValue(), 0, // default alignment ); - - const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = new_decl_val, - .len = try mod.intValue(Type.usize, vals.len), - }); - break :v try Value.Tag.opt_payload.create(sema.arena, slice_val); - } else Value.null; + break :v try mod.intern(.{ .ptr = .{ + .ty = slice_errors_ty.ip_index, + .addr = .{ .decl = new_decl }, + } }); + } else .none; + const errors_val = try mod.intern(.{ .opt = .{ + .ty = opt_slice_errors_ty.ip_index, + .val = errors_payload_val, + } }); // Construct Type{ .ErrorSet = errors_val } - return sema.addConstant( - type_info_ty, - try 
Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)), - .val = errors_val, - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index, + .val = errors_val, + } })).toValue()); }, .ErrorUnion => { - const field_values = try sema.arena.alloc(Value, 2); - // error_set: type, - field_values[0] = ty.errorUnionSet(mod).toValue(); - // payload: type, - field_values[1] = ty.errorUnionPayload(mod).toValue(); + const error_union_field_ty = t: { + const error_union_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "ErrorUnion", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); + try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); + const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); + break :t error_union_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // error_set: type, + ty.errorUnionSet(mod).ip_index, + // payload: type, + ty.errorUnionPayload(mod).ip_index, + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = error_union_field_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. @@ -16346,7 +16323,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len); + const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len); + defer gpa.free(enum_field_vals); for (enum_field_vals, 0..) |*field_val, i| { const name_ip = enum_type.names[i]; @@ -16360,56 +16338,81 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const enum_field_fields = try fields_anon_decl.arena().create([2]Value); - enum_field_fields.* = .{ + const enum_field_fields = .{ // name: []const u8, name_val, // value: comptime_int, - try mod.intValue(Type.comptime_int, i), + (try mod.intValue(Type.comptime_int, i)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = enum_field_ty.ip_index, + .storage = .{ .elems = &enum_field_fields }, + } }); } const fields_val = v: { + const fields_array_ty = try mod.arrayType(.{ + .len = enum_field_vals.len, + .child = enum_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = enum_field_vals.len, - .child = enum_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - enum_field_vals, - ), + fields_array_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = fields_array_ty.ip_index, + .storage = .{ .elems = enum_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = enum_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const type_enum_ty = t: { + const type_enum_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Enum", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); + try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); + const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); + break :t type_enum_ty_decl.val.toType(); + }; + + const field_values = .{ // tag_type: type, - enum_type.tag_ty.toValue(), + enum_type.tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive, + is_exhaustive.ip_index, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_enum_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Union => { // TODO: look into memoizing this result. 
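The .Enum branch above interns an aggregate shaped like std.builtin.Type.Enum
(tag_type, fields, decls, is_exhaustive). A minimal user-level sketch of the
value it produces, assuming a 2023-era (0.11-dev) Zig toolchain:

const std = @import("std");

test "@typeInfo(...).Enum carries the four fields built above" {
    const E = enum(u8) { x = 1, y = 2 };
    const info = @typeInfo(E).Enum;
    try std.testing.expect(info.tag_type == u8);
    try std.testing.expect(info.fields.len == 2);
    try std.testing.expectEqualStrings("x", info.fields[0].name);
    try std.testing.expect(info.is_exhaustive);
}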
@@ -16417,6 +16420,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_union_ty = t: { + const type_union_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Union", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); + try sema.ensureDeclAnalyzed(type_union_ty_decl_index); + const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); + break :t type_union_ty_decl.val.toType(); + }; + const union_field_ty = t: { const union_field_ty_decl_index = (try sema.namespaceLookup( block, @@ -16435,7 +16451,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const layout = union_ty.containerLayout(mod); const union_fields = union_ty.unionFields(mod); - const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); + const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count()); + defer gpa.free(union_field_vals); for (union_field_vals, 0..) |*field_val, i| { const field = union_fields.values()[i]; @@ -16449,51 +16466,62 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const union_field_fields = try fields_anon_decl.arena().create([3]Value); const alignment = switch (layout) { .Auto, .Extern => try sema.unionFieldAlignment(field), .Packed => 0, }; - union_field_fields.* = .{ + const union_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toValue(), + field.ty.ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, alignment), + (try mod.intValue(Type.comptime_int, alignment)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = union_field_ty.ip_index, + .storage = .{ .elems = &union_field_fields }, + } }); } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = union_field_vals.len, + .child = union_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = union_field_vals.len, - .child = union_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, union_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.ip_index, + .storage = .{ .elems = union_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, union_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = union_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); - const enum_tag_ty_val = if 
(union_ty.unionTagType(mod)) |tag_ty| v: { - const ty_val = tag_ty.toValue(); - break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); - } else Value.null; + const enum_tag_ty_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).ip_index, + .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none, + } }); const container_layout_ty = t: { const decl_index = (try sema.namespaceLookup( @@ -16508,10 +16536,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const field_values = .{ // layout: ContainerLayout, - try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, // tag_type: ?type, enum_tag_ty_val, @@ -16520,14 +16547,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_union_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Struct => { // TODO: look into memoizing this result. @@ -16535,6 +16562,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_struct_ty = t: { + const type_struct_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Struct", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); + try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); + const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); + break :t type_struct_ty_decl.val.toType(); + }; + const struct_field_ty = t: { const struct_field_ty_decl_index = (try sema.namespaceLookup( block, @@ -16547,14 +16587,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); break :t struct_field_ty_decl.val.toType(); }; + const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout const layout = struct_ty.containerLayout(mod); - const struct_field_vals = fv: { + var struct_field_vals: []InternPool.Index = &.{}; + defer gpa.free(struct_field_vals); + fv: { const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { .anon_struct_type => |tuple| { - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len); + struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for ( tuple.types, tuple.values, @@ -16574,38 +16617,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) field_val.toValue() else null; const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field_ty.toValue(), + field_ty, // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.ip_index, // is_comptime: bool, - Value.makeBool(is_comptime), + Value.makeBool(is_comptime).ip_index, // alignment: comptime_int, - try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()), + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index, }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + struct_field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.ip_index, + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; + break :fv; }, .struct_type => |s| s, else => unreachable, }; - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse - break :fv &[0]Value{}; - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count()); + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv; + struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count()); for ( struct_field_vals, @@ -16621,13 +16666,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const opt_default_val = if (field.default_val.ip_index == .unreachable_value) null else @@ -16635,55 +16680,61 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); const alignment = field.alignment(mod, layout); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toValue(), + field.ty.ip_index, // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.ip_index, // is_comptime: bool, - Value.makeBool(field.is_comptime), + Value.makeBool(field.is_comptime).ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, alignment), + (try mod.intValue(Type.comptime_int, alignment)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.ip_index, + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; - }; + } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = struct_field_vals.len, + .child = struct_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = struct_field_vals.len, - .child = struct_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, struct_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.ip_index, + .storage = .{ .elems = struct_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, struct_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = struct_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); - const backing_integer_val = blk: { - if (layout == .Packed) { + const backing_integer_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).ip_index, + .val = if (layout == .Packed) val: { const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - const backing_int_ty_val = struct_obj.backing_int_ty.toValue(); - break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); - } else { - break :blk Value.null; - } - }; + break :val struct_obj.backing_int_ty.ip_index; + } else .none, + } }); const container_layout_ty = t: { const decl_index = (try sema.namespaceLookup( @@ -16698,10 +16749,9 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([5]Value); - field_values.* = .{ + const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16709,36 +16759,48 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple(mod)), + Value.makeBool(struct_ty.isTuple(mod)).ip_index, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_struct_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Opaque => { // TODO: look into memoizing this result. + const type_opaque_ty = t: { + const type_opaque_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Opaque", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); + try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); + const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); + break :t type_opaque_ty_decl.val.toType(); + }; + const opaque_ty = try sema.resolveTypeFields(ty); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod)); - const field_values = try sema.arena.create([1]Value); - field_values.* = .{ + const field_values = .{ // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_opaque_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Frame => return sema.failWithUseOfAsync(block, src), .AnyFrame => return sema.failWithUseOfAsync(block, src), @@ -16751,7 +16813,7 @@ fn typeInfoDecls( src: LazySrcLoc, type_info_ty: Type, opt_namespace: Module.Namespace.OptionalIndex, -) CompileError!Value { +) CompileError!InternPool.Index { const mod = sema.mod; var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16770,7 +16832,7 @@ fn typeInfoDecls( }; try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(Value).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(sema.gpa); defer decl_vals.deinit(); var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); @@ -16778,33 +16840,39 @@ fn typeInfoDecls( if 
(opt_namespace.unwrap()) |namespace_index| { const namespace = mod.namespacePtr(namespace_index); - try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces); + try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces); } + const array_decl_ty = try mod.arrayType(.{ + .len = decl_vals.items.len, + .child = declaration_ty.ip_index, + .sentinel = .none, + }); const new_decl = try decls_anon_decl.finish( - try mod.arrayType(.{ - .len = decl_vals.items.len, - .child = declaration_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - decls_anon_decl.arena(), - try decls_anon_decl.arena().dupe(Value, decl_vals.items), - ), + array_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.ip_index, + .storage = .{ .elems = decl_vals.items }, + } })).toValue(), 0, // default alignment ); - return try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, decl_vals.items.len), - }); + return try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = declaration_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index, + } }); } fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, - decls_anon_decl: Allocator, namespace: *Namespace, - decl_vals: *std.ArrayList(Value), + declaration_ty: Type, + decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { const mod = sema.mod; @@ -16817,7 +16885,7 @@ fn typeInfoNamespaceDecls( if (decl.analysis == .in_progress) continue; try mod.ensureDeclAnalyzed(decl_index); const new_ns = decl.val.toType().getNamespace(mod).?; - try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); + try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces); continue; } if (decl.kind != .named) continue; @@ -16830,20 +16898,23 @@ fn typeInfoNamespaceDecls( try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(decls_anon_decl, .{ - .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const fields = try decls_anon_decl.create([2]Value); - fields.* = .{ + const fields = .{ //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub), + Value.makeBool(decl.is_pub).ip_index, }; - try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields)); + try decl_vals.append(try mod.intern(.{ .aggregate = .{ + .ty = declaration_ty.ip_index, + .storage = .{ .elems = &fields }, + } })); } } @@ -17454,10 +17525,11 @@ fn zirRetErrValue( // Return the error code from the function. 
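// The hunk below replaces the arena-allocated `Value.Tag.@"error"` payload
// with an interned `.err` key whose name is an InternPool string handle, so
// equal error values collapse to a single index. A minimal sketch of that
// dedup-by-handle idea, using a hypothetical std.StringArrayHashMap-backed
// pool rather than the compiler's InternPool:
//
//     fn getOrPutString(pool: *std.StringArrayHashMap(void), name: []const u8) !u32 {
//         const gop = try pool.getOrPut(name);
//         return @intCast(u32, gop.index); // same name always yields the same handle
//     }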
const kv = try mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try mod.singleErrorSetType(err_name), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); + const error_set_type = try mod.singleErrorSetType(err_name); + const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -17782,10 +17854,12 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. - if (val.castTag(.lazy_align)) |payload| { - if (payload.data.eql(elem_ty, sema.mod)) { - break :blk .none; - } + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none, + else => {}, + }, + else => {}, } const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); @@ -17910,12 +17984,10 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel(mod)) |sentinel| { - const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); - return sema.addConstant(obj_ty, val); - } else { - return sema.addConstant(obj_ty, Value.initTag(.empty_array)); - } + return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ + .ty = obj_ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue()); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -18679,8 +18751,8 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (ty.isNoReturn(mod)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const val = try ty.lazyAbiAlignment(mod, sema.arena); - if (val.isLazyAlign()) { + const val = try ty.lazyAbiAlignment(mod); + if (val.isLazyAlign(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -18704,7 +18776,8 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const bytes = val.castTag(.@"error").?.data.name; + const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name; + const bytes = sema.mod.intern_pool.stringToSlice(err_name); return sema.addStrLit(block, bytes); } @@ -18794,7 +18867,8 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const bytes = val.castTag(.enum_literal).?.data; + const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const bytes = mod.intern_pool.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, .Enum => operand_ty, @@ -18883,11 +18957,8 @@ fn zirReify( .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { 
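// Each lookup in this branch now uses the two-argument `fieldValue(mod, index)`,
// so callers no longer thread the field's type through by hand. At the user
// level the branch implements integer reification, e.g. (hedged sketch,
// assuming `const std = @import("std");` is in scope):
//
//     const U7 = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = 7 } });
//     comptime { std.debug.assert(U7 == u7); }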
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const signedness_index = fields.getIndex("signedness").?; - const bits_index = fields.getIndex("bits").?; - - const signedness_val = try union_val.val.fieldValue(fields.values()[signedness_index].ty, mod, signedness_index); - const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); + const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); + const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18896,11 +18967,8 @@ fn zirReify( }, .Vector => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const len_index = fields.getIndex("len").?; - const child_index = fields.getIndex("child").?; - - const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -18915,9 +18983,7 @@ fn zirReify( }, .Float => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const bits_index = fields.getIndex("bits").?; - - const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); + const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -18932,23 +18998,14 @@ fn zirReify( }, .Pointer => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const size_index = fields.getIndex("size").?; - const is_const_index = fields.getIndex("is_const").?; - const is_volatile_index = fields.getIndex("is_volatile").?; - const alignment_index = fields.getIndex("alignment").?; - const address_space_index = fields.getIndex("address_space").?; - const child_index = fields.getIndex("child").?; - const is_allowzero_index = fields.getIndex("is_allowzero").?; - const sentinel_index = fields.getIndex("sentinel").?; - - const size_val = try union_val.val.fieldValue(fields.values()[size_index].ty, mod, size_index); - const is_const_val = try union_val.val.fieldValue(fields.values()[is_const_index].ty, mod, is_const_index); - const is_volatile_val = try union_val.val.fieldValue(fields.values()[is_volatile_index].ty, mod, is_volatile_index); - const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); - const address_space_val = try union_val.val.fieldValue(fields.values()[address_space_index].ty, mod, address_space_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); - const is_allowzero_val = try union_val.val.fieldValue(fields.values()[is_allowzero_index].ty, mod, is_allowzero_index); - const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); + const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); + const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); + const is_volatile_val = try union_val.val.fieldValue(mod, 
fields.getIndex("is_volatile").?); + const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); + const address_space_val = try union_val.val.fieldValue(mod, fields.getIndex("address_space").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const is_allowzero_val = try union_val.val.fieldValue(mod, fields.getIndex("is_allowzero").?); + const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19032,22 +19089,18 @@ fn zirReify( }, .Array => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const len_index = fields.getIndex("len").?; - const child_index = fields.getIndex("child").?; - const sentinel_index = fields.getIndex("sentinel").?; - - const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); - const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); + const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); - const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { + const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, .pointee_type = child_ty, }); - break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; + break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod); @@ -19055,9 +19108,7 @@ fn zirReify( }, .Optional => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const child_index = fields.getIndex("child").?; - - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19066,11 +19117,8 @@ fn zirReify( }, .ErrorUnion => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const error_set_index = fields.getIndex("error_set").?; - const payload_index = fields.getIndex("payload").?; - - const error_set_val = try union_val.val.fieldValue(fields.values()[error_set_index].ty, mod, error_set_index); - const payload_val = try union_val.val.fieldValue(fields.values()[payload_index].ty, mod, payload_index); + const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); + const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19085,18 +19133,17 @@ fn zirReify( .ErrorSet => { const payload_val = union_val.val.optionalValue(mod) orelse return sema.addType(Type.anyerror); - const slice_val = payload_val.castTag(.slice).?.data; - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); + const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod)); var names: Module.Fn.InferredErrorSet.NameMap = .{}; try 
names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { - const elem_val = try slice_val.ptr.elemValue(mod, i); + const elem_val = try payload_val.elemValue(mod, i); const struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); const gop = names.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { @@ -19109,17 +19156,11 @@ fn zirReify( }, .Struct => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_index = fields.getIndex("layout").?; - const backing_integer_index = fields.getIndex("backing_integer").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - const is_tuple_index = fields.getIndex("is_tuple").?; - - const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); - const backing_integer_val = try union_val.val.fieldValue(fields.values()[backing_integer_index].ty, mod, backing_integer_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); - const is_tuple_val = try union_val.val.fieldValue(fields.values()[is_tuple_index].ty, mod, is_tuple_index); + const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); + const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const is_tuple_val = try union_val.val.fieldValue(mod, fields.getIndex("is_tuple").?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19136,15 +19177,10 @@ fn zirReify( }, .Enum => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const tag_type_index = fields.getIndex("tag_type").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - const is_exhaustive_index = fields.getIndex("is_exhaustive").?; - - const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); - const is_exhaustive_val = try union_val.val.fieldValue(fields.values()[is_exhaustive_index].ty, mod, is_exhaustive_index); + const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const is_exhaustive_val = try union_val.val.fieldValue(mod, fields.getIndex("is_exhaustive").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19195,7 +19231,7 @@ fn zirReify( const value_val = field_struct_val[1]; const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, sema.arena, mod, ); @@ -19237,9 +19273,7 @@ fn 
zirReify( }, .Opaque => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const decls_index = fields.getIndex("decls").?; - - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19283,15 +19317,10 @@ fn zirReify( }, .Union => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_index = fields.getIndex("layout").?; - const tag_type_index = fields.getIndex("tag_type").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - - const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); - const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); + const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19386,7 +19415,7 @@ fn zirReify( const alignment_val = field_struct_val[2]; const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, new_decl_arena_allocator, mod, ); @@ -19489,19 +19518,12 @@ fn zirReify( }, .Fn => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const calling_convention_index = fields.getIndex("calling_convention").?; - const alignment_index = fields.getIndex("alignment").?; - const is_generic_index = fields.getIndex("is_generic").?; - const is_var_args_index = fields.getIndex("is_var_args").?; - const return_type_index = fields.getIndex("return_type").?; - const params_index = fields.getIndex("params").?; - - const calling_convention_val = try union_val.val.fieldValue(fields.values()[calling_convention_index].ty, mod, calling_convention_index); - const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); - const is_generic_val = try union_val.val.fieldValue(fields.values()[is_generic_index].ty, mod, is_generic_index); - const is_var_args_val = try union_val.val.fieldValue(fields.values()[is_var_args_index].ty, mod, is_var_args_index); - const return_type_val = try union_val.val.fieldValue(fields.values()[return_type_index].ty, mod, return_type_index); - const params_val = try union_val.val.fieldValue(fields.values()[params_index].ty, mod, params_index); + const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); + const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); + const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); + const is_var_args_val = try union_val.val.fieldValue(mod, fields.getIndex("is_var_args").?); + const return_type_val = try union_val.val.fieldValue(mod, fields.getIndex("return_type").?); + const params_val = try union_val.val.fieldValue(mod, fields.getIndex("params").?); const is_generic = 
is_generic_val.toBool(mod); if (is_generic) { @@ -19528,14 +19550,12 @@ fn zirReify( const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - const args_slice_val = params_val.castTag(.slice).?.data; - const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); - + const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod)); const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { - const arg = try args_slice_val.ptr.elemValue(mod, i); + const arg = try params_val.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // is_generic: bool, @@ -19676,7 +19696,7 @@ fn reifyStruct( } const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, new_decl_arena_allocator, mod, ); @@ -19707,7 +19727,7 @@ fn reifyStruct( } const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: { - const payload_val = if (opt_val.pointerDecl()) |opt_decl| + const payload_val = if (opt_val.pointerDecl(mod)) |opt_decl| mod.declPtr(opt_decl).val else opt_val; @@ -20137,7 +20157,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = val.castTag(.@"error").?.data.name; + const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name); if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( @@ -20279,7 +20299,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { - return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.ip_index, + .val = operand_val.toIntern(), + } })).toValue()); } return sema.addConstant(aligned_dest_ty, operand_val); } @@ -20944,7 +20967,7 @@ fn checkPtrIsNotComptimeMutable( operand_src: LazySrcLoc, ) CompileError!void { _ = operand_src; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(sema.mod)) { return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); } } @@ -20953,7 +20976,7 @@ fn checkComptimeVarStore( sema: *Sema, block: *Block, src: LazySrcLoc, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!void { if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) { if (block.runtime_cond) |cond_src| { @@ -21159,7 +21182,7 @@ fn resolveExportOptions( const name_operand = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); - const name_ty = Type.const_slice_u8; + const name_ty = Type.slice_const_u8; const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); @@ -21168,7 +21191,7 @@ fn resolveExportOptions( const section_operand = try sema.fieldVal(block, src, options, 
"section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); - const section_ty = Type.const_slice_u8; + const section_ty = Type.slice_const_u8; const section = if (section_opt_val.optionalValue(mod)) |section_val| try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else @@ -21298,12 +21321,14 @@ fn zirCmpxchg( } const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; - const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { - try sema.storePtr(block, src, ptr, new_value); - break :blk Value.null; - } else try Value.Tag.opt_payload.create(sema.arena, stored_val); - - return sema.addConstant(result_ty, result_val); + const result_val = try mod.intern(.{ .opt = .{ + .ty = result_ty.ip_index, + .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { + try sema.storePtr(block, src, ptr, new_value); + break :blk .none; + } else stored_val.toIntern(), + } }); + return sema.addConstant(result_ty, result_val.toValue()); } else break :rs new_value_src; } else break :rs expected_src; } else ptr_src; @@ -21342,11 +21367,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); - - return sema.addConstant( - vector_ty, - try Value.Tag.repeated.create(sema.arena, scalar_val), - ); + return sema.addConstant(vector_ty, try sema.splat(vector_ty, scalar_val)); } try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src); @@ -21800,7 +21821,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { @@ -22081,10 +22102,15 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const payload = field_ptr_val.castTag(.field_ptr) orelse { - return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); - }; - if (payload.data.field_index != field_index) { + const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .field => |field| field, + else => null, + }, + else => null, + } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); + + if (field.index != field_index) { const msg = msg: { const msg = try sema.errMsg( block, @@ -22093,7 +22119,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr .{ field_name, field_index, - payload.data.field_index, + field.index, parent_ty.fmt(sema.mod), }, ); @@ -22103,7 +22129,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(result_ptr, payload.data.container_ptr); + return sema.addConstant(result_ptr, field.base.toValue()); } try sema.requireRuntimeBlock(block, src, ptr_src); @@ -22335,13 
+22361,13 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, mod), - .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod), + .max => try comptime_elem_ty.minInt(mod), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), + .min => try comptime_elem_ty.maxInt(mod, Type.comptime_int), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(mod, Type.comptime_int), else => unreachable, }; @@ -22464,7 +22490,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { - if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); @@ -22618,7 +22644,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void return; } - if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| { for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22696,6 +22722,7 @@ fn zirVarExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; @@ -22737,32 +22764,17 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - const new_var = try sema.gpa.create(Module.Var); - errdefer sema.gpa.destroy(new_var); - - log.debug("created variable {*} owner_decl: {*} ({s})", .{ - new_var, sema.owner_decl, sema.owner_decl.name, - }); - - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = init_val, + return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ + .ty = var_ty.ip_index, + .init = init_val.toIntern(), + .decl = sema.owner_decl_index, + .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( + sema.gpa, + try sema.handleExternLibName(block, ty_src, lname), + )).toOptional() else .none, .is_extern = small.is_extern, - .is_mutable = true, .is_threadlocal = small.is_threadlocal, - .is_weak_linkage = false, - .lib_name = null, - }; - - if (lib_name) |lname| { - new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname); - } - - const result = try sema.addConstant( - var_ty, - try Value.Tag.variable.create(sema.arena, new_var), - ); - return result; + } })).toValue()); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -22861,7 +22873,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; - const ty = Type.const_slice_u8; + const ty = Type.slice_const_u8; const val = try 
sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known"); if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; @@ -23133,10 +23145,10 @@ fn resolveExternOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExternOptions { + const mod = sema.mod; const options_inst = try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); - const mod = sema.mod; const name_src = sema.maybeOptionsSrc(block, src, "name"); const library_src = sema.maybeOptionsSrc(block, src, "library"); @@ -23145,7 +23157,7 @@ fn resolveExternOptions( const name_ref = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); - const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); @@ -23157,9 +23169,8 @@ fn resolveExternOptions( const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull(mod)) blk: { - const payload = library_name_val.castTag(.opt_payload).?.data; - const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { + const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } @@ -23227,40 +23238,36 @@ fn zirBuiltinExtern( new_decl.name = try sema.gpa.dupeZ(u8, options.name); { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const new_var = try new_decl_arena_allocator.create(Module.Var); - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = Value.@"unreachable", + const new_var = try mod.intern(.{ .variable = .{ + .ty = ty.ip_index, + .init = .none, + .decl = sema.owner_decl_index, .is_extern = true, - .is_mutable = false, + .is_const = true, .is_threadlocal = options.is_thread_local, .is_weak_linkage = options.linkage == .Weak, - .lib_name = null, - }; + } }); new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.anyopaque; - new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); + new_decl.ty = ty; + new_decl.val = new_var.toValue(); new_decl.@"align" = 0; new_decl.@"linksection" = null; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - - try new_decl.finalizeNewArena(&new_decl_arena); } try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); - const ref = try 
Value.Tag.decl_ref.create(sema.arena, new_decl_index); - return sema.addConstant(ty, ref); + const ref = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).ip_index, + .addr = .{ .decl = new_decl_index }, + } }); + return sema.addConstant(ty, ref.toValue()); } fn zirWorkItem( @@ -24117,7 +24124,6 @@ fn fieldVal( const mod = sema.mod; const gpa = sema.gpa; - const arena = sema.arena; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -24221,13 +24227,14 @@ fn fieldVal( else => unreachable, } - return sema.addConstant( - if (!child_type.isAnyError(mod)) - child_type - else - try mod.singleErrorSetTypeNts(name), - try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }), - ); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetTypeNts(name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = name, + } })).toValue()); }, .Union => { if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { @@ -24368,14 +24375,13 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.ptr_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.ip_index, + .addr = .{ .field = .{ + .base = val.ip_index, + .index = Value.slice_ptr_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24389,14 +24395,13 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.len_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.ip_index, + .addr = .{ .field = .{ + .base = val.ip_index, + .index = Value.slice_len_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24442,14 +24447,16 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetTypeNts(name); return sema.analyzeDeclRef(try anon_decl.finish( - if (!child_type.isAnyError(mod)) - child_type - else - try mod.singleErrorSetTypeNts(name), - try Value.Tag.@"error".create(anon_decl.arena(), .{ - .name = ip.stringToSlice(name), - }), + error_set_type, + (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = name, + } })).toValue(), 0, // default alignment )); }, @@ -24714,14 +24721,13 @@ fn finishFieldCallBind( } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const pointer = try sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = container_ty, - .field_index = field_index, - }), - ); + const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = struct_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -24901,22 +24907,22 @@ fn 
structFieldPtrByIndex( const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field.ty, - .field_val = try field.default_val.copy(sema.arena), - }); - return sema.addConstant(ptr_field_ty, val); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = struct_ptr_ty.childType(mod), - .field_index = field_index, - }), - ); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = try struct_ptr_val.intern(struct_ptr_ty, mod), + .index = field_index, + } }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24955,7 +24961,7 @@ fn structFieldVal( if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } - return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index)); + return sema.addConstant(field.ty, try struct_val.fieldValue(mod, field_index)); } try sema.requireRuntimeBlock(block, src, null); @@ -24999,7 +25005,7 @@ fn tupleFieldIndex( field_name_src: LazySrcLoc, ) CompileError!u32 { const mod = sema.mod; - assert(!std.mem.eql(u8, field_name, "len")); + assert(!mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ @@ -25109,14 +25115,13 @@ fn unionFieldPtr( }, .Packed, .Extern => {}, } - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = union_ptr_val, - .container_ty = union_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = union_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -25267,7 +25272,7 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod); + const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); }; @@ -25313,7 +25318,7 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod); + const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } @@ -25407,22 +25412,20 @@ fn tupleFieldPtr( }); if (try 
tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field_ty, - .field_val = default_val, - }); - return sema.addConstant(ptr_field_ty, val); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .comptime_field = default_val.ip_index }, + } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = tuple_ptr_val, - .container_ty = tuple_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = tuple_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } if (!init) { @@ -25463,7 +25466,7 @@ fn tupleField( if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25575,7 +25578,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod); + const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25631,7 +25634,7 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } @@ -25691,7 +25694,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25851,7 +25854,7 @@ fn coerceExtra( // Function body to function pointer. if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -26080,14 +26083,14 @@ fn coerceExtra( if (inst_child_ty.structFieldCount(mod) == 0) { // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. 
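
The replacement just below is the recurring shape of this entire patch: a tagged payload allocated out of `sema.arena` becomes a structured InternPool key, interned through `mod.intern` and wrapped back into a `Value` with `toValue()`. A minimal sketch of the two styles, using only APIs that appear in this diff; `dest_ty` and `payload_val` stand in for whatever is in scope:

    // Old style: tag + arena payload, tied to the lifetime of sema.arena.
    //   const v = try Value.Tag.opt_payload.create(sema.arena, payload_val);
    // New style: intern a key describing the value; the returned
    // InternPool.Index is deduplicated and permanent, and toValue()
    // wraps it without allocating.
    const v = (try mod.intern(.{ .opt = .{
        .ty = dest_ty.ip_index,
        .val = payload_val.toIntern(),
    } })).toValue();
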
- const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = if (dest_info.@"align" != 0) + return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{ + .ty = dest_ty.ip_index, + .addr = .{ .int = (if (dest_info.@"align" != 0) try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = try mod.intValue(Type.usize, 0), - }); - return sema.addConstant(dest_ty, slice_val); + try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index }, + .len = (try mod.intValue(Type.usize, 0)).ip_index, + } })).toValue()); } // pointer to tuple to slice @@ -26255,7 +26258,8 @@ fn coerceExtra( .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const bytes = val.castTag(.enum_literal).?.data; + const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const bytes = mod.intern_pool.stringToSlice(string); const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { const msg = try sema.errMsg( @@ -26292,26 +26296,30 @@ fn coerceExtra( if (maybe_inst_val) |inst_val| { switch (inst_val.ip_index) { .undef => return sema.addConstUndef(dest_ty), - .none => switch (inst_val.tag()) { - .eu_payload => { - const payload = try sema.addConstant( - inst_ty.errorUnionPayload(mod), - inst_val.castTag(.eu_payload).?.data, - ); - return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { - error.NotCoercible => break :eu, - else => |e| return e, - }; + else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| { + const error_set_ty = inst_ty.errorUnionSet(mod); + const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{ + .ty = error_set_ty.ip_index, + .name = err_name, + } })).toValue()); + return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); + }, + .payload => |payload| { + const payload_val = try sema.addConstant( + inst_ty.errorUnionPayload(mod), + payload.toValue(), + ); + return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) { + error.NotCoercible => break :eu, + else => |e| return e, + }; + }, }, - else => {}, + else => unreachable, }, - else => {}, } - const error_set = try sema.addConstant( - inst_ty.errorUnionSet(mod), - inst_val, - ); - return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); } }, .ErrorSet => { @@ -27029,7 +27037,7 @@ fn coerceInMemoryAllowedErrorSets( }, } - if (dst_ies.func == sema.owner_func) { + if (dst_ies.func == sema.owner_func_index.unwrap()) { // We are trying to coerce an error set to the current function's // inferred error set. 
try dst_ies.addErrorSet(src_ty, ip, gpa); @@ -27323,7 +27331,7 @@ fn coerceVarArgParam( ), .Fn => blk: { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; break :blk try sema.analyzeDeclRef(fn_decl); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), @@ -27441,7 +27449,7 @@ fn storePtr2( try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; @@ -27593,7 +27601,7 @@ fn storePtrVal( } const ComptimePtrMutationKit = struct { - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27619,12 +27627,12 @@ const ComptimePtrMutationKit = struct { decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); + const decl = mod.declPtr(self.decl_ref_mut.decl); return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); } fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); + const decl = mod.declPtr(self.decl_ref_mut.decl); decl.value_arena.?.release(&self.decl_arena); self.decl_arena = undefined; } @@ -27637,6 +27645,7 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { + if (true) unreachable; const mod = sema.mod; switch (ptr_val.tag()) { .decl_ref_mut => { @@ -28169,7 +28178,7 @@ fn beginComptimePtrMutation( }, } }, - .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already + .decl_ref => unreachable, // isComptimeMutablePtr has been checked already else => unreachable, } } @@ -28189,7 +28198,7 @@ fn beginComptimePtrMutationInner( const decl = mod.declPtr(decl_ref_mut.decl_index); var decl_arena: std.heap.ArenaAllocator = undefined; - const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); decl_val.* = try decl_val.unintern(allocator, mod); @@ -28273,44 +28282,83 @@ fn beginComptimePtrLoad( const mod = sema.mod; const target = mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) { - .null_value => { - return sema.fail(block, src, "attempt to use null value", .{}); - }, - - .none => switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - => blk: { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl => blk: { + const decl_index = switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; - const is_mutable = ptr_val.tag() == .decl_ref_mut; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; + if (decl.getVariable(mod) != null) return 
error.RuntimeLoad; const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = is_mutable, + .is_mutable = false, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, + .int => return error.RuntimeLoad, + .eu_payload, .opt_payload => |container_ptr| blk: { + const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); + const payload_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - .elem_ptr => blk: { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ty = elem_ptr.elem_ty; - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + // eu_payload and opt_payload never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } + + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), + .payload => |payload| payload, + }, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => opt.val, + }, + else => unreachable, + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val.toValue() }; + break :blk deref; + } + } + deref.pointee = null; + break :blk deref; + }, + .comptime_field => |comptime_field| blk: { + const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, + .is_mutable = false, + .ty_without_well_defined_layout = field_ty, + }; + }, + .elem => |elem_ptr| blk: { + const elem_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); // This code assumes that elem_ptrs have been "flattened" in order for direct dereference // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod))); + switch (mod.intern_pool.indexToKey(elem_ptr.base)) { + .ptr => |base_ptr| switch (base_ptr.addr) { + .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), + else => {}, + }, + else => {}, } if (elem_ptr.index != 0) { @@ -28327,7 +28375,7 @@ fn beginComptimePtrLoad( } } - // If we're loading an elem_ptr that was derived from a different type + // If we're loading an elem that was derived from a different type // than the true type of the underlying decl, we cannot deref directly const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { const deref_elem_ty = deref.pointee.?.ty.childType(mod); @@ -28373,31 +28421,25 @@ fn beginComptimePtrLoad( }; break :blk deref; }, + .field => |field_ptr| blk: { + const field_index = @intCast(u32, field_ptr.index); + const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); - .slice => blk: { - const slice = ptr_val.castTag(.slice).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); - }, - - .field_ptr => blk: { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - - if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { - const struct_obj = mod.typeToStruct(field_ptr.container_ty); + if (container_ty.hasWellDefinedLayout(mod)) { + const struct_obj = mod.typeToStruct(container_ty); if (struct_obj != null and struct_obj.?.layout == .Packed) { // packed structs are not byte addressable deref.parent = null; } else if (deref.parent) |*parent| { // Update the byte offset (in-place) - try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); + try sema.resolveTypeLayout(container_ty); + const field_offset = container_ty.structFieldOffset(field_index, mod); parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { deref.parent = null; - deref.ty_without_well_defined_layout = field_ptr.container_ty; + deref.ty_without_well_defined_layout = container_ty; } const tv = deref.pointee orelse { @@ -28405,294 +28447,40 @@ fn beginComptimePtrLoad( break :blk deref; }; const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; if (!coerce_in_mem_ok) { deref.pointee = null; break :blk deref; } - if (field_ptr.container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; + if (container_ty.isSlice(mod)) { deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(mod), - .val = slice_val.ptr, + Value.slice_ptr_index => TypedValue{ + .ty = 
container_ty.slicePtrFieldType(mod), + .val = tv.val.slicePtr(mod), }, - Value.Payload.Slice.len_index => TypedValue{ + Value.slice_len_index => TypedValue{ .ty = Type.usize, - .val = slice_val.len, + .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(), }, else => unreachable, }; } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index, mod); + const field_ty = container_ty.structFieldType(field_index, mod); deref.pointee = TypedValue{ .ty = field_ty, - .val = try tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(mod, field_index), }; } break :blk deref; }, - - .comptime_field_ptr => blk: { - const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, - .is_mutable = false, - .ty_without_well_defined_layout = comptime_field_ptr.field_ty, - }; - }, - - .opt_payload_ptr, - .eu_payload_ptr, - => blk: { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod), - .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), - else => unreachable, - }; - var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = payload_ptr.container_ty; - } - - if (deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .opt_payload => blk: { - const opt_payload = ptr_val.castTag(.opt_payload).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); - }, - - .variable, - .extern_fn, - .function, - => return error.RuntimeLoad, - - else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => return error.RuntimeLoad, - .ptr => |ptr| switch (ptr.addr) { - .@"var", .int => return error.RuntimeLoad, - .decl, .mut_decl => blk: { - const decl_index = switch (ptr.addr) { - .decl => |decl| decl, - .mut_decl => |mut_decl| mut_decl.decl, - else => unreachable, - }; - const decl = mod.declPtr(decl_index); - const decl_tv = try decl.typedValue(); - if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; - - const layout_defined = decl.ty.hasWellDefinedLayout(mod); - break :blk ComptimePtrLoadKit{ - .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .pointee = decl_tv, - .is_mutable = false, - .ty_without_well_defined_layout = if (!layout_defined) 
decl.ty else null, - }; - }, - .eu_payload, .opt_payload => |container_ptr| blk: { - const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); - const payload_ty = ptr.ty.toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = container_ty; - } - - if (deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .comptime_field => |comptime_field| blk: { - const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, - .is_mutable = false, - .ty_without_well_defined_layout = field_ty, - }; - }, - .elem => |elem_ptr| blk: { - const elem_ty = ptr.ty.toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); - - // This code assumes that elem_ptrs have been "flattened" in order for direct dereference - // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that - // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - switch (mod.intern_pool.indexToKey(elem_ptr.base)) { - .ptr => |base_ptr| switch (base_ptr.addr) { - .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), - else => {}, - }, - else => {}, - } - - if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout(mod)) { - if (deref.parent) |*parent| { - // Update the byte offset (in-place) - const elem_size = try sema.typeAbiSize(elem_ty); - const offset = parent.byte_offset + elem_size * elem_ptr.index; - parent.byte_offset = try sema.usizeCast(block, src, offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = elem_ty; - } - } - - // If we're loading an elem that was derived from a different type - // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(mod); - break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; - } else false; - if (!ty_matches) { - deref.pointee = null; - break :blk deref; - } - - var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); - if (maybe_array_ty) |load_ty| { - // It's possible that we're loading a [N]T, in which case we'd like to slice - // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); - deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, mod), - .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), - } else null; - break :blk deref; - } - } - - if (elem_ptr.index >= check_len) { - deref.pointee = null; - break :blk deref; - } - if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel(mod)) |sent| { - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = sent, - }; - break :blk deref; - } - } - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, elem_ptr.index), - }; - break :blk deref; - }, - .field => |field_ptr| blk: { - const field_index = @intCast(u32, field_ptr.index); - const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); - - if (container_ty.hasWellDefinedLayout(mod)) { - const struct_obj = mod.typeToStruct(container_ty); - if (struct_obj != null and struct_obj.?.layout == .Packed) { - // packed structs are not byte addressable - deref.parent = null; - } else if (deref.parent) |*parent| { - // Update the byte offset (in-place) - try sema.resolveTypeLayout(container_ty); - const field_offset = container_ty.structFieldOffset(field_index, mod); - parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = container_ty; - } - - const tv = deref.pointee orelse { - deref.pointee = null; - break :blk deref; - }; - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, 
src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; - if (!coerce_in_mem_ok) { - deref.pointee = null; - break :blk deref; - } - - if (container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; - deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = container_ty.slicePtrFieldType(mod), - .val = slice_val.ptr, - }, - Value.Payload.Slice.len_index => TypedValue{ - .ty = Type.usize, - .val = slice_val.len, - }, - else => unreachable, - }; - } else { - const field_ty = container_ty.structFieldType(field_index, mod); - deref.pointee = TypedValue{ - .ty = field_ty, - .val = try tv.val.fieldValue(tv.ty, mod, field_index), - }; - } - break :blk deref; - }, - }, - else => unreachable, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => try sema.beginComptimePtrLoad(block, src, opt.val.toValue(), null), }, + else => unreachable, }; if (deref.pointee) |tv| { @@ -28853,7 +28641,7 @@ fn coerceCompatiblePtrs( } // The comptime Value representation is compatible with both types. return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( - mod.gpa, + sema.gpa, try val.intern(inst_ty, mod), dest_ty.ip_index, )).toValue()); @@ -29538,7 +29326,7 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { }; } -fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { +fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void { sema.mod.ensureFuncBodyAnalyzed(func) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; @@ -29550,6 +29338,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { } fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { + const mod = sema.mod; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( @@ -29558,15 +29347,23 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { 0, // default alignment ); try sema.maybeQueueFuncBodyAnalysis(decl); - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl); - return try Value.Tag.decl_ref.create(sema.arena, decl); + try mod.declareDeclDependency(sema.owner_decl_index, decl); + const result = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).ip_index, + .addr = .{ .decl = decl }, + } }); + return result.toValue(); } fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { + const mod = sema.mod; const val = opt_val orelse return Value.null; const ptr_val = try sema.refValue(block, ty, val); - const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val); - return result; + const result = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index, + .val = ptr_val.ip_index, + } }); + return result.toValue(); } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { @@ -29587,10 +29384,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const ptr_ty = try mod.ptrType(.{ .elem_type = decl_tv.ty.ip_index, .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl_tv.val.castTag(.variable)) |payload| - !payload.data.is_mutable - else - false, + .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, .address_space = 
decl.@"addrspace", }); if (analyze_fn_body) { @@ -29608,8 +29402,8 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { const tv = try decl.typedValue(); if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; - const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try mod.ensureFuncBodyAnalysisQueued(func.data); + const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn + try mod.ensureFuncBodyAnalysisQueued(func_index); } fn analyzeRef( @@ -29622,14 +29416,12 @@ fn analyzeRef( if (try sema.resolveMaybeUndefVal(operand)) |val| { switch (val.ip_index) { - .none => switch (val.tag()) { - .extern_fn, .function => { - const decl_index = val.pointerDecl().?; - return sema.analyzeDeclRef(decl_index); - }, + .none => {}, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), else => {}, }, - else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -29854,7 +29646,7 @@ fn analyzeIsNonErrComptimeOnly( if (other_ies.errors.count() != 0) break :blk; } - if (ies.func == sema.owner_func) { + if (ies.func == sema.owner_func_index.unwrap()) { // We're checking the inferred errorset of the current function and none of // its child inferred error sets contained any errors meaning that any value // so far with this type can't contain errors either. @@ -29873,7 +29665,7 @@ fn analyzeIsNonErrComptimeOnly( if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (err_union.getError() == null) { + if (err_union.getError(mod) == null) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -30137,7 +29929,7 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sema.arena, sentinel_index, sema.mod); + const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -30233,7 +30025,7 @@ fn analyzeSlice( if (!new_ptr_val.isUndef(mod)) { return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( - mod.gpa, + sema.gpa, try new_ptr_val.intern(new_ptr_ty, mod), return_ty.ip_index, )).toValue()); @@ -30753,7 +30545,10 @@ fn wrapOptional( inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ + .ty = dest_ty.ip_index, + .val = val.ip_index, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30771,7 +30566,10 @@ fn wrapErrorUnionPayload( const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.ip_index, + .val = .{ .payload = val.ip_index }, + } })).toValue()); } try 
sema.requireRuntimeBlock(block, inst_src, null); try sema.queueFullTypeResolution(dest_payload_ty); @@ -30794,27 +30592,20 @@ fn wrapErrorUnionSet( .anyerror_type => {}, else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { .error_set_type => |error_set_type| ok: { - const expected_name = val.castTag(.@"error").?.data.name; - if (ip.getString(expected_name).unwrap()) |expected_name_interned| { - if (error_set_type.nameIndex(ip, expected_name_interned) != null) - break :ok; - } + const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |ies_index| ok: { const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = val.castTag(.@"error").?.data.name; + const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. if (ies.is_anyerror) break :ok; - if (ip.getString(expected_name).unwrap()) |expected_name_interned| { - if (ies.errors.contains(expected_name_interned)) break :ok; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, @@ -31462,43 +31253,33 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// to a type not having its layout resolved. fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { switch (val.ip_index) { - .none => switch (val.tag()) { - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return sema.resolveTypeLayout(ty); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return sema.resolveTypeLayout(ty); - }, - .comptime_field_ptr => { - const field_ptr = val.castTag(.comptime_field_ptr).?.data; - return sema.resolveLazyValue(field_ptr.field_val); - }, - .eu_payload, - .opt_payload, - => { - const sub_val = val.cast(Value.Payload.SubValue).?.data; - return sema.resolveLazyValue(sub_val); - }, - .@"union" => { - const union_val = val.castTag(.@"union").?.data; - return sema.resolveLazyValue(union_val.val); - }, - .aggregate => { - const aggregate = val.castTag(.aggregate).?.data; - for (aggregate) |elem_val| { - try sema.resolveLazyValue(elem_val); - } - }, - .slice => { - const slice = val.castTag(.slice).?.data; - try sema.resolveLazyValue(slice.ptr); - return sema.resolveLazyValue(slice.len); + .none => {}, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl, .mut_decl => {}, + .int => |int| try sema.resolveLazyValue(int.toValue()), + .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), + .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), + .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + } + if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); + }, + .aggregate => |aggregate| switch (aggregate.storage) { + 
.bytes => {}, + .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), + .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), + }, + .un => |un| { + try sema.resolveLazyValue(un.tag.toValue()); + try sema.resolveLazyValue(un.val.toValue()); }, - else => return, + else => {}, }, - else => return, } } @@ -31597,7 +31378,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { else blk: { const decl = mod.declPtr(struct_obj.owner_decl); var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + const decl_arena_allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); }; @@ -31662,18 +31443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - var sema: Sema = .{ - .mod = mod, - .gpa = gpa, - .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, - .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, - .func = null, - .fn_ret_ty = Type.void, - .owner_func = null, - }; + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none }; defer sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); @@ -31720,8 +31490,10 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -31974,16 +31746,23 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -32141,8 +31920,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, .anyerror_void_error_union_type, .generic_poison_type, .empty_struct_type, @@ -32288,18 +32067,19 @@ fn resolveInferredErrorSet( if (ies.is_resolved) return; - if (ies.func.state == .in_progress) { + const func = mod.funcPtr(ies.func); + if (func.state == .in_progress) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); } // In order to ensure that all dependencies are properly added to the set, we // need to ensure the function body is analyzed of the inferred error set. 
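
One more instance of the interning pattern, taken from the `typeHasOnePossibleValue` hunk further down: the old `Value.initTag(.empty_array)` and `.the_only_possible_value` sentinels become interned aggregates. A sketch with `ty` as the sequence type and `opv` as the element's only possible value:

    // Zero-length array/vector: the one possible value is the empty aggregate.
    const empty = (try mod.intern(.{ .aggregate = .{
        .ty = ty.ip_index,
        .storage = .{ .elems = &.{} },
    } })).toValue();
    // Element type has one possible value: splat it via repeated_elem storage.
    const splat = (try mod.intern(.{ .aggregate = .{
        .ty = ty.ip_index,
        .storage = .{ .repeated_elem = opv.ip_index },
    } })).toValue();
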
// However, in the case of comptime/inline function calls with inferred error sets, - // each call gets a new InferredErrorSet object, which points to the same - // `*Module.Fn`. Not only is the function not relevant to the inferred error set + // each call gets a new InferredErrorSet object, which contains the same + // `Module.Fn.Index`. Not only is the function not relevant to the inferred error set // in this case, it may be a generic function which would cause an assertion failure // if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl); + const ies_func_owner_decl = mod.declPtr(func.owner_decl); const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, // because inline function does not create a new declaration, and the ies has been filled with analyzeCall, @@ -32414,8 +32194,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -32754,8 +32536,10 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -33111,7 +32895,7 @@ fn generateUnionTagTypeNumbered( const name = name: { const fqn = try union_obj.getFullyQualifiedName(mod); defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33160,7 +32944,7 @@ fn generateUnionTagTypeSimple( const name = name: { const fqn = try union_obj.getFullyQualifiedName(mod); defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33288,19 +33072,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_error_set_type, => null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) { - return Value.initTag(.the_only_possible_value); + inline .array_type, .vector_type => |seq_type| { + if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = opv.ip_index }, + } })).toValue(); } return null; }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; - return null; - }, .opt_type => |child| { if (child == .noreturn_type) { return try mod.nullValue(ty); @@ -33466,16 +33250,23 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) 
CompileError!?Value { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -33625,10 +33416,13 @@ fn analyzeComptimeAlloc( decl.@"align" = alignment; try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); - return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .runtime_index = block.runtime_index, - .decl_index = decl_index, - })); + return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_type.ip_index, + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); } /// The places where a user can specify an address space attribute @@ -33969,16 +33763,23 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -34337,8 +34138,9 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - if (ty.ip_index == .comptime_int_type) return true; const mod = sema.mod; + if (ty.ip_index == .comptime_int_type) return true; + const info = ty.intInfo(mod); switch (val.ip_index) { .undef, .zero, @@ -34346,40 +34148,8 @@ fn intFitsInType( .zero_u8, => return true, - .none => switch (val.tag()) { - .lazy_align => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .lazy_size => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. 
- if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - - .the_only_possible_value => { - assert(ty.intInfo(mod).bits == 0); - return true; - }, - - .decl_ref_mut, - .extern_fn, - .decl_ref, - .function, - .variable, - => { - const info = ty.intInfo(mod); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable, .extern_func, .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { @@ -34387,27 +34157,51 @@ fn intFitsInType( .unsigned => info.bits >= ptr_bits, }; }, - - .aggregate => { - assert(ty.zigTypeTag(mod) == .Vector); - for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { - if (vector_index) |some| some.* = i; - return false; - } - } - return true; + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + .lazy_align => |lazy_ty| { + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + .lazy_size => |lazy_ty| { + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, }, - - else => unreachable, - }, - - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| { - const info = ty.intInfo(mod); - var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.fitsInTwosComp(info.signedness, info.bits); + .aggregate => |aggregate| { + assert(ty.zigTypeTag(mod) == .Vector); + return switch (aggregate.storage) { + .bytes => |bytes| for (bytes, 0..) |byte, i| { + if (byte == 0) continue; + const actual_needed_bits = std.math.log2(byte) + 1 + @boolToInt(info.signedness == .signed); + if (info.bits >= actual_needed_bits) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + .elems, .repeated_elem => for (switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems, + .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), + }, 0..) 
|elem, i| { + if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + }; }, else => unreachable, }, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 569c1430d5..2222c1060e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -102,248 +102,15 @@ pub fn print( return writer.writeAll(" }"); }, - .the_only_possible_value => return writer.writeAll("0"), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(mod); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(mod); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { - try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, - }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { - try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, - }, writer, level - 1, mod); - } - - if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) { - .anon_struct_type => |anon_struct| { - if (anon_struct.names.len == 0) { - return writer.print(".@\"{d}\"", .{field_ptr.field_index}); - } - }, - else => {}, - } - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { - const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index]; - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice(mod)) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } - }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { 
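+            // `.str_lit` stores an (index, len) pair into the module's shared
+            // `string_literal_bytes` buffer rather than an owned byte slice, so
+            // the bytes are resolved through `mod` before being escaped below.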
const str_lit = val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(mod), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(mod), - .val = ty.sentinel(mod).?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(mod); - const len = payload.len.toUnsignedInt(mod); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic - }; - if (elem_val.isUndef(mod)) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. - const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } - - try writer.writeAll(".{ "); - - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic - }; - try print(.{ - .ty = elem_ty, - .val = elem_val, - }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(mod); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - ty = ty.optionalChild(mod); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - if (level == 0) { - return writer.writeAll("(ptr)"); - } - - const data = val.castTag(.eu_payload_ptr).?.data; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = ty.toValue(), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - .opt_payload_ptr => { - if (level == 0) { - return writer.writeAll("&(ptr)"); - } - - const data = val.castTag(.opt_payload_ptr).?.data; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = ty.toValue(), - }, 
writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .runtime_value => return writer.writeAll("[runtime value]"), }, else => { const key = mod.intern_pool.indexToKey(val.ip_index); @@ -353,6 +120,12 @@ pub fn print( switch (key) { .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + .lazy_align => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiAlignment(mod), + }), + .lazy_size => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiSize(mod), + }), }, .enum_tag => |enum_tag| { if (level == 0) { @@ -407,7 +180,7 @@ fn printAggregate( } try print(.{ .ty = ty.structFieldType(i, mod), - .val = try val.fieldValue(ty, mod, i), + .val = try val.fieldValue(mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount(mod) > max_aggregate_items) { @@ -424,7 +197,7 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = try val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(mod, i); if (elem.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -441,7 +214,7 @@ fn printAggregate( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = try val.fieldValue(ty, mod, i), + .val = try val.fieldValue(mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/Zir.zig b/src/Zir.zig index 45a6fae90b..3afff5ba6a 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2108,8 +2108,8 @@ pub const Inst = struct { manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), - const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), - const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index c9126747da..faf158e2a4 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -328,7 +328,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -339,6 +339,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -4311,9 +4312,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: 
std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -4353,10 +4352,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .blr, .data = .{ .reg = .x30 }, }); - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -4627,7 +4625,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index fa8646be43..778662fe86 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -334,7 +334,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -345,6 +345,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -4291,9 +4292,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
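+        // With the InternPool transition the callee value is classified via
+        // `func_value.getFunction(mod)` and `func_value.getExternFunc(mod)`
+        // rather than the old `castTag(.function)` / `castTag(.extern_fn)` casts.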
if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -4308,7 +4307,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier @tagName(self.target.cpu.arch), }); } - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (func_value.getExternFunc(mod)) |_| { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -4573,7 +4572,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index faa2b2b7d0..a9cd130fa8 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -217,7 +217,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -228,6 +228,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -1745,8 +1746,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -1760,7 +1760,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .imm12 = 0, } }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1879,7 +1879,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 13f129f87b..dc086dc00f 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -260,7 +260,7 @@ const BigTomb = struct { pub fn generate( 
bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -271,6 +271,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -1346,8 +1347,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // on linking. if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -1374,7 +1374,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .nop, .data = .{ .nop = {} }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1663,7 +1663,8 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2d7e4a8585..66c0399343 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1203,20 +1203,22 @@ fn genFunctype( pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { _ = src_loc; + const mod = bin_file.options.module.?; + const func = mod.funcPtr(func_index); var code_gen: CodeGen = .{ .gpa = bin_file.allocator, .air = air, .liveness = liveness, .code = code, .decl_index = func.owner_decl, - .decl = bin_file.options.module.?.declPtr(func.owner_decl), + .decl = mod.declPtr(func.owner_decl), .err_msg = undefined, .locals = .{}, .target = bin_file.options.target, @@ -2196,27 +2198,33 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const callee: ?Decl.Index = blk: { const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; - if (func_val.castTag(.function)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); - break :blk function.data.owner_decl; - } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = mod.declPtr(extern_fn.data.owner_decl); + if (func_val.getFunction(mod)) |function| { + _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + break :blk function.owner_decl; + } else if (func_val.getExternFunc(mod)) |extern_func| { + const ext_decl = mod.declPtr(extern_func.decl); const 
ext_info = mod.typeToFunc(ext_decl.ty).?; var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type); + const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( mem.sliceTo(ext_decl.name, 0), atom.getSymbolIndex().?, - ext_decl.getExternFn().?.lib_name, + mod.intern_pool.stringToSliceUnwrap(ext_decl.getExternFunc(mod).?.lib_name), type_index, ); - break :blk extern_fn.data.owner_decl; - } else if (func_val.castTag(.decl_ref)) |decl_ref| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data); - break :blk decl_ref.data; + break :blk extern_func.decl; + } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| { + _ = try func.bin_file.getOrCreateAtomForDecl(decl); + break :blk decl; + }, + else => {}, + }, + else => {}, } return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); }; @@ -2932,29 +2940,41 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { +fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr; + switch (ptr.addr) { + .decl => |decl_index| { + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); + }, + .mut_decl => |mut_decl| { + const decl_index = mut_decl.decl; + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .decl_ref => { - const decl_index = ptr_val.castTag(.decl_ref).?.data; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), + .opt_payload => |base_ptr| { + return func.lowerParentPtr(base_ptr.toValue()); }, - .variable => { - const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .comptime_field => unreachable, + .elem => |elem| { + const index = elem.index; + const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); + const offset = index * elem_type.abiSize(mod); + const array_ptr = try func.lowerParentPtr(elem.base.toValue()); + + return WValue{ .memory_offset = .{ + .pointer = array_ptr.memory, + .offset = @intCast(u32, offset), + } }; }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_ty = field_ptr.container_ty; + .field => |field| { + const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + const parent_ptr = try func.lowerParentPtr(field.base.toValue()); - const field_offset = switch (parent_ty.zigTypeTag(mod)) { + const offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => 
parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), - else => parent_ty.structFieldOffset(field_ptr.field_index, mod), + .Packed => parent_ty.packedStructFieldByteOffset(field.index, mod), + else => parent_ty.structFieldOffset(field.index, mod), }, .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, @@ -2964,12 +2984,12 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk field_offset; + const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk offset; }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { - .Slice => switch (field_ptr.field_index) { + .Slice => switch (field.index) { 0 => 0, 1 => func.ptrSize(), else => unreachable, @@ -2978,19 +2998,23 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset)); - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(mod); - return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); - }, - .opt_payload_ptr => { - const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - return func.lowerParentPtr(payload_ptr.container_ptr, offset); + + return switch (parent_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + }, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, - else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}), } } @@ -3045,21 +3069,97 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } if (val.isUndefDeep(mod)) return func.emitUndefined(ty); - if (val.castTag(.decl_ref)) |decl_ref| { - const decl_index = decl_ref.data; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { - const decl_index = decl_ref_mut.data.decl_index; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - switch (ty.zigTypeTag(mod)) { - .Void => return WValue{ .none = {} }, - .Int => { + + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { + .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .Struct => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, 
struct_obj.backing_int_ty); + }, + .Vector => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .Frame, + .AnyFrame, + => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => return WValue{ .imm32 = switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + } }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { @@ -3080,86 +3180,71 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) }, - 32 => return WValue{ .float32 = val.toFloat(f32, mod) }, - 64 => return WValue{ .float64 = val.toFloat(f64, mod) }, - else => unreachable, - }, - .Pointer => return switch (val.ip_index) { - .null_value => WValue{ .imm32 = 0 }, - .none => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0), - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) }, - else => unreachable, - }, - }, - .Enum => { - const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; - const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); - return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); - }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?); - return WValue{ .imm32 = kv.value }; - }, - else => return WValue{ .imm32 = 0 }, + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + return WValue{ .imm32 = kv.value }; }, - .ErrorUnion => { + .error_union => { const error_type = ty.errorUnionSet(mod); const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
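+                // `errorUnionIsPayload` now takes the module so it can consult
+                // the InternPool; a payload value is lowered as the zero error code.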
- const is_pl = val.errorUnionIsPayload(); + const is_pl = val.errorUnionIsPayload(mod); const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload(mod)) { + .enum_tag => |enum_tag| { + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f32 => |f32_val| return WValue{ .float32 = f32_val }, + .f64 => |f64_val| return WValue{ .float64 = f64_val }, + else => unreachable, + }, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), + .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), + .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()), + .opt_payload, .elem, .field => return func.lowerParentPtr(val), + else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}), + }, + .opt => if (ty.optionalReprIsPayload(mod)) { const pl_ty = ty.optionalChild(mod); - if (val.castTag(.opt_payload)) |payload| { - return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull(mod)) { - return WValue{ .imm32 = 0 }; + if (val.optionalValue(mod)) |payload| { + return func.lowerConstant(payload, pl_ty); } else { - return func.lowerConstant(val, pl_ty); + return WValue{ .imm32 = 0 }; } } else { - const is_pl = val.tag() == .opt_payload; - return WValue{ .imm32 = @boolToInt(is_pl) }; - }, - .Struct => { - const struct_obj = mod.typeToStruct(ty).?; - assert(struct_obj.layout == .Packed); - var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer - val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - const int_val = try mod.intValue( - struct_obj.backing_int_ty, - std.mem.readIntLittle(u64, &buf), - ); - return func.lowerConstant(int_val, struct_obj.backing_int_ty); + return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) }; }, - .Vector => { - assert(determineSimdStoreStrategy(ty, mod) == .direct); - var buf: [16]u8 = undefined; - val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; - return func.storeSimdImmd(buf); - }, - .Union => { - // in this case we have a packed union which will not be passed by reference. 
- const union_ty = mod.typeToUnion(ty).?; - const union_obj = val.castTag(.@"union").?.data; - const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; - const field_ty = union_ty.fields.values()[field_index].ty; - return func.lowerConstant(union_obj.val, field_ty); + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .vector_type => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .struct_type, .anon_struct_type => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + else => unreachable, }, - else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), } } @@ -3221,31 +3306,33 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .bool_true => return 1, .bool_false => return 0, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int), - .int => |int| intStorageAsI32(int.storage), - .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int), + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), + .int => |int| intStorageAsI32(int.storage, mod), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), else => unreachable, }, } switch (ty.zigTypeTag(mod)) { .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function + const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError(mod).?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, else => unreachable, // Programmer called this function for an illegal type } } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 { - return intStorageAsI32(ip.indexToKey(int).int.storage); +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, mod); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 { +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { return switch (storage) { .i64 => |x| @intCast(i32, x), .u64 => |x| @bitCast(i32, @intCast(u32, x)), .big_int => unreachable, + .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), }; } @@ -5514,7 +5601,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // As the names are global and the slice elements are constant, we do not have // to make a copy of the ptr+value but can point towards them directly. 
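+    // The canonical `[:0]const u8` slice type is now `slice_const_u8_sentinel_0`,
+    // matching the renamed `InternPool.Index` fields.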
const error_table_symbol = try func.bin_file.getErrorTableSymbol(); - const name_ty = Type.const_slice_u8_sentinel_0; + const name_ty = Type.slice_const_u8_sentinel_0; const mod = func.bin_file.base.options.module.?; const abi_size = name_ty.abiSize(mod); @@ -6935,7 +7022,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 77b4e6d425..4a5532a239 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -632,7 +632,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -643,6 +643,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -687,7 +688,7 @@ pub fn generate( @enumToInt(FrameIndex.stack_frame), FrameAlloc.init(.{ .size = 0, - .alignment = if (mod.align_stack_fns.get(module_fn)) |set_align_stack| + .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| set_align_stack.alignment else 1, @@ -2760,19 +2761,18 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = src_ty.childType(mod); const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); - var splat_pl = Value.Payload.SubValue{ - .base = .{ .tag = .repeated }, - .data = mask_val, - }; - const splat_val = Value.initPayload(&splat_pl.base); - - const full_ty = try mod.vectorType(.{ + const splat_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), .child = elem_ty.ip_index, }); - const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); + const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod)); + + const splat_val = try mod.intern(.{ .aggregate = .{ + .ty = splat_ty.ip_index, + .storage = .{ .repeated_elem = mask_val.ip_index }, + } }); - const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); + const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() }); const splat_addr_mcv: MCValue = switch (splat_mcv) { .memory, .indirect, .load_frame => splat_mcv.address(), else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) }, @@ -2784,14 +2784,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { .{ .vp_, .@"and" }, dst_reg, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); } else { try self.asmRegisterMemory( .{ .p_, .@"and" }, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg); } @@ -4893,23 +4893,14 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const 
dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - var arena = std.heap.ArenaAllocator.init(self.gpa); - defer arena.deinit(); - - const ExpectedContents = struct { - repeated: Value.Payload.SubValue, - }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - const vec_ty = try mod.vectorType(.{ .len = @divExact(abi_size * 8, scalar_bits), .child = (try mod.intType(.signed, scalar_bits)).ip_index, }); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(stack.get(), mod), - .fabs => try vec_ty.maxInt(stack.get(), mod, vec_ty), + .neg => try vec_ty.minInt(mod), + .fabs => try vec_ty.maxInt(mod, vec_ty), else => unreachable, }; @@ -8106,13 +8097,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. if (try self.air.value(callee, mod)) |func_value| { - if (if (func_value.castTag(.function)) |func_payload| - func_payload.data.owner_decl - else if (func_value.castTag(.decl_ref)) |decl_ref_payload| - decl_ref_payload.data - else - null) |owner_decl| - { + const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + if (switch (func_key) { + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => null, + }, + else => null, + }) |owner_decl| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl); const atom = elf_file.getAtom(atom_index); @@ -8145,10 +8138,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .disp = @intCast(i32, fn_got_addr), })); } else unreachable; - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); @@ -8554,7 +8546,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); diff --git a/src/codegen.zig b/src/codegen.zig index 775eb09ab0..b9b7dac90f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -14,6 +14,7 @@ const Air = @import("Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("Compilation.zig"); const ErrorMsg = Module.ErrorMsg; +const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Module = @import("Module.zig"); const Target = std.Target; @@ -66,7 +67,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( bin_file: *link.File, src_loc: Module.SrcLoc, - func: 
*Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -75,17 +76,17 @@ pub fn generateFunction( switch (bin_file.options.target.cpu.arch) { .arm, .armeb, - => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .aarch64, .aarch64_be, .aarch64_32, - => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .wasm32, .wasm64, - => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), else => unreachable, } } @@ -182,12 +183,13 @@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (arg_tv.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); @@ -199,35 +201,10 @@ pub fn generateSymbol( if (typed_value.val.isUndefDeep(mod)) { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); - return Result.ok; + return .ok; } - switch (typed_value.ty.zigTypeTag(mod)) { - .Fn => { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol function pointers", - .{}, - ), - }; - }, - .Float => { - switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)), - 80 => { - writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - try code.appendNTimes(0, abi_size - 10); - }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)), - else => unreachable, - } - return Result.ok; - }, + if (typed_value.val.ip_index == .none) 
switch (typed_value.ty.zigTypeTag(mod)) { .Array => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; @@ -248,62 +225,6 @@ pub fn generateSymbol( } return Result.ok; }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); - for (elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(mod); - const sentinel = typed_value.ty.sentinel(mod); - const len = typed_value.ty.arrayLen(mod); - - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (sentinel) |sentinel_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - return Result.ok; - }, - .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(mod); - const sentinel_val = typed_value.ty.sentinel(mod).?; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - return Result.ok; - }, else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -313,195 +234,6 @@ pub fn generateSymbol( ), }, }, - .Pointer => switch (typed_value.val.ip_index) { - .null_value => { - switch (target.ptrBitWidth()) { - 32 => { - mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 4); - }, - 64 => { - mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 8); - }, - else => unreachable, - } - return Result.ok; - }, - .none => switch (typed_value.val.tag()) { - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; - - // generate ptr - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else 
=> return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, - }, - else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { - .int => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - else => unreachable, - }, - }, - .Int => { - const info = typed_value.ty.intInfo(mod); - if (info.bits <= 8) { - const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))), - }; - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, mod); - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - const start = code.items.len; - try code.resize(start + abi_size); - bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); - return Result.ok; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(mod)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(mod)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(mod)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(mod)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toSignedInt(mod); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Enum => { - const int_val = try typed_value.enumToInt(mod); - - const info = typed_value.ty.intInfo(mod); - if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(mod)); - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for big int enums ('{}')", - .{typed_value.ty.fmt(mod)}, - ), - }; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(mod)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(mod)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = int_val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(mod)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(mod)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = 
int_val.toSignedInt(mod); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Bool => { - const x: u8 = @boolToInt(typed_value.val.toBool(mod)); - try code.append(x); - return Result.ok; - }, .Struct => { if (typed_value.ty.containerLayout(mod) == .Packed) { const struct_obj = mod.typeToStruct(typed_value.ty).?; @@ -562,370 +294,497 @@ pub fn generateSymbol( return Result.ok; }, - .Union => { - const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(mod); + .Vector => switch (typed_value.val.tag()) { + .bytes => { + const bytes = typed_value.val.castTag(.bytes).?.data; + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse + return error.Overflow; + try code.ensureUnusedCapacity(len + padding); + code.appendSliceAssumeCapacity(bytes[0..len]); + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + return Result.ok; + }, + .str_lit => { + const str_lit = typed_value.val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse + return error.Overflow; + try code.ensureUnusedCapacity(str_lit.len + padding); + code.appendSliceAssumeCapacity(bytes); + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + return Result.ok; + }, + else => unreachable, + }, + .Frame, + .AnyFrame, + => return .{ .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO generateSymbol for type {}", + .{typed_value.ty.fmt(mod)}, + ) }, + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; - if (layout.payload_size == 0) { - return generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType(mod).?, - .val = union_obj.tag, - }, code, debug_output, reloc_info); + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try code.append(switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + }), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + var space: Value.BigIntSpace = undefined; + const val = typed_value.val.toBigInt(&space, mod); + val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); + }, + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + try code.writer().writeInt(u16, @intCast(u16, kv.value), endian); + }, + .error_union => |error_union| { + const 
payload_ty = typed_value.ty.errorUnionPayload(mod); + + const err_val = switch (error_union.val) { + .err_name => |err_name| @intCast(u16, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value), + .payload => @as(u16, 0), + }; + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + try code.writer().writeInt(u16, err_val, endian); + return .ok; } - // Check if we should store the tag first. - if (layout.tag_align >= layout.payload_align) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType(mod).?, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); + + // emit the error value first when its alignment is greater than the payload's + if (error_align > payload_align) { + try code.writer().writeInt(u16, err_val, endian); } - const union_ty = mod.typeToUnion(typed_value.ty).?; - const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; - assert(union_ty.haveFieldTypes()); - const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits(mod)) { - try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); - } else { + // emit payload part of the error union + { + const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = union_obj.val, + .ty = payload_ty, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, - .fail => |em| return Result{ .fail = em }, + .fail => |em| return .{ .fail = em }, } + const unpadded_end = code.items.len - begin; + const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } } - if (layout.tag_size > 0) { + // Payload alignment is at least the error set's, so emit the error value last + if (error_align <= payload_align) { + const begin = code.items.len; + try code.writer().writeInt(u16, err_val, endian); + const unpadded_end = code.items.len - begin; + const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + }, + .enum_tag => |enum_tag| { + const int_tag_ty = try typed_value.ty.intTagType(mod); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = int_tag_ty, + .val = (try mod.intern_pool.getCoerced(mod.gpa, enum_tag.int, int_tag_ty.ip_index)).toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)), + .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)), + .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), + .f80 => |f80_val| { + writeFloat(f80, f80_val, target, endian, try
code.addManyAsArray(10)); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + try code.appendNTimes(0, abi_size - 10); + }, + .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), + }, + .ptr => |ptr| { + // generate ptr + switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) { + .none => typed_value.val, + else => typed_value.val.slicePtr(mod), + }.ip_index, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + if (ptr.len != .none) { + // generate len switch (try generateSymbol(bin_file, src_loc, .{ - .ty = union_ty.tag_ty, - .val = union_obj.tag, + .ty = Type.usize, + .val = ptr.len.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - - if (layout.padding > 0) { - try code.writer().writeByteNTimes(0, layout.padding); - } - - return Result.ok; }, - .Optional => { + .opt => { const payload_type = typed_value.ty.optionalChild(mod); - const is_pl = !typed_value.val.isNull(mod); + const payload_val = typed_value.val.optionalValue(mod); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - if (!payload_type.hasRuntimeBits(mod)) { - try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); - return Result.ok; - } - if (typed_value.ty.optionalReprIsPayload(mod)) { - if (typed_value.val.castTag(.opt_payload)) |payload| { + if (payload_val) |value| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, - .val = payload.data, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else if (!typed_value.val.isNull(mod)) { + } else { + try code.writer().writeByteNTimes(0, abi_size); + } + } else { + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; + if (payload_type.hasRuntimeBits(mod)) { + const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.ip_index })).toValue(); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, - .val = typed_value.val, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else { - try code.writer().writeByteNTimes(0, abi_size); } - - return Result.ok; + try code.writer().writeByte(@boolToInt(payload_val != null)); + try code.writer().writeByteNTimes(0, padding); } + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.ip_index)) { + .array_type => |array_type| { + var index: u64 = 0; + while (index < array_type.len) : (index += 1) { + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + }, + } + } - const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.undef; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = value, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - try 
code.writer().writeByte(@boolToInt(is_pl)); - try code.writer().writeByteNTimes(0, padding); + if (array_type.sentinel != .none) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = array_type.sentinel.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, + .vector_type => |vector_type| { + var index: u32 = 0; + while (index < vector_type.len) : (index += 1) { + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ + .ty = vector_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + }, + } + } - return Result.ok; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) { + error.DivisionByZero => unreachable, + else => |e| return e, + })) orelse return error.Overflow; + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + }, + .struct_type, .anon_struct_type => { + if (typed_value.ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(typed_value.ty).?; + const fields = struct_obj.fields.values(); + const field_vals = typed_value.val.castTag(.aggregate).?.data; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + const current_pos = code.items.len; + try code.resize(current_pos + abi_size); + var bits: u16 = 0; + + for (field_vals, 0..) |field_val, index| { + const field_ty = fields[index].ty; + // a pointer may point to a decl which must be marked used + // but can also result in a relocation. Therefore we handle those separately. + if (field_ty.zigTypeTag(mod) == .Pointer) { + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; + var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); + defer tmp_list.deinit(); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val, + }, &tmp_list, debug_output, reloc_info)) { + .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), + .fail => |em| return Result{ .fail = em }, + } + } else { + field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; + } + bits += @intCast(u16, field_ty.bitSize(mod)); + } + } else { + const struct_begin = code.items.len; + const field_vals = typed_value.val.castTag(.aggregate).?.data; + for (field_vals, 0..)
|field_val, index| { + const field_ty = typed_value.ty.structFieldType(index, mod); + if (!field_ty.hasRuntimeBits(mod)) continue; + + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + const unpadded_field_end = code.items.len - struct_begin; + + // Pad struct members if required + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); + const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + } + }, + else => unreachable, }, - .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(mod); - const payload_ty = typed_value.ty.errorUnionPayload(mod); - const is_payload = typed_value.val.errorUnionIsPayload(); + .un => |un| { + const layout = typed_value.ty.unionGetLayout(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; + if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = err_val, + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), }, code, debug_output, reloc_info); } - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const abi_align = typed_value.ty.abiAlignment(mod); - - // error value first when its type is larger than the error union's payload - if (error_align > payload_align) { + // Check if we should store the tag first. + if (layout.tag_align >= layout.payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - // emit payload part of the error union - { - const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.undef; + const union_ty = mod.typeToUnion(typed_value.ty).?; + const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?; + assert(union_ty.haveFieldTypes()); + const field_ty = union_ty.fields.values()[field_index].ty; + if (!field_ty.hasRuntimeBits(mod)) { + try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); + } else { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_ty, - .val = payload_val, + .ty = field_ty, + .val = un.val.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); - const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } } - // Payload size is larger than error set, so emit our error set last - if (error_align <= payload_align) { - const begin = code.items.len; + if (layout.tag_size > 0) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, + .ty = 
union_ty.tag_ty, + .val = un.tag.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); - const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const name = typed_value.val.getError().?; - const kv = try bin_file.options.module.?.getErrorValue(name); - try code.writer().writeInt(u32, kv.value, endian); - }, - else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod))); - }, } - return Result.ok; }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - for (elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = typed_value.ty.arrayLen(mod); - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - else => unreachable, - }, - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for type '{s}'", - .{@tagName(tag)}, - ) }, } + return .ok; } fn lowerParentPtr( 
bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, - parent_ptr: Value, + parent_ptr: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { const mod = bin_file.options.module.?; - switch (parent_ptr.tag()) { - .field_ptr => { - const field_ptr = parent_ptr.castTag(.field_ptr).?.data; + const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr; + assert(ptr.len == .none); + return switch (ptr.addr) { + .decl, .mut_decl => try lowerDeclRef( + bin_file, + src_loc, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), + .int => |int| try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = int.toValue(), + }, code, debug_output, reloc_info), + .eu_payload => |eu_payload| try lowerParentPtr( + bin_file, + src_loc, + eu_payload, + code, + debug_output, + reloc_info.offset(@intCast(u32, errUnionPayloadOffset( + mod.intern_pool.typeOf(eu_payload).toType(), + mod, + ))), + ), + .opt_payload => |opt_payload| try lowerParentPtr( + bin_file, + src_loc, + opt_payload, + code, + debug_output, + reloc_info, + ), + .elem => |elem| try lowerParentPtr( + bin_file, + src_loc, + elem.base, + code, + debug_output, + reloc_info.offset(@intCast(u32, elem.index * + mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), + ), + .field => |field| { + const base_type = mod.intern_pool.typeOf(field.base); return lowerParentPtr( bin_file, src_loc, - typed_value, - field_ptr.container_ptr, + field.base, code, debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { - .Pointer => offset: { - assert(field_ptr.container_ty.isSlice(mod)); - break :offset switch (field_ptr.field_index) { + reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One, .Many, .C => unreachable, + .Slice => switch (field.index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(mod).abiSize(mod), + 1 => @divExact(mod.getTarget().ptrBitWidth(), 8), else => unreachable, - }; + }, }, - .Struct, .Union => field_ptr.container_ty.structFieldOffset( - field_ptr.field_index, + .struct_type, + .anon_struct_type, + .union_type, + => @intCast(u32, base_type.toType().childType(mod).structFieldOffset( + @intCast(u32, field.index), mod, - ), - else => return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for field_ptr with a container of type {}", - .{field_ptr.container_ty.fmt(bin_file.options.module.?)}, - ) }, - })), - ); - }, - .elem_ptr => { - const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - elem_ptr.array_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))), - ); - }, - .opt_payload_ptr => { - const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - opt_payload_ptr.container_ptr, - code, - debug_output, - reloc_info, - ); - }, - .eu_payload_ptr => { - const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; - const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - eu_payload_ptr.container_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, 
mod))), + )), + else => unreachable, + }), ); }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => parent_ptr.castTag(.variable).?.data.owner_decl, - .decl_ref => parent_ptr.castTag(.decl_ref).?.data, - .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for type '{s}'", - .{@tagName(tag)}, - ) }, - } + .comptime_field => unreachable, + }; } const RelocInfo = struct { @@ -940,36 +799,15 @@ const RelocInfo = struct { fn lowerDeclRef( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, decl_index: Module.Decl.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { + _ = src_loc; + _ = debug_output; const target = bin_file.options.target; const mod = bin_file.options.module.?; - if (typed_value.ty.isSlice(mod)) { - // generate ptr - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - } const ptr_width = target.ptrBitWidth(); const decl = mod.declPtr(decl_index); @@ -1154,12 +992,13 @@ pub fn genTypedValue( arg_tv: TypedValue, owner_decl_index: Module.Decl.Index, ) CodeGenError!GenResult { + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (typed_value.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), @@ -1171,17 +1010,14 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if (!typed_value.ty.isSlice(mod)) { - if (typed_value.val.castTag(.variable)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); - } - if (typed_value.val.castTag(.decl_ref)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data); - } - if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index); - } - } + if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl), + .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl), + else => {}, + }, + else => {}, + }; switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), @@ -1215,11 +1051,9 @@ pub fn genTypedValue( }, .Optional => { if (typed_value.ty.isPtrLikeOptional(mod)) { - if (typed_value.val.ip_index == .null_value) return GenResult.mcv(.{ .immediate = 0 }); - return 
genTypedValue(bin_file, src_loc, .{ .ty = typed_value.ty.optionalChild(mod), - .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, + .val = typed_value.val.optionalValue(mod) orelse return GenResult.mcv(.{ .immediate = 0 }), }, owner_decl_index); } else if (typed_value.ty.abiSize(mod) == 1) { return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); @@ -1234,24 +1068,15 @@ pub fn genTypedValue( }, owner_decl_index); }, .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return GenResult.mcv(.{ .immediate = error_index }); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return GenResult.mcv(.{ .immediate = 0 }); - }, - } + const err_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(typed_value.val.ip_index).err.name); + const global_error_set = mod.global_error_set; + const error_index = global_error_set.get(err_name).?; + return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(mod); const payload_type = typed_value.ty.errorUnionPayload(mod); - const is_pl = typed_value.val.errorUnionIsPayload(); + const is_pl = typed_value.val.errorUnionIsPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d3b8e06e5d..1bb8130b1f 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -257,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { return .{ .data = ident }; } -/// This data is available when outputting .c code for a `*Module.Fn`. +/// This data is available when outputting .c code for a `Module.Fn.Index`. /// It is not available when generating .h file. pub const Function = struct { air: Air, @@ -268,7 +268,7 @@ pub const Function = struct { next_block_index: usize = 0, object: Object, lazy_fns: LazyFnMap, - func: *Module.Fn, + func_index: Module.Fn.Index, /// All the locals, to be emitted at the top of the function. locals: std.ArrayListUnmanaged(Local) = .{}, /// Which locals are available for reuse, based on Type. @@ -549,33 +549,12 @@ pub const DeclGen = struct { } // Chase function values in order to be able to reference the original function. 
- inline for (.{ .function, .extern_fn }) |tag| - if (decl.val.castTag(tag)) |func| - if (func.data.owner_decl != decl_index) - return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location); + if (decl.getFunction(mod)) |func| if (func.owner_decl != decl_index) + return dg.renderDeclValue(writer, ty, val, func.owner_decl, location); + if (decl.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) + return dg.renderDeclValue(writer, ty, val, extern_func.decl, location); - if (decl.val.castTag(.variable)) |var_payload| - try dg.renderFwdDecl(decl_index, var_payload.data); - - if (ty.isSlice(mod)) { - if (location == .StaticInitializer) { - try writer.writeByte('{'); - } else { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeAll("){ .ptr = "); - } - - try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(mod), .Initializer); - - const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); - - if (location == .StaticInitializer) { - return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } else { - return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } - } + if (decl.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function @@ -594,125 +573,77 @@ pub const DeclGen = struct { /// Renders a "parent" pointer by recursing to the root decl/variable /// that its contents are defined with respect to. - /// - /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr fn renderParentPtr( dg: *DeclGen, writer: anytype, - ptr_val: Value, - ptr_ty: Type, + ptr_val: InternPool.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; - - if (!ptr_ty.isSlice(mod)) { - try writer.writeByte('('); - try dg.renderType(writer, ptr_ty); - try writer.writeByte(')'); - } - if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), - else => unreachable, - }; - switch (ptr_val.tag()) { - .decl_ref_mut, .decl_ref, .variable => { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - .variable => ptr_val.castTag(.variable).?.data.owner_decl, + const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType(); + const ptr = mod.intern_pool.indexToKey(ptr_val).ptr; + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ptr_ty, + ptr_val.toValue(), + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, - }; - try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); + }, + location, + ), + .int => |int| try writer.print("{x}", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }), + .eu_payload, .opt_payload => |base| { + const base_ty = mod.intern_pool.typeOf(base).toType().childType(mod); + // Ensure complete type definition is visible before accessing fields. 
+ _ = try dg.typeToIndex(base_ty, .complete); + try writer.writeAll("&("); + try dg.renderParentPtr(writer, base, location); + try writer.writeAll(")->payload"); }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - + .elem => |elem| { + try writer.writeAll("&("); + try dg.renderParentPtr(writer, elem.base, location); + try writer.print(")[{d}]", .{elem.index}); + }, + .field => |field| { + const base_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); // Ensure complete type definition is visible before accessing fields. - _ = try dg.typeToIndex(field_ptr.container_ty, .complete); - - const container_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, field_ptr.container_ty); - - switch (fieldLocation( - field_ptr.container_ty, - ptr_ty, - @intCast(u32, field_ptr.field_index), - mod, - )) { - .begin => try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ), - .field => |field| { + _ = try dg.typeToIndex(base_ty, .complete); + switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { + .begin => try dg.renderParentPtr(writer, field.base, location), + .field => |name| { try writer.writeAll("&("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.writeAll(")->"); - try dg.writeCValue(writer, field); + try dg.writeCValue(writer, name); }, .byte_offset => |byte_offset| { const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8); - const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try dg.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(" + {})", .{ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other), }); }, .end => { try writer.writeAll("(("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(") + {})", .{ try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ptr_ty = try mod.ptrType(.{ - .size = .C, - .elem_type = elem_ptr.elem_ty.ip_index, - }); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); - try writer.print(")[{d}]", .{elem_ptr.index}); - }, - .opt_payload_ptr, .eu_payload_ptr => { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const container_ptr_ty = try mod.ptrType(.{ - .elem_type = payload_ptr.container_ty.ip_index, - .size = .C, - }); - - // Ensure complete type definition is visible before accessing fields. 
- _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location); - try writer.writeAll(")->payload"); - }, - else => unreachable, + .comptime_field => unreachable, } } @@ -723,11 +654,12 @@ pub const DeclGen = struct { arg_val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } - const mod = dg.module; const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, @@ -928,175 +860,8 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag(mod)) { - .Int => switch (val.tag()) { - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), - }, - .Float => { - const bits = ty.floatBits(target); - const f128_val = val.toFloat(f128, mod); - - // All unsigned ints matching float types are pre-allocated. - const repr_ty = mod.intType(.unsigned, bits) catch unreachable; - - assert(bits <= 128); - var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; - var repr_val_big = BigInt.Mutable{ - .limbs = &repr_val_limbs, - .len = undefined, - .positive = undefined, - }; - switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), - 128 => repr_val_big.set(@bitCast(u128, f128_val)), - else => unreachable, - } - - const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); - - try writer.writeAll("zig_cast_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte(' '); - var empty = true; - if (std.math.isFinite(f128_val)) { - try writer.writeAll("zig_make_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), - 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), - 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), - 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), - 128 => try writer.print("{x}", .{f128_val}), - else => unreachable, - } - try writer.writeAll(", "); - empty = false; - } else { - // isSignalNan is equivalent to isNan currently, and MSVC doens't have nans, so prefer nan - const operation = if (std.math.isNan(f128_val)) - "nan" - else if (std.math.isSignalNan(f128_val)) - "nans" - else if (std.math.isInf(f128_val)) - "inf" - else - unreachable; - - if (location == .StaticInitializer) { - if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) - return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{}); - - // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression - - // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly - // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128) - // return dg.fail("Only quiet nans are supported in global variable initializers", 
.{}); - } - - try writer.writeAll("zig_"); - try writer.writeAll(if (location == .StaticInitializer) "init" else "make"); - try writer.writeAll("_special_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - if (std.math.signbit(f128_val)) try writer.writeByte('-'); - try writer.writeAll(", "); - try writer.writeAll(operation); - try writer.writeAll(", "); - if (std.math.isNan(f128_val)) switch (bits) { - // We only actually need to pass the significand, but it will get - // properly masked anyway, so just pass the whole value. - 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), - 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), - else => unreachable, - }; - try writer.writeAll(", "); - empty = false; - } - try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); - if (!empty) try writer.writeByte(')'); - return; - }, - .Pointer => switch (val.ip_index) { - .null_value => if (ty.isSlice(mod)) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, - .none => switch (val.tag()) { - .variable => { - const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .slice => { - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const slice = val.castTag(.slice).?.data; - - try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(mod), slice.ptr, initializer_type); - try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, initializer_type); - try writer.writeByte('}'); - }, - .function => { - const func = val.castTag(.function).?.data; - try dg.renderDeclName(writer, func.owner_decl, 0); - }, - .extern_fn => { - const extern_fn = val.castTag(.extern_fn).?.data; - try dg.renderDeclName(writer, extern_fn.owner_decl, 0); - }, - .lazy_align, .lazy_size => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - else => unreachable, - }, - }, + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { .Array, .Vector => { if (location == .FunctionArgument) { try writer.writeByte('('); @@ -1129,17 +894,6 @@ pub const DeclGen = struct { return; }, .none => switch (val.tag()) { - .empty_array => { - const ai = ty.arrayInfo(mod); - try writer.writeByte('{'); - if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } else { - try writer.writeByte('0'); - } - try 
writer.writeByte('}'); - return; - }, .bytes, .str_lit => |t| { const bytes = switch (t) { .bytes => val.castTag(.bytes).?.data, @@ -1210,91 +964,6 @@ pub const DeclGen = struct { try writer.writeByte('}'); } }, - .Bool => { - if (val.toBool(mod)) { - return writer.writeAll("true"); - } else { - return writer.writeAll("false"); - } - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - - const is_null_val = Value.makeBool(val.ip_index == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) - return dg.renderValue(writer, Type.bool, is_null_val, location); - - if (ty.optionalReprIsPayload(mod)) { - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; - return dg.renderValue(writer, payload_ty, payload_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef; - - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .is_null = "); - try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); - try writer.writeAll(" }"); - }, - .ErrorSet => { - if (val.castTag(.@"error")) |error_pl| { - // Error values are already defined by genErrDecls. - try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)}); - } else { - try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)}); - } - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const error_ty = ty.errorUnionSet(mod); - const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; - - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return dg.renderValue(writer, error_ty, error_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .error = "); - try dg.renderValue(writer, error_ty, error_val, initializer_type); - try writer.writeAll(" }"); - }, - .Enum => switch (val.ip_index) { - .none => { - const int_tag_ty = try ty.intTagType(mod); - return dg.renderValue(writer, int_tag_ty, val, location); - }, - else => { - const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; - const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); - return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); - }, - }, - .Fn => switch (val.tag()) { - .function => { - const decl = val.castTag(.function).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .extern_fn => { - const decl = val.castTag(.extern_fn).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - else => unreachable, - }, .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { const field_vals = val.castTag(.aggregate).?.data; @@ -1408,7 +1077,448 @@ pub const DeclGen = struct { } }, }, - .Union => { + + .Frame, + .AnyFrame, + => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ + @tagName(tag), + }), + + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + 
.ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try writer.writeAll(@tagName(simple_value)), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), + .lazy_align, .lazy_size => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + }, + .err => |err| try writer.print("zig_error_{}", .{ + fmtIdent(mod.intern_pool.stringToSlice(err.name)), + }), + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); + const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.anyerror, 0) else val; + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return dg.renderValue(writer, error_ty, error_val, location); + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + const payload_val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, payload_val, initializer_type); + try writer.writeAll(", .error = "); + try dg.renderValue(writer, error_ty, error_val, initializer_type); + try writer.writeAll(" }"); + }, + .enum_tag => { + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); + }, + .float => { + const bits = ty.floatBits(target); + const f128_val = val.toFloat(f128, mod); + + // All unsigned ints matching float types are pre-allocated. 
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; + + assert(bits <= 128); + var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; + var repr_val_big = BigInt.Mutable{ + .limbs = &repr_val_limbs, + .len = undefined, + .positive = undefined, + }; + + switch (bits) { + 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), + 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), + 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), + 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), + 128 => repr_val_big.set(@bitCast(u128, f128_val)), + else => unreachable, + } + + const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); + + try writer.writeAll("zig_cast_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte(' '); + var empty = true; + if (std.math.isFinite(f128_val)) { + try writer.writeAll("zig_make_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + switch (bits) { + 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), + 128 => try writer.print("{x}", .{f128_val}), + else => unreachable, + } + try writer.writeAll(", "); + empty = false; + } else { + // isSignalNan is equivalent to isNan currently, and MSVC doesn't have nans, so prefer nan + const operation = if (std.math.isNan(f128_val)) + "nan" + else if (std.math.isSignalNan(f128_val)) + "nans" + else if (std.math.isInf(f128_val)) + "inf" + else + unreachable; + + if (location == .StaticInitializer) { + if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) + return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{}); + + // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression + + // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly + // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128) + // return dg.fail("Only quiet nans are supported in global variable initializers", .{}); + } + + try writer.writeAll("zig_"); + try writer.writeAll(if (location == .StaticInitializer) "init" else "make"); + try writer.writeAll("_special_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + if (std.math.signbit(f128_val)) try writer.writeByte('-'); + try writer.writeAll(", "); + try writer.writeAll(operation); + try writer.writeAll(", "); + if (std.math.isNan(f128_val)) switch (bits) { + // We only actually need to pass the significand, but it will get + // properly masked anyway, so just pass the whole value.
+ 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), + 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), + else => unreachable, + }; + try writer.writeAll(", "); + empty = false; + } + try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); + if (!empty) try writer.writeByte(')'); + }, + .ptr => |ptr| { + if (ptr.len != .none) { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + try writer.writeByte('{'); + } + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ty, + val, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + location, + ), + .int => |int| { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + try writer.print("){x})", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }); + }, + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.renderParentPtr(writer, val.ip_index, location), + .comptime_field => unreachable, + } + if (ptr.len != .none) { + try writer.writeAll(", "); + try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type); + try writer.writeByte('}'); + } + }, + .opt => |opt| { + const payload_ty = ty.optionalChild(mod); + + const is_null_val = Value.makeBool(opt.val == .none); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + return dg.renderValue(writer, Type.bool, is_null_val, location); + + if (ty.optionalReprIsPayload(mod)) { + return dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intValue(payload_ty, 0), + else => opt.val.toValue(), + }, location); + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => opt.val, + }.toValue(), initializer_type); + try writer.writeAll(", .is_null = "); + try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); + try writer.writeAll(" }"); + }, + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type, .vector_type => { + if (location == .FunctionArgument) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + // Fall back to generic implementation. 
+ + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; + + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, mod)) { + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try literal.writeChar(elem_val_u8); + } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + if (s_u8 != 0) try literal.writeChar(s_u8); + } + try literal.end(); + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try writer.print("'\\x{x}'", .{elem_val_u8}); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + }, + .struct_type, .anon_struct_type => switch (ty.containerLayout(mod)) { + .Auto, .Extern => { + const field_vals = val.castTag(.aggregate).?.data; + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeByte('{'); + var empty = true; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeByte(','); + try dg.renderValue(writer, field_ty, field_val, initializer_type); + + empty = false; + } + try writer.writeByte('}'); + }, + .Packed => { + const field_vals = val.castTag(.aggregate).?.data; + const int_info = ty.intInfo(mod); + + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); + + var bit_offset: u64 = 0; + + var eff_num_fields: usize = 0; + for (0..field_vals.len) |field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + eff_num_fields += 1; + } + + if (eff_num_fields == 0) { + try writer.writeByte('('); + try dg.renderValue(writer, ty, Value.undef, initializer_type); + try writer.writeByte(')'); + } else if (ty.bitSize(mod) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } + + var eff_index: usize = 0; + var needs_closing_paren = false; + for (field_vals, 0..) 
|field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const cast_context = IntCastContext{ .value = .{ .value = field_val } }; + if (bit_offset != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset += field_ty.bitSize(mod); + needs_closing_paren = true; + eff_index += 1; + } + } else { + try writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + + if (bit_offset != 0) { + try dg.renderValue(writer, field_ty, field_val, .Other); + try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field_ty, field_val, .Other); + } + + bit_offset += field_ty.bitSize(mod); + empty = false; + } + try writer.writeByte(')'); + } + }, + }, + else => unreachable, + }, + .un => { const union_obj = val.castTag(.@"union").?.data; if (!location.isInitializer()) { @@ -1461,22 +1571,6 @@ pub const DeclGen = struct { if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), } } @@ -1504,8 +1598,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.castTag(.function)) |func_payload| - if (func_payload.data.is_cold) try w.writeAll("zig_cold "); + if (fn_decl.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( @@ -1747,18 +1840,12 @@ pub const DeclGen = struct { fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { const mod = dg.module; - switch (tv.val.tag()) { - .extern_fn => return true, - .function => { - const func = tv.val.castTag(.function).?.data; - return mod.decl_exports.contains(func.owner_decl); - }, - .variable => { - const variable = tv.val.castTag(.variable).?.data; - return mod.decl_exports.contains(variable.owner_decl); - }, + return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .variable => |variable| mod.decl_exports.contains(variable.decl), + .extern_func => true, + .func => 
|func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl), else => unreachable, - } + }; } fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { @@ -1833,7 +1920,7 @@ pub const DeclGen = struct { try dg.writeCValue(writer, member); } - fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: *Module.Var) !void { + fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: InternPool.Key.Variable) !void { const decl = dg.module.declPtr(decl_index); const fwd_decl_writer = dg.fwd_decl.writer(); const is_global = dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val }) or variable.is_extern; @@ -1844,7 +1931,7 @@ pub const DeclGen = struct { fwd_decl_writer, decl.ty, .{ .decl = decl_index }, - CQualifiers.init(.{ .@"const" = !variable.is_mutable }), + CQualifiers.init(.{ .@"const" = variable.is_const }), decl.@"align", .complete, ); @@ -1858,7 +1945,7 @@ pub const DeclGen = struct { if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); - } else if (decl.isExtern()) { + } else if (decl.isExtern(mod)) { try writer.writeAll(mem.span(decl.name)); } else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), @@ -2416,8 +2503,11 @@ pub fn genErrDecls(o: *Object) !void { var max_name_len: usize = 0; for (mod.error_name_list.items, 0..) |name, value| { max_name_len = std.math.max(name.len, max_name_len); - var err_pl = Value.Payload.Error{ .data = .{ .name = name } }; - try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other); + const err_val = try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.intern_pool.getString(name).unwrap().?, + } }); + try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other); try writer.print(" = {d}u,\n", .{value}); } o.indent_writer.popIndent(); @@ -2451,7 +2541,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = try mod.arrayType(.{ .len = mod.error_name_list.items.len, - .child = .const_slice_u8_sentinel_0_type, + .child = .slice_const_u8_sentinel_0_type, .sentinel = .zero_u8, }); @@ -2497,7 +2587,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .tag_name => { const enum_ty = val.data.tag_name; - const name_slice_ty = Type.const_slice_u8_sentinel_0; + const name_slice_ty = Type.slice_const_u8_sentinel_0; try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); @@ -2668,14 +2758,13 @@ pub fn genDecl(o: *Object) !void { const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; - if (tv.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); - } else if (tv.val.castTag(.variable)) |var_payload| { - const variable: *Module.Var = var_payload.data; + } else if (decl.getVariable(mod)) |variable| { try o.dg.renderFwdDecl(decl_c_value.decl, variable); try genExports(o); @@ -2690,7 +2779,7 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); if (decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); - try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); + try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer); 
try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { @@ -4157,10 +4246,13 @@ fn airCall( known: { const fn_decl = fn_decl: { const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; - break :fn_decl switch (callee_val.tag()) { - .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, - .function => callee_val.castTag(.function).?.data.owner_decl, - .decl_ref => callee_val.castTag(.decl_ref).?.data, + break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => break :known, + }, else => break :known, }; }; @@ -4231,9 +4323,9 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; - const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].castTag(.function).?.data; const mod = f.object.dg.module; + const writer = f.object.writer(); + const function = f.air.values[ty_pl.payload].getFunction(mod).?; try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return .none; } @@ -6634,9 +6726,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, accum, .Other); try writer.writeAll(" = "); - var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); - defer arena.deinit(); - try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { @@ -6654,7 +6743,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { }, .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(arena.allocator(), mod), + .Int => try scalar_ty.minInt(mod), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8dec958806..f8ddddad1c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -582,7 +582,7 @@ pub const Object = struct { llvm_usize_ty, }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; @@ -866,10 +866,11 @@ pub const Object = struct { pub fn updateFunc( o: *Object, mod: *Module, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, ) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const target = mod.getTarget(); @@ -886,7 +887,7 @@ pub const Object = struct { const llvm_func = try dg.resolveLlvmFunction(decl_index); - if (mod.align_stack_fns.get(func)) |align_info| { + if (mod.align_stack_fns.get(func_index)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); dg.addFnAttr(llvm_func, "noinline"); } else { @@ -1164,7 +1165,7 @@ pub const Object = struct { di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; - const is_internal_linkage = decl.val.tag() != .extern_fn and + const is_internal_linkage = decl.getExternFunc(mod) == null and 
!mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn @@ -1269,18 +1270,20 @@ pub const Object = struct { // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. const llvm_global = self.decl_map.get(decl_index) orelse return; const decl = mod.declPtr(decl_index); - if (decl.isExtern()) { - const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod); - const mangle_name = is_wasm_fn and - decl.getExternFn().?.lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c"); - const decl_name = if (mangle_name) name: { - const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ - decl.name, decl.getExternFn().?.lib_name.?, - }); - break :name tmp.ptr; - } else decl.name; - defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0)); + if (decl.isExtern(mod)) { + var free_decl_name = false; + const decl_name = decl_name: { + if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { + if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + free_decl_name = true; + break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); + } + } + } + break :decl_name std.mem.span(decl.name); + }; + defer if (free_decl_name) gpa.free(decl_name); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { @@ -1303,13 +1306,13 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - if (decl.val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) { + if (variable.is_weak_linkage) { llvm_global.setLinkage(.ExternalWeak); } } @@ -1345,8 +1348,8 @@ pub const Object = struct { defer gpa.free(section_z); llvm_global.setSection(section_z); } - if (decl.val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } } @@ -1379,9 +1382,9 @@ pub const Object = struct { llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); - if (decl.val.castTag(.variable)) |variable| { + if (decl.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); @@ -1510,12 +1513,11 @@ pub const Object = struct { for (enum_type.names, 0..) 
|field_name_ip, i| { const field_name_z = ip.stringToSlice(field_name_ip); - var bigint_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const storage = if (enum_type.values.len != 0) - ip.indexToKey(enum_type.values[i]).int.storage + var bigint_space: Value.BigIntSpace = undefined; + const bigint = if (enum_type.values.len != 0) + enum_type.values[i].toValue().toBigInt(&bigint_space, mod) else - InternPool.Key.Int.Storage{ .u64 = i }; - const bigint = storage.toBigInt(&bigint_space); + std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -2442,6 +2444,7 @@ pub const DeclGen = struct { } fn genDecl(dg: *DeclGen) !void { + const mod = dg.module; const decl = dg.decl; const decl_index = dg.decl_index; assert(decl.has_tv); @@ -2449,19 +2452,16 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), }); - assert(decl.val.ip_index != .none or decl.val.tag() != .function); - if (decl.val.castTag(.extern_fn)) |extern_fn| { - _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); + if (decl.getExternFunc(mod)) |extern_func| { + _ = try dg.resolveLlvmFunction(extern_func.decl); } else { - const mod = dg.module; const target = mod.getTarget(); var global = try dg.resolveGlobalDecl(decl_index); global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); - const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { - const variable = payload.data; - break :init_val variable.init; + const init_val = if (decl.getVariable(mod)) |variable| init_val: { + break :init_val variable.init.toValue(); } else init_val: { global.setGlobalConstant(.True); break :init_val decl.val; @@ -2519,7 +2519,7 @@ pub const DeclGen = struct { ); try dg.object.di_map.put(dg.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern()) global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global); } } } @@ -2548,17 +2548,16 @@ pub const DeclGen = struct { const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); gop.value_ptr.* = llvm_fn; - const is_extern = decl.isExtern(); + const is_extern = decl.isExtern(mod); if (!is_extern) { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (decl.getExternFn().?.lib_name) |lib_name| { - const module_name = std.mem.sliceTo(lib_name, 0); - if (!std.mem.eql(u8, module_name, "c")) { - dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name); + if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } } } @@ -2695,11 +2694,12 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(dg.object.decl_map.remove(decl_index)); - const decl = dg.module.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(dg.module); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + const fqn = try decl.getFullyQualifiedName(mod); defer dg.gpa.free(fqn); - const target = dg.module.getTarget(); + const target = mod.getTarget(); const llvm_type = try 
dg.lowerType(decl.ty); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); @@ -2712,18 +2712,18 @@ pub const DeclGen = struct { gop.value_ptr.* = llvm_global; // This is needed for declarations created by `@extern`. - if (decl.isExtern()) { + if (decl.isExtern(mod)) { llvm_global.setValueName(decl.name); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (decl.val.castTag(.variable)) |variable| { - const single_threaded = dg.module.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.getVariable(mod)) |variable| { + const single_threaded = mod.comp.bin_file.options.single_threaded; + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); + if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); } } else { llvm_global.setLinkage(.Internal); @@ -3199,468 +3199,344 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); var tv = arg_tv; - if (tv.val.castTag(.runtime_value)) |rt| { - tv.val = rt.data; + switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .runtime_value => |rt| tv.val = rt.val.toValue(), + else => {}, } - if (tv.val.isUndef(mod)) { + if (tv.val.isUndefDeep(mod)) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - switch (tv.ty.zigTypeTag(mod)) { - .Bool => { - const llvm_type = try dg.lowerType(tv.ty); - return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); - }, - .Int => switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - else => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, mod); - return lowerBigInt(dg, tv.ty, bigint); - }, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int.storage.toBigInt(&bigint_space); - return lowerBigInt(dg, tv.ty, bigint); - }, - else => unreachable, - }, - }, - .Enum => { - const int_val = try tv.enumToInt(mod); - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, mod); - - const int_info = tv.ty.intInfo(mod); - const llvm_type = dg.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - .Float => { - const llvm_ty = try dg.lowerType(tv.ty); - switch (tv.ty.floatBits(target)) { - 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); - const llvm_i16 = dg.context.intType(16); - const int = llvm_i16.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); - const llvm_i32 = dg.context.intType(32); - const int = llvm_i32.constInt(repr, .False); - 
return int.constBitCast(llvm_ty); - }, - 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); - const llvm_i64 = dg.context.intType(64); - const int = llvm_i64.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 80 => { - const float = tv.val.toFloat(f80, mod); - const repr = std.math.break_f80(float); - const llvm_i80 = dg.context.intType(80); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. - if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, - } - }, - .Pointer => switch (tv.val.ip_index) { - .null_value => { - const llvm_type = try dg.lowerType(tv.ty); - return llvm_type.constNull(); - }, - .none => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl_index = tv.val.castTag(.variable).?.data.owner_decl; - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const val = try dg.resolveGlobalDecl(decl_index); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - return addrspace_casted_ptr; - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - const fields: [2]*llvm.Value = .{ - try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(mod), - .val = slice.ptr, - }), - try dg.lowerValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .lazy_align, .lazy_size => { - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); - return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - .opt_payload => { - const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ - tv.ty.fmtDebug(), tag, - }), - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return dg.lowerIntAsPtr(int), - .ptr => |ptr| { - const ptr_tv: TypedValue = switch (ptr.len) { - .none => tv, - else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, - }; - const llvm_ptr_val = switch (ptr.addr) { - .@"var" => |@"var"| ptr: { - const decl = dg.module.declPtr(@"var".owner_decl); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const 
val = try dg.resolveGlobalDecl(@"var".owner_decl); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - break :ptr addrspace_casted_ptr; - }, - .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), - .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), - .eu_payload, - .opt_payload, - .elem, - .field, - => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), - .comptime_field => unreachable, - }; - switch (ptr.len) { - .none => return llvm_ptr_val, - else => { - const fields: [2]*llvm.Value = .{ - llvm_ptr_val, - try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - } - }, - else => unreachable, + if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) { + .Array => switch (tv.val.tag()) { + .bytes => { + const bytes = tv.val.castTag(.bytes).?.data; + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ); }, - }, - .Array => switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { + .str_lit => { + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + if (tv.ty.sentinel(mod)) |sent_val| { + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); + if (byte == 0 and bytes.len > 0) { return dg.context.constString( bytes.ptr, @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - .aggregate => { - const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); - const llvm_elems = try gpa.alloc(*llvm.Value, len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals[0..len], 0..) 
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + .False, // Yes, null terminate. ); } - }, - .repeated => { - const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + var array = std.ArrayList(u8).init(dg.gpa); + defer array.deinit(); + try array.ensureUnusedCapacity(bytes.len + 1); + array.appendSliceAssumeCapacity(bytes); + array.appendAssumeCapacity(byte); + return dg.context.constString( + array.items.ptr, + @intCast(c_uint, array.items.len), + .True, // Don't null terminate. + ); + } else { + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, bytes.len), + .True, // Don't null terminate. `bytes` has the sentinel, if any. + ); + } + }, + else => unreachable, + }, + .Struct => { + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; + + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); + + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
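+ // Illustration (hypothetical tuple): in `struct { u8, u32 }`, the u8 at
+ // offset 0 is followed by a `[3 x i8]` undef padding field so the u32
+ // lands at its 4-byte alignment.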
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } if (need_unnamed) { return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, ); } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), ); } }, - .empty_array_sentinel => { - const elem_ty = tv.ty.childType(mod); - const sent_val = tv.ty.sentinel(mod).?; - const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); - const llvm_elems: [1]*llvm.Value = .{sentinel}; - const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); - if (need_unnamed) { - return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); - } - }, + .struct_type => |struct_type| struct_type, else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elem_vals| { - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals, 0..) 
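+ // Illustration (hypothetical field): in `packed struct(u16) { x: u16 }`,
+ // small_int_ty and int_llvm_ty are both i16, so the "zext" degenerates
+ // to a bitcast, which only constZExtOrBitCast accepts.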
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .repeated_elem => |val| { - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + }; - var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); - } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); - } + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) |field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - }, - else => unreachable, - }, - }, - .Optional => { - comptime assert(optional_layout_version == 3); - const payload_ty = tv.ty.optionalChild(mod); + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. 
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; + } - const llvm_i8 = dg.context.intType(8); - const is_pl = !tv.val.isNull(mod); - const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return non_null_bit; + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload(mod)) return switch (tv.val.ip_index) { - .none => if (tv.val.castTag(.opt_payload)) |payload| - try dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }) - else if (is_pl) - try dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }) - else - llvm_ty.constNull(), - .null_value => llvm_ty.constNull(), - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .opt => |opt| switch (opt.val) { - .none => llvm_ty.constNull(), - else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), - }, - else => unreachable, - }, - }; - assert(payload_ty.zigTypeTag(mod) != .Fn); - const llvm_field_count = llvm_ty.countStructElementTypes(); - var fields_buf: [3]*llvm.Value = undefined; - fields_buf[0] = try dg.lowerValue(.{ - .ty = payload_ty, - .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.undef, - }); - fields_buf[1] = non_null_bit; - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); } - return dg.context.constStruct(&fields_buf, llvm_field_count, .False); }, - .Fn => { - const fn_decl_index = switch 
(tv.val.tag()) { - .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, - .function => tv.val.castTag(.function).?.data.owner_decl, - else => unreachable, - }; - const fn_decl = dg.module.declPtr(fn_decl_index); - dg.module.markDeclAlive(fn_decl); - return dg.resolveLlvmFunction(fn_decl_index); + .Vector => switch (tv.val.tag()) { + .bytes => { + // Note, sentinel is not stored even if the type has a sentinel. + const bytes = tv.val.castTag(.bytes).?.data; + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); + assert(vector_len == bytes.len or vector_len + 1 == bytes.len); + + const elem_ty = tv.ty.childType(mod); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) |*elem, i| { + elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = try mod.intValue(elem_ty, bytes[i]), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .str_lit => { + // Note, sentinel is not stored + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); + assert(vector_len == bytes.len); + + const elem_ty = tv.ty.childType(mod); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) |*elem, i| { + elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = try mod.intValue(elem_ty, bytes[i]), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + else => unreachable, }, - .ErrorSet => { + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Frame, + .AnyFrame, + => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => { + const llvm_type = try dg.lowerType(tv.ty); + return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); + }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => |int| { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int.storage.toBigInt(&bigint_space); + return lowerBigInt(dg, tv.ty, bigint); + }, + .err => |err| { const llvm_ty = try dg.lowerType(Type.anyerror); - switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .@"error" => { - const err_name = tv.val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - return llvm_ty.constInt(kv.value, .False); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. 
- return llvm_ty.constNull(); - }, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return llvm_ty.constInt(int.storage.u64, .False), - else => unreachable, - }, - } + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + return llvm_ty.constInt(kv.value, .False); }, - .ErrorUnion => { + .error_union => |error_union| { const payload_type = tv.ty.errorUnionPayload(mod); - const is_pl = tv.val.errorUnionIsPayload(); + const is_pl = tv.val.errorUnionIsPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. @@ -3676,7 +3552,10 @@ pub const DeclGen = struct { }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.undef, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_type.ip_index }), + .payload => |payload| payload, + }.toValue(), }); var fields_buf: [3]*llvm.Value = undefined; @@ -3697,172 +3576,396 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields_buf, llvm_field_count, .False); } }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const gpa = dg.gpa; + .enum_tag => { + const int_val = try tv.enumToInt(mod); - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_space, mod); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + const int_info = tv.ty.intInfo(mod); + const llvm_type = dg.context.intType(int_info.bits); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + }, + .float => { + const llvm_ty = try dg.lowerType(tv.ty); + switch (tv.ty.floatBits(target)) { + 16 => { + const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); + const llvm_i16 = dg.context.intType(16); + const int = llvm_i16.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 32 => { + const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); + const llvm_i32 = dg.context.intType(32); + const int = llvm_i32.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 64 => { + const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); + const llvm_i64 = dg.context.intType(64); + const int = llvm_i64.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 80 => { + const float = tv.val.toFloat(f80, mod); + const repr = std.math.break_f80(float); + const llvm_i80 = dg.context.intType(80); + var x = llvm_i80.constInt(repr.exp, .False); + x = x.constShl(llvm_i80.constInt(64, .False)); + x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); + if (backendSupportsF80(target)) { + return x.constBitCast(llvm_ty); + } else { + return x; + } + }, + 128 => { + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); + // LLVM seems to require that the 
lower half of the f128 be placed first + // in the buffer. + if (native_endian == .Big) { + std.mem.swap(u64, &buf[0], &buf[1]); + } + const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); + return int.constBitCast(llvm_ty); + }, + else => unreachable, + } + }, + .ptr => |ptr| { + const ptr_tv: TypedValue = switch (ptr.len) { + .none => tv, + else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + }; + const llvm_ptr_val = switch (ptr.addr) { + .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), + .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), + .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), + .comptime_field => unreachable, + }; + switch (ptr.len) { + .none => return llvm_ptr_val, + else => { + const fields: [2]*llvm.Value = .{ + llvm_ptr_val, + try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + } + }, + .opt => |opt| { + comptime assert(optional_layout_version == 3); + const payload_ty = tv.ty.optionalChild(mod); - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + const llvm_i8 = dg.context.intType(8); + const non_null_bit = switch (opt.val) { + .none => llvm_i8.constNull(), + else => llvm_i8.constInt(1, .False), + }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return non_null_bit; + } + const llvm_ty = try dg.lowerType(tv.ty); + if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { + .none => llvm_ty.constNull(), + else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), + }; + assert(payload_ty.zigTypeTag(mod) != .Fn); - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const llvm_field_count = llvm_ty.countStructElementTypes(); + var fields_buf: [3]*llvm.Value = undefined; + fields_buf[0] = try dg.lowerValue(.{ + .ty = payload_ty, + .val = switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => |payload| payload, + }.toValue(), + }); + fields_buf[1] = non_null_bit; + if (llvm_field_count > 2) { + assert(llvm_field_count == 3); + fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + } + return dg.context.constStruct(&fields_buf, llvm_field_count, .False); + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .array_type => switch (aggregate.storage) { + .bytes => |bytes| return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ), + .elems => |elem_vals| { + const elem_ty = tv.ty.childType(mod); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals, 0..) 
|elem_val, i| { + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated_elem => |val| { + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(field_ty.toType(), mod, i), - }); + if (sentinel) |sent| { + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + }, + .vector_type => |vector_type| { + const elem_ty = vector_type.child.toType(); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) 
|*llvm_elem, i| { + llvm_elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }.toValue(), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .struct_type, .anon_struct_type => { + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; - llvm_fields.appendAssumeCapacity(field_llvm_val); + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (fields, 0..) 
|field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(field.ty, mod, i), - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } + llvm_fields.appendAssumeCapacity(field_llvm_val); - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) 
|field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. + const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; } - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(field.ty, mod, field_and_index.index), - }); + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
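// Aside: a minimal standalone sketch of the offset/padding rule these struct
// lowerings share, with illustrative names that are not part of the patch.
// The running offset is rounded up to each field's alignment, and the gap is
// what gets emitted as an undef `[N x i8]` filler field.
const std = @import("std");

fn paddingBefore(offset: u64, field_align: u64) u64 {
    // Round up to the field's alignment, then measure the gap to fill.
    const aligned = std.mem.alignForwardGeneric(u64, offset, field_align);
    return aligned - offset;
}

test "padding rounds the offset up to the field alignment" {
    try std.testing.expectEqual(@as(u64, 3), paddingBefore(1, 4));
    try std.testing.expectEqual(@as(u64, 0), paddingBefore(8, 4));
}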
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - llvm_fields.appendAssumeCapacity(field_llvm_val); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + else => unreachable, }, - .Union => { + .un => { const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { .none => tv.val.castTag(.@"union").?.data, @@ -3950,96 +4053,6 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .aggregate => { - // Note, sentinel is not stored even if the type has a sentinel. - // The value includes the sentinel in those cases. - const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .repeated => { - // Note, sentinel is not stored even if the type has a sentinel. 
- const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), } } @@ -4094,10 +4107,9 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - if (ptr_val.ip_index != .none) return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { .int => |int| dg.lowerIntAsPtr(int), .ptr => |ptr| switch (ptr.addr) { - .@"var" => |@"var"| dg.lowerParentPtrDecl(ptr_val, @"var".owner_decl), .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), @@ -4150,7 +4162,7 @@ pub const DeclGen = struct { const indices: [1]*llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; - const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().childType(mod)); + const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().elemType2(mod)); return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .field => |field_ptr| { @@ -4185,7 +4197,7 @@ pub const DeclGen = struct { .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { @@ -4230,148 +4242,6 @@ pub const DeclGen = struct { }, else => unreachable, }; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .decl_ref => { - const decl = ptr_val.castTag(.decl_ref).?.data; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .variable => { - const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_llvm_ptr = try 
dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned); - const parent_ty = field_ptr.container_ty; - - const field_index = @intCast(u32, field_ptr.field_index); - const llvm_u32 = dg.context.intType(32); - switch (parent_ty.zigTypeTag(mod)) { - .Union => { - if (parent_ty.containerLayout(mod) == .Packed) { - return parent_llvm_ptr; - } - - const layout = parent_ty.unionGetLayout(mod); - if (layout.payload_size == 0) { - // In this case a pointer to the union and a pointer to any - // (void) payload is the same. - return parent_llvm_ptr; - } - const llvm_pl_index = if (layout.tag_size == 0) - 0 - else - @boolToInt(layout.tag_align >= layout.payload_align); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_pl_index, .False), - }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .Struct => { - if (parent_ty.containerLayout(mod) == .Packed) { - if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.ptrBitWidth()); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); - // count bits of fields before this one - const prev_bits = b: { - var b: usize = 0; - for (parent_ty.structFields(mod).values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @intCast(usize, field.ty.bitSize(mod)); - } - break :b b; - }; - const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); - const field_addr = base_addr.constAdd(byte_offset); - const final_llvm_ty = dg.context.pointerType(0); - return field_addr.constIntToPtr(final_llvm_ty); - } - - const parent_llvm_ty = try dg.lowerType(parent_ty); - if (llvmField(parent_ty, field_index, mod)) |llvm_field| { - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field.index, .False), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } else { - const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } - }, - .Pointer => { - assert(parent_ty.isSlice(mod)); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(field_index, .False), - }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, true); - - const llvm_usize = try dg.lowerType(Type.usize); - const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; - const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .opt_payload_ptr => { - const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - - const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or - payload_ty.optionalReprIsPayload(mod)) - { - // In this case, we represent pointer to optional the same as pointer - // to the payload. 
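// Aside: a self-contained illustration of the representation rule stated in
// the comment above; `?*u32` is an assumed example type, not from the patch.
// When the payload is a pointer that can never be zero, null is encoded as
// address zero, so the optional stores no extra flag and a pointer to the
// optional is also a pointer to the payload.
test "optional pointers reuse the payload representation" {
    const std = @import("std");
    var x: u32 = 42;
    const opt: ?*u32 = &x;
    try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32));
    try std.testing.expect(opt.? == &x);
}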
- return parent_llvm_ptr; - } - - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .eu_payload_ptr => { - const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true); - - const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - // In this case, we represent pointer to error union the same as pointer - // to the payload. - return parent_llvm_ptr; - } - - const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), - }; - const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } } fn lowerDeclRefValue( @@ -4380,20 +4250,6 @@ pub const DeclGen = struct { decl_index: Module.Decl.Index, ) Error!*llvm.Value { const mod = self.module; - if (tv.ty.isSlice(mod)) { - const ptr_ty = tv.ty.slicePtrFieldType(mod); - const fields: [2]*llvm.Value = .{ - try self.lowerValue(.{ - .ty = ptr_ty, - .val = tv.val, - }), - try self.lowerValue(.{ - .ty = Type.usize, - .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)), - }), - }; - return self.context.constStruct(&fields, fields.len, .False); - } // In the case of something like: // fn foo() void {} @@ -4401,13 +4257,13 @@ pub const DeclGen = struct { // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. 
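// Aside: the alias-chasing in miniature, using stand-in data rather than the
// compiler's real Decl/Module machinery; the shape here is assumed, for
// illustration only.
fn chaseOwner(owners: []const u32, start: u32) u32 {
    // Follow owner links until a declaration owns its own function body.
    var i = start;
    while (owners[i] != i) i = owners[i];
    return i;
}

test "an alias resolves to the function's owner decl" {
    const std = @import("std");
    // decl 0 owns itself (`foo`); decl 1 is an alias of it (`bar`).
    const owners = [_]u32{ 0, 0 };
    try std.testing.expectEqual(@as(u32, 0), chaseOwner(&owners, 1));
}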
const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + if (decl.getFunction(mod)) |func| { + if (func.owner_decl != decl_index) { + return self.lowerDeclRefValue(tv, func.owner_decl); } - } else if (decl.val.castTag(.extern_fn)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + } else if (decl.getExternFunc(mod)) |func| { + if (func.decl != decl_index) { + return self.lowerDeclRefValue(tv, func.decl); } } @@ -6333,11 +6189,11 @@ pub const FuncGen = struct { } fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.dg.module; + const func = self.air.values[ty_pl.payload].getFunction(mod).?; const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -6395,8 +6251,8 @@ pub const FuncGen = struct { if (self.dg.object.di_builder == null) return null; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const mod = self.dg.module; + const func = self.air.values[ty_pl.payload].getFunction(mod).?; const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; @@ -8349,7 +8205,7 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const func = self.dg.decl.getFunction().?; + const func = self.dg.decl.getFunction(mod).?; const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( @@ -9147,7 +9003,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); @@ -9861,7 +9717,7 @@ pub const FuncGen = struct { } const mod = self.dg.module; - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 612ac1f252..96c723989a 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -236,9 +236,9 @@ pub const DeclGen = struct { if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { - const fn_decl_index = switch (val.tag()) { - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, + const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, else => unreachable, }; const 
spv_decl_index = try self.resolveDecl(fn_decl_index); @@ -261,7 +261,7 @@ pub const DeclGen = struct { const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { // TODO: Extern fn? - const kind: SpvModule.DeclKind = if (decl.val.tag() == .function) + const kind: SpvModule.DeclKind = if (decl.getFunctionIndex(self.module) != .none) .func else .global; @@ -573,6 +573,7 @@ pub const DeclGen = struct { fn addDeclRef(self: *@This(), ty: Type, decl_index: Decl.Index) !void { const dg = self.dg; + const mod = dg.module; const ty_ref = try self.dg.resolveType(ty, .indirect); const ty_id = dg.typeId(ty_ref); @@ -580,8 +581,8 @@ pub const DeclGen = struct { const decl = dg.module.declPtr(decl_index); const spv_decl_index = try dg.resolveDecl(decl_index); - switch (decl.val.tag()) { - .function => { + switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .func => { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. @@ -589,7 +590,7 @@ pub const DeclGen = struct { // TODO: Add dependency return; }, - .extern_fn => unreachable, // TODO + .extern_func => unreachable, // TODO else => { const result_id = dg.spv.allocId(); log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name }); @@ -610,39 +611,23 @@ pub const DeclGen = struct { } } - fn lower(self: *@This(), ty: Type, val: Value) !void { + fn lower(self: *@This(), ty: Type, arg_val: Value) !void { const dg = self.dg; const mod = dg.module; - if (val.isUndef(mod)) { + var val = arg_val; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, + } + + if (val.isUndefDeep(mod)) { const size = ty.abiSize(mod); return try self.addUndef(size); } - switch (ty.zigTypeTag(mod)) { - .Int => try self.addInt(ty, val), - .Float => try self.addFloat(ty, val), - .Bool => try self.addConstBool(val.toBool(mod)), + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { .Array => switch (val.tag()) { - .aggregate => { - const elem_vals = val.castTag(.aggregate).?.data; - const elem_ty = ty.childType(mod); - const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way. 
- for (elem_vals[0..len]) |elem_val| { - try self.lower(elem_ty, elem_val); - } - }, - .repeated => { - const elem_val = val.castTag(.repeated).?.data; - const elem_ty = ty.childType(mod); - const len = @intCast(u32, ty.arrayLen(mod)); - for (0..len) |_| { - try self.lower(elem_ty, elem_val); - } - if (ty.sentinel(mod)) |sentinel| { - try self.lower(elem_ty, sentinel); - } - }, .str_lit => { const str_lit = val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; @@ -657,29 +642,6 @@ pub const DeclGen = struct { }, else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}), }, - .Pointer => switch (val.tag()) { - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - try self.addDeclRef(ty, decl_index); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - try self.addDeclRef(ty, decl_index); - }, - .slice => { - const slice = val.castTag(.slice).?.data; - - const ptr_ty = ty.slicePtrFieldType(mod); - - try self.lower(ptr_ty, slice.ptr); - try self.addInt(Type.usize, slice.len); - }, - .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - try self.addInt(Type.usize, val); - }, - else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), - }, .Struct => { if (ty.isSimpleTupleOrAnonStruct(mod)) { unreachable; // TODO @@ -705,20 +667,134 @@ pub const DeclGen = struct { } } }, - .Optional => { + .Vector, + .Frame, + .AnyFrame, + => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try self.addConstBool(val.toBool(mod)), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => try self.addInt(ty, val), + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + try self.addConstInt(u16, @intCast(u16, kv.value)); + }, + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const is_pl = val.errorUnionIsPayload(mod); + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); + + const eu_layout = dg.errorUnionLayout(payload_ty); + if (!eu_layout.payload_has_bits) { + return try self.lower(Type.anyerror, error_val); + } + + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); + const padding = ty_size - payload_size - error_size; + + const payload_val = switch (error_union.val) { + .err_name => try 
mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + if (eu_layout.error_first) { + try self.lower(Type.anyerror, error_val); + try self.lower(payload_ty, payload_val); + } else { + try self.lower(payload_ty, payload_val); + try self.lower(Type.anyerror, error_val); + } + + try self.addUndef(padding); + }, + .enum_tag => { + const int_val = try val.enumToInt(ty, mod); + + const int_ty = try ty.intTagType(mod); + + try self.lower(int_ty, int_val); + }, + .float => try self.addFloat(ty, val), + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| try self.addDeclRef(ty, decl), + .mut_decl => |mut_decl| try self.addDeclRef(ty, mut_decl.decl), + else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), + } + if (ptr.len != .none) { + try self.addInt(Type.usize, ptr.len.toValue()); + } + }, + .opt => { const payload_ty = ty.optionalChild(mod); - const has_payload = !val.isNull(mod); + const payload_val = val.optionalValue(mod); const abi_size = ty.abiSize(mod); if (!payload_ty.hasRuntimeBits(mod)) { - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); return; } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); - } else if (has_payload) { - try self.lower(payload_ty, val); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { const ptr_ty_ref = try dg.resolveType(ty, .indirect); try self.addNullPtr(ptr_ty_ref); @@ -734,27 +810,63 @@ pub const DeclGen = struct { const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { try self.addUndef(payload_size); } - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); try self.addUndef(padding); }, - .Enum => { - const int_val = try val.enumToInt(ty, mod); + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => |array_type| { + const elem_ty = array_type.child.toType(); + switch (aggregate.storage) { + .bytes => |bytes| try self.addBytes(bytes), + .elems, .repeated_elem => { + for (0..array_type.len) |i| { + try self.lower(elem_ty, switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(), + .repeated_elem => |elem_val| elem_val.toValue(), + }); + } + }, + } + if (array_type.sentinel != .none) { + try self.lower(elem_ty, array_type.sentinel.toValue()); + } + }, + .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .struct_type => { + const struct_ty = mod.typeToStruct(ty).?; - const int_ty = try ty.intTagType(mod); + if (struct_ty.layout == .Packed) { + return dg.todo("packed struct constants", .{}); + } - try self.lower(int_ty, int_val); + const struct_begin = self.size; + const field_vals = val.castTag(.aggregate).?.data; + for (struct_ty.fields.values(), 0..) |field, i| { + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; + try self.lower(field.ty, field_vals[i]); + + // Add padding if required. + // TODO: Add to type generation as well? 
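// Aside: the invariant the padding below maintains, in standalone form with
// made-up offsets and sizes (illustrative only): after writing field i plus
// its padding, the bytes emitted must land exactly on the ABI offset of
// field i + 1.
test "padding lands each field on its ABI offset" {
    const std = @import("std");
    const offsets = [_]u64{ 0, 2, 8 }; // assumed ABI field offsets
    const sizes = [_]u64{ 2, 1, 4 }; // assumed field sizes
    var written: u64 = 0;
    for (sizes, 0..) |size, i| {
        written += size;
        if (i + 1 < offsets.len) {
            written += offsets[i + 1] - written; // the undef padding
            try std.testing.expectEqual(offsets[i + 1], written);
        }
    }
}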
+ const unpadded_field_end = self.size - struct_begin; + const padded_field_end = ty.structFieldOffset(i + 1, mod); + const padding = padded_field_end - unpadded_field_end; + try self.addUndef(padding); + } + }, + .anon_struct_type => unreachable, // TODO + else => unreachable, }, - .Union => { - const tag_and_val = val.castTag(.@"union").?.data; + .un => |un| { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { - return try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + return try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } const union_ty = mod.typeToUnion(ty).?; @@ -762,18 +874,18 @@ pub const DeclGen = struct { return dg.todo("packed union constants", .{}); } - const active_field = ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?; + const active_field = ty.unionTagFieldIndex(un.tag.toValue(), dg.module).?; const active_field_ty = union_ty.fields.values()[active_field].ty; const has_tag = layout.tag_size != 0; const tag_first = layout.tag_align >= layout.payload_align; if (has_tag and tag_first) { - try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { - try self.lower(active_field_ty, tag_and_val.val); + try self.lower(active_field_ty, un.val.toValue()); break :blk active_field_ty.abiSize(mod); } else 0; @@ -781,53 +893,11 @@ pub const DeclGen = struct { try self.addUndef(payload_padding_len); if (has_tag and !tag_first) { - try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } try self.addUndef(layout.padding); }, - .ErrorSet => switch (val.ip_index) { - .none => switch (val.tag()) { - .@"error" => { - const err_name = val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - try self.addConstInt(u16, @intCast(u16, kv.value)); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)), - else => unreachable, - }, - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); - - const eu_layout = dg.errorUnionLayout(payload_ty); - if (!eu_layout.payload_has_bits) { - return try self.lower(Type.anyerror, error_val); - } - - const payload_size = payload_ty.abiSize(mod); - const error_size = Type.anyerror.abiAlignment(mod); - const ty_size = ty.abiSize(mod); - const padding = ty_size - payload_size - error_size; - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - - if (eu_layout.error_first) { - try self.lower(Type.anyerror, error_val); - try self.lower(payload_ty, payload_val); - } else { - try self.lower(payload_ty, payload_val); - try self.lower(Type.anyerror, error_val); - } - - try self.addUndef(padding); - }, - else => |tag| return dg.todo("indirect constant of type {s}", .{@tagName(tag)}), } } }; @@ -1542,7 +1612,7 @@ pub const DeclGen = struct { const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); - if (decl.val.castTag(.function)) |_| { + if (decl.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try 
self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ @@ -1595,8 +1665,8 @@ pub const DeclGen = struct { try self.generateTestEntryPoint(fqn, spv_decl_index); } } else { - const init_val = if (decl.val.castTag(.variable)) |payload| - payload.data.init + const init_val = if (decl.getVariable(mod)) |payload| + payload.init.toValue() else decl.val; diff --git a/src/link.zig b/src/link.zig index 1f34b0f760..a44a7387e9 100644 --- a/src/link.zig +++ b/src/link.zig @@ -564,7 +564,8 @@ pub const File = struct { } /// May be called before or after updateDeclExports for any given Decl. - pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void { + pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { + const func = module.funcPtr(func_index); const owner_decl = module.declPtr(func.owner_decl); log.debug("updateFunc {*} ({s}), type={}", .{ owner_decl, owner_decl.name, owner_decl.ty.fmt(module), @@ -575,14 +576,14 @@ pub const File = struct { } switch (base.tag) { // zig fmt: off - .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), - .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), - .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), - .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), - .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), - .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), - .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness), + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func_index, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func_index, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func_index, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func_index, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func_index, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func_index, air, liveness), + .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func_index, air, liveness), // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 1a25bfe231..c871d8a02a 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -87,12 +87,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { } } -pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.base.allocator; + const func = module.funcPtr(func_index); const decl_index = func.owner_decl; const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { @@ -111,7 +112,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .value_map = codegen.CValueMap.init(gpa), .air = air, .liveness = liveness, 
- .func = func, + .func_index = func_index, .object = .{ .dg = .{ .gpa = gpa, @@ -555,7 +556,8 @@ fn flushDecl( export_names: std.StringHashMapUnmanaged(void), ) FlushDeclError!void { const gpa = self.base.allocator; - const decl = self.base.options.module.?.declPtr(decl_index); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); // Before flushing any particular Decl we must ensure its // dependencies are already flushed, so that the order in the .c // file comes out correctly. @@ -569,7 +571,7 @@ fn flushDecl( try self.flushLazyFns(f, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) + if (!(decl.isExtern(mod) and export_names.contains(mem.span(decl.name)))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index efaeebc62e..f4ee2fde97 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1032,18 +1032,19 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| { - return llvm_object.updateFunc(mod, func, air, liveness); + return llvm_object.updateFunc(mod, func_index, air, liveness); } } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1057,7 +1058,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, livenes const res = try codegen.generateFunction( &self.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_buffer, @@ -1155,11 +1156,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -1172,7 +1172,7 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, @@ -1313,7 +1313,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rdata_section_index.?; @@ -1425,7 +1425,7 @@ pub fn updateDeclExports( // detect the default subsystem. 
for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getFunction() == null) continue; + if (exported_decl.getFunctionIndex(mod) == .none) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index ed2883f4da..d6dd6979ea 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -971,7 +971,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureTotalCapacity(26); - const func = decl.val.castTag(.function).?.data; + const func = decl.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, @@ -1514,7 +1514,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons } } -pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void { +pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1522,8 +1522,8 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De const atom = self.getAtom(.src_fn, atom_index); if (atom.len == 0) return; - const decl = module.declPtr(decl_index); - const func = decl.val.castTag(.function).?.data; + const decl = mod.declPtr(decl_index); + const func = decl.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index b27967884e..476b939038 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2465,7 +2465,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? 
.Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rodata_section_index.?; @@ -2574,17 +2574,18 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -2599,11 +2600,11 @@ pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); const code = switch (res) { .ok => code_buffer.items, @@ -2646,11 +2647,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} @@ -2667,7 +2667,7 @@ pub fn updateDecl( defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e7723595db..ffbdcdb91f 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1847,16 +1847,17 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1874,11 +1875,11 @@ pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, livene defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); var code = switch (res) { .ok => code_buffer.items, @@ -1983,18 +1984,17 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} } - const is_threadlocal = if (decl.val.castTag(.variable)) |payload| - payload.data.is_threadlocal and !self.base.options.single_threaded + const is_threadlocal = if (decl.getVariable(mod)) |variable| + variable.is_threadlocal and !self.base.options.single_threaded else false; if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index); @@ -2012,7 +2012,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo null; defer if (decl_state) |*ds| ds.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2177,7 +2177,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl = module.declPtr(decl_index); const decl_metadata = self.decls.get(decl_index).?; - const decl_val = decl.val.castTag(.variable).?.data.init; + const decl_val = decl.getVariable(mod).?.init.toValue(); const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2278,8 +2278,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { } } - if (val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal and !single_threaded) { break :blk self.thread_data_section_index.?; } break :blk self.data_section_index.?; @@ -2289,7 +2289,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.data_const_section_index.?; diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 69cd73a602..b74518d930 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -68,9 +68,9 @@ pub fn deinit(self: *NvPtx) void { self.base.allocator.free(self.ptx_file_name); } -pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (!build_options.have_llvm) return; - try self.llvm_object.updateFunc(module, func, air, liveness); + try self.llvm_object.updateFunc(module, func_index, air, liveness); } pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 968cbb0e7e..2071833b93 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -276,11 +276,12 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); @@ -299,7 +300,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, 
air: Air, livene const res = try codegen.generateFunction( &self.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_buffer, @@ -391,11 +392,10 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -407,7 +407,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -771,7 +771,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = (decl.val.tag() == .function); + const is_fn = decl.getFunctionIndex(mod) != .none; if (is_fn) { var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index da25753b95..0a6608303e 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -103,11 +103,13 @@ pub fn deinit(self: *SpirV) void { self.decl_link.deinit(); } -pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const func = module.funcPtr(func_index); + var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link); defer decl_gen.deinit(); @@ -136,7 +138,7 @@ pub fn updateDeclExports( exports: []const *Module.Export, ) !void { const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) { + if (decl.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. 
const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index ef97a7fa7f..78d1be978b 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1324,17 +1324,18 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { return index; } -pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); @@ -1358,7 +1359,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes const result = try codegen.generateFunction( &wasm.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_writer, @@ -1403,9 +1404,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |_| { + if (decl.getFunction(mod)) |_| { return; - } else if (decl.val.castTag(.extern_fn)) |_| { + } else if (decl.getExternFunc(mod)) |_| { return; } @@ -1413,12 +1414,13 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi const atom = wasm.getAtomPtr(atom_index); atom.clear(); - if (decl.isExtern()) { - const variable = decl.getVariable().?; + if (decl.isExtern(mod)) { + const variable = decl.getVariable(mod).?; const name = mem.sliceTo(decl.name, 0); - return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null); + const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); + return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } - const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; var code_writer = std.ArrayList(u8).init(wasm.base.allocator); defer code_writer.deinit(); @@ -1791,7 +1793,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void { assert(wasm.symbol_atom.remove(local_atom.symbolLoc())); } - if (decl.isExtern()) { + if (decl.isExtern(mod)) { _ = wasm.imports.remove(atom.symbolLoc()); } _ = wasm.resolved_symbols.swapRemove(atom.symbolLoc()); @@ -1852,7 +1854,7 @@ pub fn addOrUpdateImport( /// Symbol index that is external symbol_index: u32, /// Optional library name (i.e. `extern "c" fn foo() void` - lib_name: ?[*:0]const u8, + lib_name: ?[:0]const u8, /// The index of the type that represents the function signature /// when the extern is a function. When this is null, a data-symbol /// is asserted instead. @@ -1863,7 +1865,7 @@ pub fn addOrUpdateImport( // Also mangle the name when the lib name is set and not equal to "C" so imports with the same // name but different module can be resolved correctly. 
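// Aside: the mangling scheme as a standalone sketch; the function name and
// its always-allocating behavior are assumptions of the sketch, not the
// patch. Imports keep their plain name for libc; otherwise the module name
// is appended after a '|' so equal names from different modules stay
// distinct.
const std = @import("std");

fn mangledImportName(
    gpa: std.mem.Allocator,
    name: []const u8,
    lib_name: ?[:0]const u8,
) ![]u8 {
    const mangle = lib_name != null and !std.mem.eql(u8, lib_name.?, "c");
    if (!mangle) return gpa.dupe(u8, name);
    return std.fmt.allocPrint(gpa, "{s}|{s}", .{ name, lib_name.? });
}

test "imports from non-c modules are mangled" {
    const mangled = try mangledImportName(std.testing.allocator, "foo", "env");
    defer std.testing.allocator.free(mangled);
    try std.testing.expectEqualStrings("foo|env", mangled);
}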
const mangle_name = lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c"); + !std.mem.eql(u8, lib_name.?, "c"); const full_name = if (mangle_name) full_name: { break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? }); } else name; @@ -1889,7 +1891,7 @@ pub fn addOrUpdateImport( if (type_index) |ty_index| { const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null }); const module_name = if (lib_name) |l_name| blk: { - break :blk mem.sliceTo(l_name, 0); + break :blk l_name; } else wasm.host_name; if (!gop.found_existing) { gop.value_ptr.* = .{ @@ -2931,7 +2933,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const mod = wasm.base.options.module.?; atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; @@ -2988,7 +2990,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { for (mod.error_name_list.items) |error_name| { const len = @intCast(u32, error_name.len + 1); // names are 0-termianted - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const offset = @intCast(u32, atom.code.items.len); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated @@ -3366,15 +3368,15 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod var decl_it = wasm.decls.iterator(); while (decl_it.next()) |entry| { const decl = mod.declPtr(entry.key_ptr.*); - if (decl.isExtern()) continue; + if (decl.isExtern(mod)) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); - } else if (decl.getVariable()) |variable| { - if (!variable.is_mutable) { + } else if (decl.getVariable(mod)) |variable| { + if (variable.is_const) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); - } else if (variable.init.isUndefDeep(mod)) { + } else if (variable.init.toValue().isUndefDeep(mod)) { // for safe build modes, we store the atom in the data segment, // whereas for unsafe build modes we store it in bss. 
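
Two details in the hunks above are easy to miss: `lib_name` is now a null-terminated slice, so it compares with `std.mem.eql` directly instead of going through `mem.sliceTo`, and the InternPool's `Variable` stores `is_const`, the inverse of the old payload's `is_mutable`. For illustration, with names as in the surrounding code:

    const mangle_name = lib_name != null and
        !std.mem.eql(u8, lib_name.?, "c"); // no mem.sliceTo needed on a slice

    if (variable.is_const) {
        // read-only data; previously spelled `if (!variable.is_mutable)`
    }
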
const is_initialized = wasm.base.options.optimize_mode == .Debug or diff --git a/src/print_air.zig b/src/print_air.zig index ef52b4c085..9169a88bbc 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -699,8 +699,8 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const function = w.air.values[ty_pl.payload].castTag(.function).?.data; - const owner_decl = w.module.declPtr(function.owner_decl); + const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index); + const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl); try s.print("{s}", .{owner_decl.name}); } diff --git a/src/type.zig b/src/type.zig index f2fad91eba..087dc88c30 100644 --- a/src/type.zig +++ b/src/type.zig @@ -93,16 +93,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .simple_value => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }; } @@ -358,7 +365,7 @@ pub const Type = struct { const func = ies.func; try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); + const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl); try owner_decl.renderFullyQualifiedName(mod, writer); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, @@ -467,16 +474,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, } } @@ -675,16 +689,23 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -777,16 +798,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }; } @@ -866,8 +894,8 @@ pub const 
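
In `print_air.zig`, the function stored in an AIR value is now reached through the InternPool rather than a legacy `.function` payload. The lookup chain, sketched with a hypothetical interned value `val` that is known to refer to a function:

    const func_index = w.module.intern_pool.indexToFunc(val.ip_index); // Module.Fn.OptionalIndex
    const func = w.module.funcPtrUnwrap(func_index).?; // would be null for .none
    const owner_decl = w.module.declPtr(func.owner_decl);
    _ = owner_decl;

The `type.zig` hunks that follow are mechanical by comparison: the long runs of per-tag `=> unreachable` arms for value keys collapse into one grouped switch prong listing every value tag.
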
Type = struct { /// May capture a reference to `ty`. /// Returned value has type `comptime_int`. - pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { + pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { .val => |val| return val, .scalar => |x| return mod.intValue(Type.comptime_int, x), } @@ -880,7 +908,7 @@ pub const Type = struct { pub const AbiAlignmentAdvancedStrat = union(enum) { eager, - lazy: Allocator, + lazy, sema: *Sema, }; @@ -1019,16 +1047,18 @@ pub const Type = struct { if (!struct_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }; if (struct_obj.layout == .Packed) { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } - }, + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, .eager => {}, } assert(struct_obj.haveLayout()); @@ -1039,7 +1069,10 @@ pub const Type = struct { var big_align: u32 = 0; for (fields.values()) |field| { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) continue; @@ -1050,7 +1083,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, }; big_align = @max(big_align, field_align); @@ -1077,7 +1113,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // field type alignment not resolved .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, } } @@ -1092,16 +1131,23 @@ pub const Type = struct { .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, 
+ .un, + => unreachable, }, } } @@ -1118,7 +1164,10 @@ pub const Type = struct { switch (strat) { .eager, .sema => { if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = code_align }; @@ -1128,7 +1177,7 @@ pub const Type = struct { (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, - .lazy => |arena| { + .lazy => { switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ @@ -1137,7 +1186,10 @@ pub const Type = struct { }, .val => {}, } - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }; }, } } @@ -1160,16 +1212,22 @@ pub const Type = struct { switch (strat) { .eager, .sema => { if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = 1 }; } return child_type.abiAlignmentAdvanced(mod, strat); }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, - .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, } } @@ -1198,7 +1256,10 @@ pub const Type = struct { if (!union_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // union layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }; if (union_obj.fields.count() == 0) { if (have_tag) { @@ -1212,7 +1273,10 @@ pub const Type = struct { if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) continue; @@ -1223,7 +1287,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ 
.lazy_align = ty.ip_index }, + } })).toValue() }, }, }; max_align = @max(max_align, field_align); @@ -1232,8 +1299,8 @@ pub const Type = struct { } /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { .val => |val| return val, .scalar => |x| return mod.intValue(Type.comptime_int, x), } @@ -1283,7 +1350,10 @@ pub const Type = struct { .scalar => |elem_size| return .{ .scalar = len * elem_size }, .val => switch (strat) { .sema, .eager => unreachable, - .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, } }, @@ -1291,9 +1361,10 @@ pub const Type = struct { const opt_sema = switch (strat) { .sema => |sema| sema, .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }; const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); const elem_bits = @intCast(u32, elem_bits_u64); @@ -1301,9 +1372,10 @@ pub const Type = struct { const total_bytes = (total_bits + 7) / 8; const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }; const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); return AbiSizeAdvanced{ .scalar = result }; @@ -1320,7 +1392,10 @@ pub const Type = struct { // in abiAlignmentAdvanced. const code_size = abiSize(Type.anyerror, mod); if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { // Same as anyerror. 
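
The repeated pattern in these `type.zig` hunks: because the `.lazy` strategy no longer carries an arena, a lazy alignment or size is produced by interning a `comptime_int` whose integer storage records which type to resolve later, replacing the arena-allocated `Value.Tag.lazy_align`/`lazy_size` payloads. A minimal sketch, assuming `ty: Type` and `mod: *Module`:

    // was: try Value.Tag.lazy_size.create(arena, ty)
    const lazy_size_val = (try mod.intern(.{ .int = .{
        .ty = .comptime_int_type,
        .storage = .{ .lazy_size = ty.ip_index },
    } })).toValue();

Consumers such as `toBigIntAdvanced` in the `value.zig` hunks below resolve that storage with `ty.toType().abiSize(mod)` (or `abiAlignment`) once the layout is actually needed.
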
@@ -1333,7 +1408,10 @@ pub const Type = struct { .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, }; @@ -1420,11 +1498,10 @@ pub const Type = struct { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, .eager => {}, } assert(struct_obj.haveLayout()); @@ -1433,12 +1510,13 @@ pub const Type = struct { else => { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { + .lazy => { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return AbiSizeAdvanced{ .scalar = 0 }; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } + if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }; }, .eager => {}, } @@ -1469,16 +1547,23 @@ pub const Type = struct { .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, } } @@ -1492,11 +1577,10 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!union_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, .eager => {}, } return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; @@ -1514,7 +1598,10 @@ pub const Type = struct { } if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; @@ -1527,7 +1614,10 @@ pub const Type = struct { .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, }; @@ -1690,16 +1780,23 @@ pub const Type = struct { .enum_type => |enum_type| return 
bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, } } @@ -2270,16 +2367,23 @@ pub const Type = struct { .opaque_type => unreachable, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2443,16 +2547,17 @@ pub const Type = struct { .inferred_error_set_type, => return null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try array_type.child.toType().onePossibleValue(mod)) != null) - return Value.initTag(.the_only_possible_value); - return null; - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; + inline .array_type, .vector_type => |seq_type| { + if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try seq_type.child.toType().onePossibleValue(mod)) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = opv.ip_index }, + } })).toValue(); + } return null; }, .opt_type => |child| { @@ -2595,16 +2700,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2733,16 +2845,23 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2802,13 +2921,12 @@ pub const Type = struct { } // Works for vectors and vectors of integers. 
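
Array and vector values follow the same interning scheme: what used to be `Value.initTag(.empty_array)` or a `Value.Tag.repeated` payload becomes an interned aggregate whose storage is one of `.bytes`, `.elems`, or `.repeated_elem` (the same three shapes later drive `toAllocatedBytes` and `compareAllWithZeroAdvancedExtra`). The `minInt`/`maxInt` hunks below use the `.repeated_elem` form to splat a scalar across a vector; as a sketch, assuming `ty`, `scalar`, and `mod` are in scope:

    // empty array or vector: zero elements
    const empty = (try mod.intern(.{ .aggregate = .{
        .ty = ty.ip_index,
        .storage = .{ .elems = &.{} },
    } })).toValue();
    _ = empty;

    // splatted vector: one element, repeated
    const splat = (try mod.intern(.{ .aggregate = .{
        .ty = ty.ip_index,
        .storage = .{ .repeated_elem = scalar.ip_index },
    } })).toValue();
    _ = splat;
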
- pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value { + pub fn minInt(ty: Type, mod: *Module) !Value { const scalar = try minIntScalar(ty.scalarType(mod), mod); - if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { - return Value.Tag.repeated.create(arena, scalar); - } else { - return scalar; - } + return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = scalar.ip_index }, + } })).toValue() else scalar; } /// Asserts that the type is an integer. @@ -2832,13 +2950,12 @@ pub const Type = struct { // Works for vectors and vectors of integers. /// The returned Value will have type dest_ty. - pub fn maxInt(ty: Type, arena: Allocator, mod: *Module, dest_ty: Type) !Value { + pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); - if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { - return Value.Tag.repeated.create(arena, scalar); - } else { - return scalar; - } + return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = scalar.ip_index }, + } })).toValue() else scalar; } /// The returned Value will have type dest_ty. @@ -3386,12 +3503,12 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; - pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type }; + pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type, }; - pub const const_slice_u8_sentinel_0: Type = .{ .ip_index = .const_slice_u8_sentinel_0_type }; + pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; diff --git a/src/value.zig b/src/value.zig index b1c94d46b5..47215e588c 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,64 +33,12 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. - /// The only possible value for a particular type, which is stored externally. - the_only_possible_value, - - empty_array, // See last_no_payload_tag below. // After this, the tag requires a payload. - function, - extern_fn, - /// A comptime-known pointer can point to the address of a global - /// variable. The child element value in this case will have this tag. - variable, - /// A wrapper for values which are comptime-known but should - /// semantically be runtime-known. - runtime_value, - /// Represents a pointer to a Decl. - /// When machine codegen backend sees this, it must set the Decl's `alive` field to true. - decl_ref, - /// Pointer to a Decl, but allows comptime code to mutate the Decl's Value. - /// This Tag will never be seen by machine codegen backends. It is changed into a - /// `decl_ref` when a comptime variable goes out of scope. - decl_ref_mut, - /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value. 
- comptime_field_ptr, - /// Pointer to a specific element of an array, vector or slice. - elem_ptr, - /// Pointer to a specific field of a struct or union. - field_ptr, /// A slice of u8 whose memory is managed externally. bytes, /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`. str_lit, - /// This value is repeated some number of times. The amount of times to repeat - /// is stored externally. - repeated, - /// An array with length 0 but it has a sentinel. - empty_array_sentinel, - /// Pointer and length as sub `Value` objects. - slice, - enum_literal, - @"error", - /// When the type is error union: - /// * If the tag is `.@"error"`, the error union is an error. - /// * If the tag is `.eu_payload`, the error union is a payload. - /// * A nested error such as `anyerror!(anyerror!T)` in which the the outer error union - /// is non-error, but the inner error union is an error, is represented as - /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. - eu_payload, - /// A pointer to the payload of an error union, based on a pointer to an error union. - eu_payload_ptr, - /// When the type is optional: - /// * If the tag is `.null_value`, the optional is null. - /// * If the tag is `.opt_payload`, the optional is a payload. - /// * A nested optional such as `??T` in which the the outer optional - /// is non-null, but the inner optional is null, is represented as - /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. - opt_payload, - /// A pointer to the payload of an optional, based on a pointer to an optional. - opt_payload_ptr, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -104,57 +52,19 @@ pub const Value = struct { /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc /// instructions for comptime code. inferred_alloc_comptime, - /// The ABI alignment of the payload type. - lazy_align, - /// The ABI size of the payload type. 
- lazy_size, - pub const last_no_payload_tag = Tag.empty_array; - pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; + pub const no_payload_count = 0; pub fn Type(comptime t: Tag) type { return switch (t) { - .the_only_possible_value, - .empty_array, - => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), - - .extern_fn => Payload.ExternFn, - - .decl_ref => Payload.Decl, - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - .runtime_value, - => Payload.SubValue, - - .eu_payload_ptr, - .opt_payload_ptr, - => Payload.PayloadPtr, - - .bytes, - .enum_literal, - => Payload.Bytes, + .bytes => Payload.Bytes, .str_lit => Payload.StrLit, - .slice => Payload.Slice, - - .lazy_align, - .lazy_size, - => Payload.Ty, - - .function => Payload.Function, - .variable => Payload.Variable, - .decl_ref_mut => Payload.DeclRefMut, - .elem_ptr => Payload.ElemPtr, - .field_ptr => Payload.FieldPtr, - .@"error" => Payload.Error, + .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, - .comptime_field_ptr => Payload.ComptimeFieldPtr, }; } @@ -249,91 +159,6 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .the_only_possible_value, - .empty_array, - => unreachable, - - .lazy_align, .lazy_size => { - const payload = self.cast(Payload.Ty).?; - const new_payload = try arena.create(Payload.Ty); - new_payload.* = .{ - .base = payload.base, - .data = payload.data, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .function => return self.copyPayloadShallow(arena, Payload.Function), - .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn), - .variable => return self.copyPayloadShallow(arena, Payload.Variable), - .decl_ref => return self.copyPayloadShallow(arena, Payload.Decl), - .decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut), - .eu_payload_ptr, - .opt_payload_ptr, - => { - const payload = self.cast(Payload.PayloadPtr).?; - const new_payload = try arena.create(Payload.PayloadPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = payload.data.container_ty, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .comptime_field_ptr => { - const payload = self.cast(Payload.ComptimeFieldPtr).?; - const new_payload = try arena.create(Payload.ComptimeFieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .field_val = try payload.data.field_val.copy(arena), - .field_ty = payload.data.field_ty, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .elem_ptr => { - const payload = self.castTag(.elem_ptr).?; - const new_payload = try arena.create(Payload.ElemPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .array_ptr = try payload.data.array_ptr.copy(arena), - .elem_ty = payload.data.elem_ty, - .index = payload.data.index, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .field_ptr => { - const payload = self.castTag(.field_ptr).?; - const new_payload = try arena.create(Payload.FieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try 
payload.data.container_ptr.copy(arena), - .container_ty = payload.data.container_ty, - .field_index = payload.data.field_index, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, .bytes => { const bytes = self.castTag(.bytes).?.data; const new_payload = try arena.create(Payload.Bytes); @@ -347,52 +172,6 @@ pub const Value = struct { }; }, .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - .runtime_value, - => { - const payload = self.cast(Payload.SubValue).?; - const new_payload = try arena.create(Payload.SubValue); - new_payload.* = .{ - .base = payload.base, - .data = try payload.data.copy(arena), - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .slice => { - const payload = self.castTag(.slice).?; - const new_payload = try arena.create(Payload.Slice); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .ptr = try payload.data.ptr.copy(arena), - .len = try payload.data.len.copy(arena), - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .enum_literal => { - const payload = self.castTag(.enum_literal).?; - const new_payload = try arena.create(Payload.Bytes); - new_payload.* = .{ - .base = payload.base, - .data = try arena.dupe(u8, payload.data), - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .@"error" => return self.copyPayloadShallow(arena, Payload.Error), - .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -453,7 +232,7 @@ pub const Value = struct { pub fn dump( start_val: Value, comptime fmt: []const u8, - options: std.fmt.FormatOptions, + _: std.fmt.FormatOptions, out_stream: anytype, ) !void { comptime assert(fmt.len == 0); @@ -469,44 +248,6 @@ pub const Value = struct { .@"union" => { return out_stream.writeAll("(union value)"); }, - .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), - .lazy_align => { - try out_stream.writeAll("@alignOf("); - try val.castTag(.lazy_align).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .lazy_size => { - try out_stream.writeAll("@sizeOf("); - try val.castTag(.lazy_size).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .runtime_value => return out_stream.writeAll("[runtime value]"), - .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}), - .extern_fn => return out_stream.writeAll("(extern function)"), - .variable => return out_stream.writeAll("(variable)"), - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - return out_stream.print("(decl_ref_mut {d})", .{decl_index}); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - return out_stream.print("(decl_ref {d})", .{decl_index}); - }, - .comptime_field_ptr => { - return out_stream.writeAll("(comptime_field_ptr)"); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try out_stream.print("&[{}] ", .{elem_ptr.index}); - val = elem_ptr.array_ptr; - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try out_stream.print("fieldptr({d}) ", .{field_ptr.field_index}); - val = field_ptr.container_ptr; - }, - .empty_array => return out_stream.writeAll(".{}"), - 
.enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -514,31 +255,8 @@ pub const Value = struct { str_lit.index, str_lit.len, }); }, - .repeated => { - try out_stream.writeAll("(repeated) "); - val = val.castTag(.repeated).?.data; - }, - .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"), - .slice => return out_stream.writeAll("(slice)"), - .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - try out_stream.writeAll("(eu_payload) "); - val = val.castTag(.eu_payload).?.data; - }, - .opt_payload => { - try out_stream.writeAll("(opt_payload) "); - val = val.castTag(.opt_payload).?.data; - }, .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), - .eu_payload_ptr => { - try out_stream.writeAll("(eu_payload_ptr)"); - val = val.castTag(.eu_payload_ptr).?.data.container_ptr; - }, - .opt_payload_ptr => { - try out_stream.writeAll("(opt_payload_ptr)"); - val = val.castTag(.opt_payload_ptr).?.data.container_ptr; - }, }; } @@ -569,30 +287,23 @@ pub const Value = struct { const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; return allocator.dupe(u8, bytes); }, - .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), - .repeated => { - const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - const decl_val = try decl.value(); - return decl_val.toAllocatedBytes(decl.ty, allocator, mod); - }, - .the_only_possible_value => return &[_]u8{}, - .slice => { - const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); - }, else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), .ptr => |ptr| switch (ptr.len) { .none => unreachable, - else => return arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; + }, }, else => unreachable, }, @@ -611,29 +322,6 @@ pub const Value = struct { pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); switch (val.tag()) { - .elem_ptr => { - const pl = val.castTag(.elem_ptr).?.data; - return mod.intern(.{ .ptr = .{ 
- .ty = ty.ip_index, - .addr = .{ .elem = .{ - .base = pl.array_ptr.ip_index, - .index = pl.index, - } }, - } }); - }, - .slice => { - const pl = val.castTag(.slice).?.data; - const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); - return mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, - .addr = mod.intern_pool.indexToKey(ptr).ptr.addr, - .len = try pl.len.intern(Type.usize, mod), - } }); - }, - .opt_payload => return mod.intern(.{ .opt = .{ - .ty = ty.ip_index, - .val = try val.castTag(.opt_payload).?.data.intern(ty.childType(mod), mod), - } }), .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); @@ -651,13 +339,6 @@ pub const Value = struct { .storage = .{ .elems = new_elems }, } }); }, - .repeated => return mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = try val.castTag(.repeated).?.data.intern( - ty.structFieldType(0, mod), - mod, - ) }, - } }), .@"union" => { const pl = val.castTag(.@"union").?.data; return mod.intern(.{ .un = .{ @@ -679,7 +360,6 @@ pub const Value = struct { for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, - .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), }, else => return val, } @@ -698,31 +378,21 @@ pub const Value = struct { pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { const ip = &mod.intern_pool; switch (val.ip_index) { - .none => { - const field_index = switch (val.tag()) { - .the_only_possible_value => blk: { - assert(ty.enumFieldCount(mod) == 1); - break :blk 0; - }, - .enum_literal => i: { - const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name, mod).?; - }, - else => unreachable, - }; - return switch (ip.indexToKey(ty.ip_index)) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - enum_type.values[field_index].toValue() - else // Field index and integer values are the same. - mod.intValue(enum_type.tag_ty.toType(), field_index), - else => unreachable, - }; - }, else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + return switch (ip.indexToKey(ty.ip_index)) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; + }, .enum_type => |enum_type| (try ip.getCoerced( mod.gpa, val.ip_index, @@ -733,18 +403,12 @@ pub const Value = struct { } } - pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - _ = ty; // TODO: remove this parameter now that we use InternPool - - if (val.castTag(.enum_literal)) |payload| { - return payload.data; - } - + pub fn tagName(val: Value, mod: *Module) []const u8 { const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.ip_index)) { .un => |un| ip.indexToKey(un.tag).enum_tag, .enum_tag => |x| x, + .enum_literal => |name| return ip.stringToSlice(name), else => unreachable, }; const enum_type = ip.indexToKey(enum_tag.ty).enum_type; @@ -773,49 +437,61 @@ pub const Value = struct { .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => BigIntMutable.init(&space.limbs, 0).toConst(), - - .runtime_value => { - const sub_val = val.castTag(.runtime_value).?.data; - return sub_val.toBigIntAdvanced(space, mod, opt_sema); - }, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiAlignment(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiSize(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => int.storage.toBigInt(space), + .lazy_align, .lazy_size => |ty| { + if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType()); + const x = switch (int.storage) { + else => unreachable, + .lazy_align => ty.toType().abiAlignment(mod), + .lazy_size => ty.toType().abiSize(mod), + }; + return BigIntMutable.init(&space.limbs, x).toConst(); + }, }, - - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?; - const elem_size = elem_ptr.elem_ty.abiSize(mod); - const new_addr = array_addr + elem_size * elem_ptr.index; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); + .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .ptr => |ptr| switch (ptr.len) { + .none => switch (ptr.addr) { + .int => |int| int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .elem => |elem| { + const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)).?; + const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); + const new_addr = base_addr + elem.index * elem_size; + return BigIntMutable.init(&space.limbs, new_addr).toConst(); + }, + else => unreachable, + }, + else => unreachable, }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| int.storage.toBigInt(space), - .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space), else => unreachable, }, }; } + pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn { + return mod.funcPtrUnwrap(val.getFunctionIndex(mod)); + } + + pub fn getFunctionIndex(val: 
Value, mod: *Module) Module.Fn.OptionalIndex { + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none; + } + + pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| extern_func, + else => null, + } else null; + } + + pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable, + else => null, + } else null; + } + /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { @@ -825,42 +501,27 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (val.ip_index) { + .bool_false => 0, + .bool_true => 1, .undef => unreachable, - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => return 0, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiAlignment(mod); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiSize(mod); - } - }, - - else => return null, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), + .lazy_align => |ty| if (opt_sema) |sema| + (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiAlignment(mod), + .lazy_size => |ty| if (opt_sema) |sema| + (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiSize(mod), }, else => null, }, - } + }; } /// Asserts the value is an integer and it fits in a u64 @@ -870,58 +531,40 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *Module) i64 { - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (val.ip_index) { + .bool_false => 0, + .bool_true => 1, .undef => unreachable, - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => return 0, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return @intCast(i64, ty.abiAlignment(mod)); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return @intCast(i64, ty.abiSize(mod)); - }, - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, .u64 => |x| @intCast(i64, x), + .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)), }, else => unreachable, }, - } + }; } - pub fn toBool(val: Value, mod: *const Module) bool { + pub fn toBool(val: Value, _: *const Module) 
bool { return switch (val.ip_index) { .bool_true => true, .bool_false => false, - .none => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| !big_int.eqZero(), - inline .u64, .i64 => |x| x != 0, - }, - else => unreachable, - }, + else => unreachable, }; } - fn isDeclRef(val: Value) bool { + fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return true, + .eu_payload, .opt_payload => |index| check = index.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), + else => return false, + }, else => return false, }; } @@ -953,24 +596,9 @@ pub const Value = struct { const bits = int_info.bits; const byte_count = (bits + 7) / 8; - const int_val = try val.enumToInt(ty, mod); - - if (byte_count <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.ip_index); - const int: u64 = switch (ip_key.int.storage) { - .u64 => |x| x, - .i64 => |x| @bitCast(u64, x), - .big_int => unreachable, - }; - for (buffer[0..byte_count], 0..) |_, i| switch (endian) { - .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - }; - } else { - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, mod); - bigint.writeTwosComplement(buffer[0..byte_count], endian); - } + var bigint_buffer: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buffer, mod); + bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian), @@ -1016,7 +644,12 @@ pub const Value = struct { .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; + const name = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .err => |err| err.name, + .error_union => |error_union| error_union.val.err_name, + else => unreachable, + }; + const int = mod.global_error_set.get(mod.intern_pool.stringToSlice(name)).?; std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, .Union => switch (ty.containerLayout(mod)) { @@ -1029,7 +662,7 @@ pub const Value = struct { }, .Pointer => { if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToMemory(Type.usize, mod, buffer); }, .Optional => { @@ -1141,14 +774,14 @@ pub const Value = struct { .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod); const field_type = ty.unionFields(mod).values()[field_index.?].ty; - const field_val = try val.fieldValue(field_type, mod, field_index.?); + const field_val = try val.fieldValue(mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); }, }, 
.Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); }, .Optional => { @@ -1262,13 +895,11 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - - const payload = try arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = mod.error_name_list.items[@intCast(usize, int)] }, - }; - return Value.initPayload(&payload.base); + const name = mod.error_name_list.items[@intCast(usize, int)]; + return (try mod.intern(.{ .err = .{ + .ty = ty.ip_index, + .name = mod.intern_pool.getString(name).unwrap().?, + } })).toValue(); }, .Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. @@ -1383,7 +1014,7 @@ pub const Value = struct { } /// Asserts that the value is a float or an integer. - pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T { + pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { return switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), @@ -1393,6 +1024,8 @@ pub const Value = struct { } return @intToFloat(T, x); }, + .lazy_align => |ty| @intToFloat(T, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intToFloat(T, ty.toType().abiSize(mod)), }, .float => |float| switch (float.storage) { inline else => |x| @floatCast(T, x), @@ -1421,89 +1054,24 @@ pub const Value = struct { } pub fn clz(val: Value, ty: Type, mod: *Module) u64 { - const ty_bits = ty.intInfo(mod).bits; - return switch (val.ip_index) { - .bool_false => ty_bits, - .bool_true => ty_bits - 1, - .none => switch (val.tag()) { - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, - - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.clz(ty_bits); - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.clz(ty_bits), - .u64 => |x| @clz(x) + ty_bits - 64, - .i64 => @panic("TODO implement i64 Value clz"), - }, - else => unreachable, - }, - }; + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.clz(ty.intInfo(mod).bits); } - pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { - const ty_bits = ty.intInfo(mod).bits; - return switch (val.ip_index) { - .bool_false => ty_bits, - .bool_true => 0, - .none => switch (val.tag()) { - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, - - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.ctz(); - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.ctz(), - .u64 => |x| { - const big = @ctz(x); - return if (big == 64) ty_bits else big; - }, - .i64 => @panic("TODO implement i64 Value ctz"), - }, - else => unreachable, - }, - }; + pub fn ctz(val: Value, _: Type, mod: *Module) u64 { + var bigint_buf: 
BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.ctz(); } pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { - assert(!val.isUndef(mod)); - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, - .none => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| { - const info = ty.intInfo(mod); - var buffer: Value.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return @intCast(u64, big_int.popCount(info.bits)); - }, - else => unreachable, - }, - } + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits)); } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef(mod)); - const info = ty.intInfo(mod); var buffer: Value.BigIntSpace = undefined; @@ -1520,8 +1088,6 @@ pub const Value = struct { } pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef(mod)); - const info = ty.intInfo(mod); // Bit count must be evenly divisible by 8 @@ -1543,41 +1109,9 @@ pub const Value = struct { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { - const target = mod.getTarget(); - return switch (self.ip_index) { - .bool_false => 0, - .bool_true => 1, - .none => switch (self.tag()) { - .the_only_possible_value => 0, - - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .decl_ref, - .function, - .variable, - .eu_payload_ptr, - .opt_payload_ptr, - => target.ptrBitWidth(), - - else => { - var buffer: BigIntSpace = undefined; - return self.toBigInt(&buffer, mod).bitCountTwosComp(); - }, - }, - else => switch (mod.intern_pool.indexToKey(self.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.bitCountTwosComp(), - .u64 => |x| if (x == 0) 0 else @intCast(usize, std.math.log2(x) + 1), - .i64 => { - var buffer: Value.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.bitCountTwosComp(); - }, - }, - else => unreachable, - }, - }; + var buffer: BigIntSpace = undefined; + const big_int = self.toBigInt(&buffer, mod); + return big_int.bitCountTwosComp(); } /// Converts an integer or a float to a float. May result in a loss of information. @@ -1616,84 +1150,39 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - switch (lhs.ip_index) { - .bool_false => return .eq, - .bool_true => return .gt, - .none => return switch (lhs.tag()) { - .the_only_possible_value => .eq, - - .decl_ref, - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .function, - .variable, - => .gt, - - .runtime_value => { - // This is needed to correctly handle hashing the value. - // Checks in Sema should prevent direct comparisons from reaching here. 
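
Most of the special cases deleted above (`bool_false`, `the_only_possible_value`, `lazy_align`, ...) are now handled in one place: `toBigIntAdvanced` understands every interned integer representation, including the lazy ones, so the bit-counting helpers shrink to a conversion plus a big-int query. For illustration, assuming `val` is an interned integer value:

    var buf: Value.BigIntSpace = undefined;
    const big = val.toBigInt(&buf, mod); // lazy_align/lazy_size resolved eagerly here
    const bits = big.bitCountTwosComp();
    _ = bits;
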
- const val = lhs.castTag(.runtime_value).?.data; - return val.orderAgainstZeroAdvanced(mod, opt_sema); - }, - - .lazy_align => { - const ty = lhs.castTag(.lazy_align).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - .lazy_size => { - const ty = lhs.castTag(.lazy_size).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - - .elem_ptr => { - const elem_ptr = lhs.castTag(.elem_ptr).?.data; - switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { + return switch (lhs.ip_index) { + .bool_false => .eq, + .bool_true => .gt, + else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => .gt, + .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), + .elem => |elem| switch (try elem.base.toValue().orderAgainstZeroAdvanced(mod, opt_sema)) { .lt => unreachable, - .gt => return .gt, - .eq => { - if (elem_ptr.index == 0) { - return .eq; - } else { - return .gt; - } - }, - } + .gt => .gt, + .eq => if (elem.index == 0) .eq else .gt, + }, + else => unreachable, }, - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(lhs.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.orderAgainstScalar(0), inline .u64, .i64 => |x| std.math.order(x, 0), + .lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced( + mod, + false, + if (opt_sema) |sema| .{ .sema = sema } else .eager, + ) catch |err| switch (err) { + error.NeedLazy => unreachable, + else => |e| return e, + }) .gt else .eq, }, - .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) { - .big_int => |big_int| big_int.orderAgainstScalar(0), - inline .u64, .i64 => |x| std.math.order(x, 0), - }, + .enum_tag => |enum_tag| enum_tag.int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, else => unreachable, }, - } + }; } /// Asserts the value is comparable. 
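The hunks above all apply one recipe: delete the `val.ip_index == .none` / `val.tag()` legacy branches and funnel every value through a single `mod.intern_pool.indexToKey(...)` switch, with `toBigInt` absorbing the `lazy_align`/`lazy_size` cases now that those are stored in the int key itself. The following is a minimal standalone sketch of that dispatch shape, not part of the patch; `Key`, `Pool`, and `orderAgainstZero` here are simplified stand-ins for the real `InternPool` API, assumed only for illustration.

const std = @import("std");

// Stand-in for InternPool.Key: every comptime value is reachable as one
// tagged-union key, so consumers need exactly one exhaustive switch rather
// than a tag check plus a legacy-payload fallback.
const Key = union(enum) {
    int: union(enum) { u64: u64, i64: i64 },
    float: f64,
};

// Stand-in for the pool itself: an index is just a handle to an interned key.
const Pool = struct {
    keys: []const Key,

    fn indexToKey(pool: Pool, index: u32) Key {
        return pool.keys[index];
    }
};

// The post-migration shape of orderAgainstZero: one switch over the interned
// key, with no `.none` escape hatch into legacy Value tags.
fn orderAgainstZero(pool: Pool, index: u32) std.math.Order {
    return switch (pool.indexToKey(index)) {
        .int => |int| switch (int) {
            inline .u64, .i64 => |x| std.math.order(x, 0),
        },
        .float => |x| std.math.order(x, 0),
    };
}

test "orderAgainstZero dispatches through one interned key switch" {
    const pool = Pool{ .keys = &.{
        .{ .int = .{ .i64 = -3 } },
        .{ .float = 0.0 },
    } };
    try std.testing.expectEqual(std.math.Order.lt, orderAgainstZero(pool, 0));
    try std.testing.expectEqual(std.math.Order.eq, orderAgainstZero(pool, 1));
}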
@@ -1760,8 +1249,8 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) !bool { - if (lhs.pointerDecl()) |lhs_decl| { - if (rhs.pointerDecl()) |rhs_decl| { + if (lhs.pointerDecl(mod)) |lhs_decl| { + if (rhs.pointerDecl(mod)) |rhs_decl| { switch (op) { .eq => return lhs_decl == rhs_decl, .neq => return lhs_decl != rhs_decl, @@ -1774,7 +1263,7 @@ pub const Value = struct { else => {}, } } - } else if (rhs.pointerDecl()) |_| { + } else if (rhs.pointerDecl(mod)) |_| { switch (op) { .eq => return false, .neq => return true, @@ -1849,7 +1338,6 @@ pub const Value = struct { switch (lhs.ip_index) { .none => switch (lhs.tag()) { - .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema), .aggregate => { for (lhs.castTag(.aggregate).?.data) |elem_val| { if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; @@ -1877,6 +1365,15 @@ pub const Value = struct { .float => |float| switch (float.storage) { inline else => |x| if (std.math.isNan(x)) return op == .neq, }, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + }, else => {}, }, } @@ -1910,69 +1407,6 @@ pub const Value = struct { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { - .the_only_possible_value => return true, - .enum_literal => { - const a_name = a.castTag(.enum_literal).?.data; - const b_name = b.castTag(.enum_literal).?.data; - return std.mem.eql(u8, a_name, b_name); - }, - .opt_payload => { - const a_payload = a.castTag(.opt_payload).?.data; - const b_payload = b.castTag(.opt_payload).?.data; - const payload_ty = ty.optionalChild(mod); - return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); - }, - .slice => { - const a_payload = a.castTag(.slice).?.data; - const b_payload = b.castTag(.slice).?.data; - if (!(try eqlAdvanced(a_payload.len, Type.usize, b_payload.len, Type.usize, mod, opt_sema))) { - return false; - } - - const ptr_ty = ty.slicePtrFieldType(mod); - - return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema); - }, - .elem_ptr => { - const a_payload = a.castTag(.elem_ptr).?.data; - const b_payload = b.castTag(.elem_ptr).?.data; - if (a_payload.index != b_payload.index) return false; - - return eqlAdvanced(a_payload.array_ptr, ty, b_payload.array_ptr, ty, mod, opt_sema); - }, - .field_ptr => { - const a_payload = a.castTag(.field_ptr).?.data; - const b_payload = b.castTag(.field_ptr).?.data; - if (a_payload.field_index != b_payload.field_index) return false; - - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .@"error" => { - const a_name = a.castTag(.@"error").?.data.name; - const b_name = b.castTag(.@"error").?.data.name; - return std.mem.eql(u8, a_name, b_name); - }, - .eu_payload => { - const a_payload = a.castTag(.eu_payload).?.data; - const b_payload = b.castTag(.eu_payload).?.data; - const payload_ty = ty.errorUnionPayload(mod); - return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); - }, - .eu_payload_ptr => { - const a_payload = a.castTag(.eu_payload_ptr).?.data; - const b_payload = 
b.castTag(.eu_payload_ptr).?.data; - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .opt_payload_ptr => { - const a_payload = a.castTag(.opt_payload_ptr).?.data; - const b_payload = b.castTag(.opt_payload_ptr).?.data; - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .function => { - const a_payload = a.castTag(.function).?.data; - const b_payload = b.castTag(.function).?.data; - return a_payload == b_payload; - }, .aggregate => { const a_field_vals = a.castTag(.aggregate).?.data; const b_field_vals = b.castTag(.aggregate).?.data; @@ -2035,17 +1469,15 @@ pub const Value = struct { return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema); }, else => {}, - } else if (b_tag == .@"error") { - return false; - } + }; - if (a.pointerDecl()) |a_decl| { - if (b.pointerDecl()) |b_decl| { + if (a.pointerDecl(mod)) |a_decl| { + if (b.pointerDecl(mod)) |b_decl| { return a_decl == b_decl; } else { return false; } - } else if (b.pointerDecl()) |_| { + } else if (b.pointerDecl(mod)) |_| { return false; } @@ -2130,25 +1562,11 @@ pub const Value = struct { if (a_nan) return true; return a_float == b_float; }, - .Optional => if (b_tag == .opt_payload) { - var sub_pl: Payload.SubValue = .{ - .base = .{ .tag = b.tag() }, - .data = a, - }; - const sub_val = Value.initPayload(&sub_pl.base); - return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema); - }, - .ErrorUnion => if (a_tag != .@"error" and b_tag == .eu_payload) { - var sub_pl: Payload.SubValue = .{ - .base = .{ .tag = b.tag() }, - .data = a, - }; - const sub_val = Value.initPayload(&sub_pl.base); - return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema); - }, + .Optional, + .ErrorUnion, + => unreachable, // handled by InternPool else => {}, } - if (a_tag == .@"error") return false; return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } @@ -2166,7 +1584,7 @@ pub const Value = struct { std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue()) return; + if (val.isRuntimeValue(mod)) return; switch (zig_ty_tag) { .Opaque => unreachable, // Cannot hash opaque types @@ -2177,38 +1595,20 @@ pub const Value = struct { .Null, => {}, - .Type => unreachable, // handled via ip_index check above - .Float => { - // For hash/eql purposes, we treat floats as their IEEE integer representation. 
- switch (ty.floatBits(mod.getTarget())) { - 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))), - 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))), - 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))), - 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))), - 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), - else => unreachable, - } - }, - .ComptimeFloat => { - const float = val.toFloat(f128, mod); - const is_nan = std.math.isNan(float); - std.hash.autoHash(hasher, is_nan); - if (!is_nan) { - std.hash.autoHash(hasher, @bitCast(u128, float)); - } else { - std.hash.autoHash(hasher, std.math.signbit(float)); - } - }, - .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) { - .slice => { - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - hash(slice.ptr, ptr_ty, hasher, mod); - hash(slice.len, Type.usize, hasher, mod); - }, - - else => return hashPtr(val, hasher, mod), - }, + .Type, + .Float, + .ComptimeFloat, + .Bool, + .Int, + .ComptimeInt, + .Pointer, + .Optional, + .ErrorUnion, + .ErrorSet, + .Enum, + .EnumLiteral, + .Fn, + => unreachable, // handled via ip_index check above .Array, .Vector => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); @@ -2233,42 +1633,6 @@ pub const Value = struct { else => unreachable, } }, - .Optional => { - if (val.castTag(.opt_payload)) |payload| { - std.hash.autoHash(hasher, true); // non-null - const sub_val = payload.data; - const sub_ty = ty.optionalChild(mod); - sub_val.hash(sub_ty, hasher, mod); - } else { - std.hash.autoHash(hasher, false); // null - } - }, - .ErrorUnion => { - if (val.tag() == .@"error") { - std.hash.autoHash(hasher, false); // error - const sub_ty = ty.errorUnionSet(mod); - val.hash(sub_ty, hasher, mod); - return; - } - - if (val.castTag(.eu_payload)) |payload| { - std.hash.autoHash(hasher, true); // payload - const sub_ty = ty.errorUnionPayload(mod); - payload.data.hash(sub_ty, hasher, mod); - return; - } else unreachable; - }, - .ErrorSet => { - // just hash the literal error value. this is the most stable - // thing between compiler invocations. we can't use the error - // int cause (1) its not stable and (2) we don't have access to mod. - hasher.update(val.getError().?); - }, - .Enum => { - // This panic will go away when enum values move to be stored in the intern pool. - const int_val = val.enumToInt(ty, mod) catch @panic("OOM"); - hashInt(int_val, hasher, mod); - }, .Union => { const union_obj = val.cast(Payload.Union).?.data; if (ty.unionTagType(mod)) |tag_ty| { @@ -2277,27 +1641,12 @@ pub const Value = struct { const active_field_ty = ty.unionFieldType(union_obj.tag, mod); union_obj.val.hash(active_field_ty, hasher, mod); }, - .Fn => { - // Note that this hashes the *Fn/*ExternFn rather than the *Decl. - // This is to differentiate function bodies from function pointers. - // This is currently redundant since we already hash the zig type tag - // at the top of this function. 
- if (val.castTag(.function)) |func| { - std.hash.autoHash(hasher, func.data); - } else if (val.castTag(.extern_fn)) |func| { - std.hash.autoHash(hasher, func.data); - } else unreachable; - }, .Frame => { @panic("TODO implement hashing frame values"); }, .AnyFrame => { @panic("TODO implement hashing anyframe values"); }, - .EnumLiteral => { - const bytes = val.castTag(.enum_literal).?.data; - hasher.update(bytes); - }, } } @@ -2308,7 +1657,7 @@ pub const Value = struct { pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue()) return; + if (val.isRuntimeValue(mod)) return; if (val.ip_index != .none) { // The InternPool data structure hashes based on Key to make interned objects @@ -2326,16 +1675,20 @@ pub const Value = struct { .Null, .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), - .Type => unreachable, // handled above with the ip_index check - .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), - .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { - .slice => { - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - slice.ptr.hashUncoerced(ptr_ty, hasher, mod); - }, - else => val.hashPtr(hasher, mod), - }, + .Type, + .Float, + .ComptimeFloat, + .Bool, + .Int, + .ComptimeInt, + .Pointer, + .Fn, + .Optional, + .ErrorSet, + .ErrorUnion, + .Enum, + .EnumLiteral, + => unreachable, // handled above with the ip_index check .Array, .Vector => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); @@ -2348,21 +1701,16 @@ pub const Value = struct { elem_val.hashUncoerced(elem_ty, hasher, mod); } }, - .Optional => if (val.castTag(.opt_payload)) |payload| { - const child_ty = ty.optionalChild(mod); - payload.data.hashUncoerced(child_ty, hasher, mod); - } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), - .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { - const pl_ty = ty.errorUnionPayload(mod); - val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod); - }, - .Enum, .EnumLiteral, .Union => { - hasher.update(val.tagName(ty, mod)); - if (val.cast(Payload.Union)) |union_obj| { - const active_field_ty = ty.unionFieldType(union_obj.data.tag, mod); - union_obj.data.val.hashUncoerced(active_field_ty, hasher, mod); - } else std.hash.autoHash(hasher, std.builtin.TypeId.Void); - }, + .Union => { + hasher.update(val.tagName(mod)); + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .un => |un| { + const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); + un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); + }, + else => std.hash.autoHash(hasher, std.builtin.TypeId.Void), + } + }, .Frame => @panic("TODO implement hashing frame values"), .AnyFrame => @panic("TODO implement hashing anyframe values"), } @@ -2397,57 +1745,53 @@ pub const Value = struct { } }; - pub fn isComptimeMutablePtr(val: Value) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .decl_ref_mut, .comptime_field_ptr => true, - .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr), - .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr), - .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr), - .opt_payload_ptr => 
isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr), - .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr), - + pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .mut_decl, .comptime_field => true, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod), + .elem, .field => |base_index| base_index.base.toValue().isComptimeMutablePtr(mod), else => false, }, else => false, }; } - pub fn canMutateComptimeVarState(val: Value) bool { - if (val.isComptimeMutablePtr()) return true; - return switch (val.ip_index) { - .none => switch (val.tag()) { - .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(), - .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(), - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .aggregate => { - const fields = val.castTag(.aggregate).?.data; - for (fields) |field| { - if (field.canMutateComptimeVarState()) return true; - } - return false; + pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { + return val.isComptimeMutablePtr(mod) or switch (val.ip_index) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => false, + .payload => |payload| payload.toValue().canMutateComptimeVarState(mod), }, - .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(), - .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(), - else => return false, + .ptr => |ptr| switch (ptr.addr) { + .eu_payload, .opt_payload => |base| base.toValue().canMutateComptimeVarState(mod), + else => false, + }, + .opt => |opt| switch (opt.val) { + .none => false, + else => opt.val.toValue().canMutateComptimeVarState(mod), + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (elem.toValue().canMutateComptimeVarState(mod)) break true; + } else false, + .un => |un| un.val.toValue().canMutateComptimeVarState(mod), + else => false, }, - else => return false, }; } /// Gets the decl referenced by this pointer. If the pointer does not point /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
- pub fn pointerDecl(val: Value) ?Module.Decl.Index { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index, - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, - .variable => val.castTag(.variable).?.data.owner_decl, - .decl_ref => val.cast(Payload.Decl).?.data, + pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable.decl, + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => null, }, else => null, @@ -2463,95 +1807,15 @@ pub const Value = struct { } } - fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { - switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - .extern_fn, - .function, - .variable, - => { - const decl: Module.Decl.Index = ptr_val.pointerDecl().?; - std.hash.autoHash(hasher, decl); - }, - .comptime_field_ptr => { - std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr); - }, - - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - hashPtr(elem_ptr.array_ptr, hasher, mod); - std.hash.autoHash(hasher, Value.Tag.elem_ptr); - std.hash.autoHash(hasher, elem_ptr.index); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.field_ptr); - hashPtr(field_ptr.container_ptr, hasher, mod); - std.hash.autoHash(hasher, field_ptr.field_index); - }, - .eu_payload_ptr => { - const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr); - hashPtr(err_union_ptr.container_ptr, hasher, mod); - }, - .opt_payload_ptr => { - const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr); - hashPtr(opt_ptr.container_ptr, hasher, mod); - }, - - .the_only_possible_value, - .lazy_align, - .lazy_size, - => return hashInt(ptr_val, hasher, mod), - - else => unreachable, - } - } + pub const slice_ptr_index = 0; + pub const slice_len_index = 1; pub fn slicePtr(val: Value, mod: *Module) Value { - if (val.ip_index != .none) return mod.intern_pool.slicePtr(val.ip_index).toValue(); - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc. 
- .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val, - else => unreachable, - }; + return mod.intern_pool.slicePtr(val.ip_index).toValue(); } pub fn sliceLen(val: Value, mod: *Module) u64 { - if (val.ip_index != .none) return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(mod); - } else { - return 1; - } - }, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(mod); - } else { - return 1; - } - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (payload.field_ty.zigTypeTag(mod) == .Array) { - return payload.field_ty.arrayLen(mod); - } else { - return 1; - } - }, - else => unreachable, - }; + return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); } /// Asserts the value is a single-item pointer to an array, or an array, @@ -2560,14 +1824,6 @@ pub const Value = struct { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { - // This is the case of accessing an element of an undef array. - .empty_array => unreachable, // out of bounds array index - - .empty_array_sentinel => { - assert(index == 0); // The only valid index for an empty array with sentinel. - return val.castTag(.empty_array_sentinel).?.data; - }, - .bytes => { const byte = val.castTag(.bytes).?.data[index]; return mod.intValue(Type.u8, byte); @@ -2579,128 +1835,101 @@ pub const Value = struct { return mod.intValue(Type.u8, byte); }, - // No matter the index; all the elements are the same! - .repeated => return val.castTag(.repeated).?.data, - .aggregate => return val.castTag(.aggregate).?.data[index], - .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index), - - .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index), - .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index), - .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index), - .elem_ptr => { - const data = val.castTag(.elem_ptr).?.data; - return data.array_ptr.elemValue(mod, index + data.index); - }, - .field_ptr => { - const data = val.castTag(.field_ptr).?.data; - if (data.container_ptr.pointerDecl()) |decl_index| { - const container_decl = mod.declPtr(decl_index); - const field_type = data.container_ty.structFieldType(data.field_index, mod); - const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); - return field_val.elemValue(mod, index); - } else unreachable; - }, - - // The child type of arrays which have only one possible value need - // to have only one possible value itself. 
- .the_only_possible_value => return val, - - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index), - - .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index), - .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index), else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .ptr => |ptr| switch (ptr.addr) { - .@"var" => unreachable, .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), .int, .eu_payload, .opt_payload => unreachable, .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), - .field => unreachable, - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elems| elems[index].toValue(), - .repeated_elem => |elem| elem.toValue(), + .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { + const base_decl = mod.declPtr(decl_index); + const field_val = try base_decl.val.fieldValue(mod, field.index); + return field_val.elemValue(mod, index); + } else unreachable, + }, + .aggregate => |aggregate| { + const len = mod.intern_pool.aggregateTypeLen(aggregate.ty); + if (index < len) return switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }.toValue(); + assert(index == len); + return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue(); }, else => unreachable, }, } } - pub fn isLazyAlign(val: Value) bool { - return val.ip_index == .none and val.tag() == .lazy_align; - } - - pub fn isLazySize(val: Value) bool { - return val.ip_index == .none and val.tag() == .lazy_size; + pub fn isLazyAlign(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.storage == .lazy_align, + else => false, + }; } - pub fn isRuntimeValue(val: Value) bool { - return val.ip_index == .none and val.tag() == .runtime_value; + pub fn isLazySize(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.storage == .lazy_size, + else => false, + }; } - pub fn tagIsVariable(val: Value) bool { - return val.ip_index == .none and val.tag() == .variable; + pub fn isRuntimeValue(val: Value, mod: *Module) bool { + return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value; } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), - .decl_ref => { - const decl = mod.declPtr(val.castTag(.decl_ref).?.data); + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => true, + .ptr => |ptr| switch (ptr.addr) { 
+ .decl => |decl_index| { + const decl = mod.declPtr(decl_index); assert(decl.has_tv); return decl.val.isVariable(mod); }, - .decl_ref_mut => { - const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); assert(decl.has_tv); return decl.val.isVariable(mod); }, - - .variable => true, - else => false, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isVariable(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isVariable(mod), + .elem, .field => |base_index| base_index.base.toValue().isVariable(mod), }, else => false, }; } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .variable => false, - else => val.isPtrToThreadLocalInner(mod), - }, - else => val.isPtrToThreadLocalInner(mod), - }; - } - - fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod), - .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod), - - .variable => val.castTag(.variable).?.data.is_threadlocal, - else => false, + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, else => false, }; @@ -2714,39 +1943,42 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (val.tag()) { - .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array), - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return Tag.str_lit.create(arena, .{ - .index = @intCast(u32, str_lit.index + start), - .len = @intCast(u32, end - start), - }); + return switch (val.ip_index) { + .none => switch (val.tag()) { + .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + return Tag.str_lit.create(arena, .{ + .index = @intCast(u32, str_lit.index + start), + .len = @intCast(u32, end - start), + }); + }, + else => unreachable, }, - .aggregate => Tag.aggregate.create(arena, 
val.castTag(.aggregate).?.data[start..end]), - .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end), - - .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end), - .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end), - .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end), - .elem_ptr => blk: { - const elem_ptr = val.castTag(.elem_ptr).?.data; - break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), + .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), + else => unreachable, + }, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = mod.intern_pool.typeOf(val.ip_index), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue(), + else => unreachable, }, - - .repeated, - .the_only_possible_value, - => val, - - else => unreachable, }; } - pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { + pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, - .none => switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -2757,13 +1989,14 @@ pub const Value = struct { // TODO assert the tag is correct return payload.val; }, - - .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, - else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), .elems => |elems| elems[index], .repeated_elem => |elem| elem, }.toValue(), @@ -2785,40 +2018,37 @@ pub const Value = struct { pub fn elemPtr( val: Value, ty: Type, - arena: Allocator, index: usize, mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - else => val, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .ptr => |ptr| switch (ptr.len) { + const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| ptr: { + switch (ptr.addr) { + .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) + return (try mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = elem.base, + .index = elem.index + index, + } }, + } })).toValue(), + else => {}, + } + break :ptr switch (ptr.len) { .none => val, else => val.slicePtr(mod), - }, - else => val, + }; }, + else => val, }; - - if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - if (elem_ptr.elem_ty.eql(elem_ty, mod)) { - return Tag.elem_ptr.create(arena, 
.{ - .array_ptr = elem_ptr.array_ptr, - .elem_ty = elem_ptr.elem_ty, - .index = elem_ptr.index + index, - }); - } - } - return Tag.elem_ptr.create(arena, .{ - .array_ptr = ptr_val, - .elem_ty = elem_ty, - .index = index, - }); + return (try mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = ptr_val.ip_index, + .index = index, + } }, + } })).toValue(); } pub fn isUndef(val: Value, mod: *Module) bool { @@ -2840,69 +2070,44 @@ pub const Value = struct { /// Returns true if any value contained in `self` is undefined. pub fn anyUndef(val: Value, mod: *Module) !bool { if (val.ip_index == .none) return false; - switch (val.ip_index) { - .undef => return true, + return switch (val.ip_index) { + .undef => true, .none => switch (val.tag()) { - .slice => { - const payload = val.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod); - - for (0..len) |i| { - const elem_val = try payload.data.ptr.elemValue(mod, i); - if (try elem_val.anyUndef(mod)) return true; - } - }, - - .aggregate => { - const payload = val.castTag(.aggregate).?; - for (payload.data) |field| { - if (try field.anyUndef(mod)) return true; - } - }, - else => {}, + .aggregate => for (val.castTag(.aggregate).?.data) |field| { + if (try field.anyUndef(mod)) break true; + } else false, + else => false, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .undef => return true, - .simple_value => |v| if (v == .undefined) return true, - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elems| for (elems) |elem| { - if (try anyUndef(elem.toValue(), mod)) return true; - }, - .repeated_elem => |elem| if (try anyUndef(elem.toValue(), mod)) return true, - }, - else => {}, + .undef => true, + .simple_value => |v| v == .undefined, + .ptr => |ptr| switch (ptr.len) { + .none => false, + else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| { + if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; + } else false, + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (try anyUndef(elem.toValue(), mod)) break true; + } else false, + else => false, }, - } - - return false; + }; } /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(val: Value, mod: *const Module) bool { + pub fn isNull(val: Value, mod: *Module) bool { return switch (val.ip_index) { .undef => unreachable, .unreachable_value => unreachable, .null_value => true, - .none => switch (val.tag()) { - .opt_payload => false, - - // If it's not one of those two tags then it must be a C pointer value, - // in which case the value 0 is null and other values are non-null. - - .the_only_possible_value => true, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - - else => false, - }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.eqZero(), - inline .u64, .i64 => |x| x == 0, + .int => { + var buf: BigIntSpace = undefined; + return val.toBigInt(&buf, mod).eqZero(); }, .opt => |opt| opt.val == .none, else => false, @@ -2914,53 +2119,28 @@ pub const Value = struct { /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether /// something is an error or not because it works without having to figure out the /// string. 
- pub fn getError(self: Value) ?[]const u8 { - return switch (self.ip_index) { - .undef => unreachable, - .unreachable_value => unreachable, - .none => switch (self.tag()) { - .@"error" => self.castTag(.@"error").?.data.name, - .eu_payload => null, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - else => unreachable, + pub fn getError(self: Value, mod: *const Module) ?[]const u8 { + return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) { + .err => |err| err.name.toOptional(), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| err_name.toOptional(), + .payload => .none, }, else => unreachable, - }; + }); } /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. - pub fn errorUnionIsPayload(val: Value) bool { - return switch (val.ip_index) { - .undef => unreachable, - .none => switch (val.tag()) { - .eu_payload => true, - else => false, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - }, - else => false, - }; + pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { + return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload; } /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (val.ip_index) { - .none => if (val.isNull(mod)) null - // Valid for optional representation to be the direct value - // and not use opt_payload. - else if (val.castTag(.opt_payload)) |p| p.data else val, - .null_value => null, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .opt => |opt| switch (opt.val) { - .none => null, - else => opt.val.toValue(), - }, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) { + .none => null, + else => |index| index.toValue(), }; } @@ -3001,28 +2181,8 @@ pub const Value = struct { } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - switch (val.ip_index) { - .undef => return val, - .none => switch (val.tag()) { - .the_only_possible_value => return mod.floatValue(float_ty, 0), // for i0, u0 - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.abiAlignment(mod), float_ty, mod); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.abiSize(mod), float_ty, mod); - } - }, - else => unreachable, - }, + return switch (val.ip_index) { + .undef => val, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| { @@ -3030,10 +2190,20 @@ pub const Value = struct { return mod.floatValue(float_ty, float); }, inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), + .lazy_align => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); + }, + .lazy_size => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } 
else { + return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); + }, }, else => unreachable, }, - } + }; } fn intToFloatInner(x: anytype, dest_ty: Type, mod: *Module) !Value { @@ -4768,81 +3938,6 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const Function = struct { - base: Payload, - data: *Module.Fn, - }; - - pub const ExternFn = struct { - base: Payload, - data: *Module.ExternFn, - }; - - pub const Decl = struct { - base: Payload, - data: Module.Decl.Index, - }; - - pub const Variable = struct { - base: Payload, - data: *Module.Var, - }; - - pub const SubValue = struct { - base: Payload, - data: Value, - }; - - pub const DeclRefMut = struct { - pub const base_tag = Tag.decl_ref_mut; - - base: Payload = Payload{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - decl_index: Module.Decl.Index, - runtime_index: RuntimeIndex, - }; - }; - - pub const PayloadPtr = struct { - base: Payload, - data: struct { - container_ptr: Value, - container_ty: Type, - }, - }; - - pub const ComptimeFieldPtr = struct { - base: Payload, - data: struct { - field_val: Value, - field_ty: Type, - }, - }; - - pub const ElemPtr = struct { - pub const base_tag = Tag.elem_ptr; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - array_ptr: Value, - elem_ty: Type, - index: usize, - }, - }; - - pub const FieldPtr = struct { - pub const base_tag = Tag.field_ptr; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - container_ptr: Value, - container_ty: Type, - field_index: usize, - }, - }; - pub const Bytes = struct { base: Payload, /// Includes the sentinel, if any. @@ -4861,32 +3956,6 @@ pub const Value = struct { data: []Value, }; - pub const Slice = struct { - base: Payload, - data: struct { - ptr: Value, - len: Value, - }, - - pub const ptr_index = 0; - pub const len_index = 1; - }; - - pub const Ty = struct { - base: Payload, - data: Type, - }; - - pub const Error = struct { - base: Payload = .{ .tag = .@"error" }, - data: struct { - /// `name` is owned by `Module` and will be valid for the entire - /// duration of the compilation. 
- /// TODO revisit this when we have the concept of the error tag type - name: []const u8, - }, - }; - pub const InferredAlloc = struct { pub const base_tag = Tag.inferred_alloc; diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 6cccf77ee0..555cda135d 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -533,8 +533,8 @@ type_tag_handlers = { 'empty_struct_literal': lambda payload: '@TypeOf(.{})', 'anyerror_void_error_union': lambda payload: 'anyerror!void', - 'const_slice_u8': lambda payload: '[]const u8', - 'const_slice_u8_sentinel_0': lambda payload: '[:0]const u8', + 'slice_const_u8': lambda payload: '[]const u8', + 'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8', 'fn_noreturn_no_args': lambda payload: 'fn() noreturn', 'fn_void_no_args': lambda payload: 'fn() void', 'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn', @@ -560,7 +560,7 @@ type_tag_handlers = { 'many_mut_pointer': lambda payload: '[*]%s' % type_Type_SummaryProvider(payload), 'c_const_pointer': lambda payload: '[*c]const %s' % type_Type_SummaryProvider(payload), 'c_mut_pointer': lambda payload: '[*c]%s' % type_Type_SummaryProvider(payload), - 'const_slice': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload), + 'slice_const': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload), 'mut_slice': lambda payload: '[]%s' % type_Type_SummaryProvider(payload), 'int_signed': lambda payload: 'i%d' % payload.unsigned, 'int_unsigned': lambda payload: 'u%d' % payload.unsigned, diff --git a/tools/stage2_gdb_pretty_printers.py b/tools/stage2_gdb_pretty_printers.py index bd64916536..f10e924855 100644 --- a/tools/stage2_gdb_pretty_printers.py +++ b/tools/stage2_gdb_pretty_printers.py @@ -18,7 +18,7 @@ class TypePrinter: 'many_mut_pointer': 'Type.Payload.ElemType', 'c_const_pointer': 'Type.Payload.ElemType', 'c_mut_pointer': 'Type.Payload.ElemType', - 'const_slice': 'Type.Payload.ElemType', + 'slice_const': 'Type.Payload.ElemType', 'mut_slice': 'Type.Payload.ElemType', 'optional': 'Type.Payload.ElemType', 'optional_single_mut_pointer': 'Type.Payload.ElemType', -- cgit v1.2.3 From 1a4626d2cf8b9985833f97b6fea6ea03011ada4e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 05:47:25 -0400 Subject: InternPool: remove more legacy values Reinstate some tags that will be needed for comptime init. --- src/Air.zig | 7 - src/InternPool.zig | 60 +- src/Liveness.zig | 8 +- src/Liveness/Verify.zig | 5 +- src/Module.zig | 266 ++--- src/Sema.zig | 2498 ++++++++++++++++++++++-------------------- src/TypedValue.zig | 24 +- src/arch/aarch64/CodeGen.zig | 2 - src/arch/arm/CodeGen.zig | 2 - src/arch/riscv64/CodeGen.zig | 2 - src/arch/sparc64/CodeGen.zig | 2 - src/arch/wasm/CodeGen.zig | 27 +- src/arch/x86_64/CodeGen.zig | 5 +- src/codegen.zig | 144 --- src/codegen/c.zig | 21 +- src/codegen/llvm.zig | 326 +----- src/codegen/spirv.zig | 73 +- src/print_air.zig | 3 +- src/value.zig | 979 +++++++++-------- 19 files changed, 2063 insertions(+), 2391 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index 9dcbe174ec..4f36cf8bc1 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -400,8 +400,6 @@ pub const Inst = struct { /// A comptime-known value. Uses the `ty_pl` field, payload is index of /// `values` array. constant, - /// A comptime-known type. Uses the `ty` field. - const_ty, /// A comptime-known value via an index into the InternPool. /// Uses the `interned` field. 
interned, @@ -1257,8 +1255,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .error_set_has_value, => return Type.bool, - .const_ty => return Type.type, - .alloc, .ret_ptr, .err_return_trace, @@ -1435,7 +1431,6 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); return switch (air_tags[inst_index]) { - .const_ty => air_datas[inst_index].ty, .interned => air_datas[inst_index].interned.toType(), else => unreachable, }; @@ -1501,7 +1496,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], - .const_ty => unreachable, .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } @@ -1658,7 +1652,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { .cmp_vector, .cmp_vector_optimized, .constant, - .const_ty, .interned, .is_null, .is_non_null, diff --git a/src/InternPool.zig b/src/InternPool.zig index ec4d1df45f..1dc43a467d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -553,10 +553,10 @@ pub const Key = union(enum) { pub const Addr = union(enum) { decl: Module.Decl.Index, mut_decl: MutDecl, + comptime_field: Index, int: Index, eu_payload: Index, opt_payload: Index, - comptime_field: Index, elem: BaseIndex, field: BaseIndex, @@ -703,24 +703,27 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); switch (ip.indexToKey(aggregate.ty)) { - .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), - .elems => |elems| { - var buffer: Key.Int.Storage.BigIntSpace = undefined; - for (elems) |elem| std.hash.autoHash( - hasher, - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable, - ); - }, - .repeated_elem => |elem| { - const len = ip.aggregateTypeLen(aggregate.ty); - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable; - var i: u64 = 0; - while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); - }, + .array_type => |array_type| if (array_type.child == .u8_type) { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, + } + return; }, else => {}, } @@ -2860,6 +2863,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .array_type => |array_type| { assert(array_type.child != .none); + assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child); if (std.math.cast(u32, array_type.len)) |len| { if (array_type.sentinel == .none) { @@ -3230,7 +3234,23 @@ pub fn get(ip: *InternPool, gpa: 
Allocator, key: Key) Allocator.Error!Index { }, .int => |int| b: { - assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type); + switch (int.ty) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .comptime_int_type, + => {}, + else => assert(ip.indexToKey(int.ty) == .int_type), + } switch (int.storage) { .u64, .i64, .big_int => {}, .lazy_align, .lazy_size => |lazy_ty| { diff --git a/src/Liveness.zig b/src/Liveness.zig index 856123fa9d..c30708e140 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -323,7 +323,6 @@ pub fn categorizeOperand( .alloc, .ret_ptr, .constant, - .const_ty, .interned, .trap, .breakpoint, @@ -975,7 +974,6 @@ fn analyzeInst( => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), .constant, - .const_ty, .interned, => unreachable, @@ -1272,7 +1270,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty, .interned => continue, + .constant, .interned => continue, else => {}, } @@ -1308,7 +1306,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty, .interned => continue, + .constant, .interned => continue, else => {}, } @@ -1842,7 +1840,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // Don't compute any liveness for constants const inst_tags = big.a.air.instructions.items(.tag); switch (inst_tags[operand]) { - .constant, .const_ty, .interned => return, + .constant, .interned => return, else => {}, } diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 923e6f5658..703d561559 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -43,7 +43,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .alloc, .ret_ptr, .constant, - .const_ty, .interned, .breakpoint, .dbg_stmt, @@ -557,7 +556,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { const operand = Air.refToIndexAllowNone(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty, .interned => {}, + .constant, .interned => {}, else => { if (dies) { if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); @@ -579,7 +578,7 @@ fn verifyInst( } const tag = self.air.instructions.items(.tag); switch (tag[inst]) { - .constant, .const_ty, .interned => unreachable, + .constant, .interned => unreachable, else => { if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); diff --git a/src/Module.zig b/src/Module.zig index fa24c237b4..47f7643b9f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -85,20 +85,13 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, /// Keys are fully resolved file paths. This table owns the keys and values. embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, -/// This is a temporary addition to stage2 in order to match legacy behavior, -/// however the end-game once the lang spec is settled will be to use a global -/// InternPool for comptime memoized objects, making this behavior consistent across all types, -/// not only string literals. 
Or, we might decide to not guarantee string literals -/// to have equal comptime pointers, in which case this field can be deleted (perhaps -/// the commit that introduced it can simply be reverted). -/// This table uses an optional index so that when a Decl is destroyed, the string literal -/// is still reclaimable by a future Decl. -string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, -string_literal_bytes: ArrayListUnmanaged(u8) = .{}, - /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// This is currently only used for string literals, however the end-game once the lang spec +/// is settled will be to make this behavior consistent across all types. +memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, + /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -208,39 +201,6 @@ pub const CImportError = struct { } }; -pub const StringLiteralContext = struct { - bytes: *ArrayListUnmanaged(u8), - - pub const Key = struct { - index: u32, - len: u32, - }; - - pub fn eql(self: @This(), a: Key, b: Key) bool { - _ = self; - return a.index == b.index and a.len == b.len; - } - - pub fn hash(self: @This(), x: Key) u64 { - const x_slice = self.bytes.items[x.index..][0..x.len]; - return std.hash_map.hashString(x_slice); - } -}; - -pub const StringLiteralAdapter = struct { - bytes: *ArrayListUnmanaged(u8), - - pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool { - const b_slice = self.bytes.items[b.index..][0..b.len]; - return mem.eql(u8, a_slice, b_slice); - } - - pub fn hash(self: @This(), adapted_key: []const u8) u64 { - _ = self; - return std.hash_map.hashString(adapted_key); - } -}; - const MonomorphedFuncsSet = std.HashMapUnmanaged( Fn.Index, void, @@ -660,14 +620,8 @@ pub const Decl = struct { } mod.destroyFunc(func); } + _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { - if (decl.owns_tv) { - if (decl.val.castTag(.str_lit)) |str_lit| { - mod.string_literal_table.getPtrContext(str_lit.data, .{ - .bytes = &mod.string_literal_bytes, - }).?.* = .none; - } - } value_arena.deinit(gpa); decl.value_arena = null; decl.has_tv = false; @@ -834,7 +788,7 @@ pub const Decl = struct { pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; - return mod.intern_pool.indexToStructType(decl.val.ip_index); + return mod.intern_pool.indexToStructType(decl.val.toIntern()); } /// If the Decl has a value and it is a union, return it, @@ -875,7 +829,7 @@ pub const Decl = struct { return switch (decl.val.ip_index) { .empty_struct_type => .none, .none => .none, - else => switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), @@ -919,7 +873,7 @@ pub const Decl = struct { pub fn isExtern(decl: Decl, mod: *Module) bool { assert(decl.has_tv); - return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + return switch 
(mod.intern_pool.indexToKey(decl.val.toIntern())) { .variable => |variable| variable.is_extern, .extern_func => true, else => false, @@ -1577,11 +1531,11 @@ pub const Fn = struct { ip: *InternPool, gpa: Allocator, ) !void { - switch (err_set_ty.ip_index) { + switch (err_set_ty.toIntern()) { .anyerror_type => { self.is_anyerror = true; }, - else => switch (ip.indexToKey(err_set_ty.ip_index)) { + else => switch (ip.indexToKey(err_set_ty.toIntern())) { .error_set_type => |error_set_type| { for (error_set_type.names) |name| { try self.errors.put(gpa, name, {}); @@ -3396,8 +3350,7 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); - mod.string_literal_table.deinit(gpa); - mod.string_literal_bytes.deinit(gpa); + mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); } @@ -4702,7 +4655,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| { + if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { @@ -4749,10 +4702,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.owns_tv = false; var queue_linker_work = false; var is_extern = false; - switch (decl_tv.val.ip_index) { + switch (decl_tv.val.toIntern()) { .generic_poison => unreachable, .unreachable_value => unreachable, - else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; @@ -4792,7 +4745,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, .extern_func, .func => .function, else => .constant, @@ -6497,40 +6450,33 @@ pub fn populateTestFunctions( const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); - const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ - .ty = try mod.arrayType(.{ - .len = test_fn_vals.len, - .child = test_fn_ty.ip_index, - .sentinel = .none, - }), - .val = try Value.Tag.aggregate.create(arena, test_fn_vals), - }); - const array_decl = mod.declPtr(array_decl_index); + const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count()); + defer gpa.free(test_fn_vals); // Add a dependency on each test name and function pointer. - try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); + var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){}; + defer array_decl_dependencies.deinit(gpa); + try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); - for (mod.test_functions.keys(), 0..) 
|test_decl_index, i| { + for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_name_slice = mem.sliceTo(test_decl.name, 0); const test_name_decl_index = n: { - var name_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer name_decl_arena.deinit(); - const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); - const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }), - .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), + const test_decl_name = mem.span(test_decl.name); + const test_name_decl_ty = try mod.arrayType(.{ + .len = test_decl_name.len, + .child = .u8_type, + }); + const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = test_name_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = test_name_decl_ty.toIntern(), + .storage = .{ .bytes = test_decl_name }, + } })).toValue(), }); - try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); break :n test_name_decl_index; }; - array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal); - array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); + array_decl_dependencies.appendAssumeCapacity(test_decl_index); + array_decl_dependencies.appendAssumeCapacity(test_name_decl_index); try mod.linkerUpdateDecl(test_name_decl_index); const test_fn_fields = .{ @@ -6541,36 +6487,51 @@ pub fn populateTestFunctions( } }), // func try mod.intern(.{ .ptr = .{ - .ty = test_decl.ty.ip_index, + .ty = test_decl.ty.toIntern(), .addr = .{ .decl = test_decl_index }, } }), // async_frame_size null_usize, }; - test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{ - .ty = test_fn_ty.ip_index, + test_fn_val.* = try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.toIntern(), .storage = .{ .elems = &test_fn_fields }, - } })).toValue(); + } }); + } + + const array_decl_ty = try mod.arrayType(.{ + .len = test_fn_vals.len, + .child = test_fn_ty.toIntern(), + .sentinel = .none, + }); + const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = array_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = test_fn_vals }, + } })).toValue(), + }); + for (array_decl_dependencies.items) |array_decl_dependency| { + try mod.declareDeclDependency(array_decl_index, array_decl_dependency); } - try array_decl.finalizeNewArena(&new_decl_arena); break :d array_decl_index; }; try mod.linkerUpdateDecl(array_decl_index); { const new_ty = try mod.ptrType(.{ - .elem_type = test_fn_ty.ip_index, + .elem_type = test_fn_ty.toIntern(), .is_const = true, .size = .Slice, }); const new_val = decl.val; const new_init = try mod.intern(.{ .ptr = .{ - .ty = new_ty.ip_index, + .ty = new_ty.toIntern(), .addr = .{ .decl = array_decl_index }, - .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), } }); - mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init); + mod.intern_pool.mutateVarInit(decl.val.toIntern(), new_init); // Since we are replacing the Decl's value we must perform cleanup on the // previous value. 
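The populateTestFunctions rewrite above is typical of this commit: values that used to be built in a Decl-owned arena via `Value.Tag.aggregate.create` are now produced by interning a key with `mod.intern(...)` and converting the returned index with `.toValue()`, while reads migrate from `.ip_index` to `.toIntern()`. The sketch below is a minimal, standalone illustration of the underlying idea only; `ToyPool`, its `Index` enum, and the `u64` key type are hypothetical stand-ins, not the compiler's real InternPool API. The point it demonstrates is that interning maps equal keys to one index, so value equality and memoization reduce to integer comparisons.

const std = @import("std");

const ToyPool = struct {
    pub const Index = enum(u32) { _ };

    map: std.AutoHashMapUnmanaged(u64, Index) = .{},
    items: std.ArrayListUnmanaged(u64) = .{},

    /// Returns the existing index if `key` was interned before,
    /// otherwise appends the key and returns a fresh index.
    pub fn get(pool: *ToyPool, gpa: std.mem.Allocator, key: u64) !Index {
        const gop = try pool.map.getOrPut(gpa, key);
        if (!gop.found_existing) {
            gop.value_ptr.* = @intToEnum(Index, @intCast(u32, pool.items.items.len));
            try pool.items.append(gpa, key);
        }
        return gop.value_ptr.*;
    }

    pub fn deinit(pool: *ToyPool, gpa: std.mem.Allocator) void {
        pool.map.deinit(gpa);
        pool.items.deinit(gpa);
    }
};

test "interning deduplicates equal keys" {
    const gpa = std.testing.allocator;
    var pool = ToyPool{};
    defer pool.deinit(gpa);
    // Two gets with the same key return the same index, so "pointer
    // equality" of interned values is just integer equality.
    try std.testing.expectEqual(try pool.get(gpa, 42), try pool.get(gpa, 42));
    try std.testing.expect(try pool.get(gpa, 7) != try pool.get(gpa, 42));
}

This is also why the arena-per-Decl bookkeeping (ensureUnusedCapacity on Decl dependencies, finalizeNewArena) could be replaced above by plain index slices that are freed immediately after interning.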
@@ -6650,47 +6611,32 @@ fn reportRetryableFileError( } pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - switch (val.ip_index) { - .none => switch (val.tag()) { - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); - } - }, - .@"union" => { - const data = val.castTag(.@"union").?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); - }, - else => {}, + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), + .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .variable => |variable| mod.markDeclIndexAlive(variable.decl), - .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), - .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), - .error_union => |error_union| switch (error_union.val) { - .err_name => {}, - .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), - }, - .ptr => |ptr| { - switch (ptr.addr) { - .decl => |decl| mod.markDeclIndexAlive(decl), - .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), - .int, .comptime_field => {}, - .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), - .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), - } - if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); - }, - .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), - .aggregate => |aggregate| for (aggregate.storage.values()) |elem| - mod.markReferencedDeclsAlive(elem.toValue()), - .un => |un| { - mod.markReferencedDeclsAlive(un.tag.toValue()); - mod.markReferencedDeclsAlive(un.val.toValue()); - }, - else => {}, + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), + } + if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); + }, + .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + mod.markReferencedDeclsAlive(un.tag.toValue()); + mod.markReferencedDeclsAlive(un.val.toValue()); }, + else => {}, } } @@ -6796,11 +6742,11 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.ip_index }); + return ptrType(mod, .{ .elem_type = child_type.toIntern() }); } pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); + return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true }); } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { @@ 
-6871,9 +6817,9 @@ pub fn errorSetFromUnsortedNames( pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { const i = try intern(mod, .{ .opt = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .val = try intern(mod, .{ .ptr = .{ - .ty = ty.childType(mod).ip_index, + .ty = ty.childType(mod).toIntern(), .addr = .{ .int = try intern(mod, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = x }, @@ -6890,7 +6836,7 @@ pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { assert(ty.zigTypeTag(mod) == .Pointer); const i = try intern(mod, .{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .int = try intern(mod, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = x }, @@ -6906,7 +6852,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er assert(tag == .Enum); } const i = try intern(mod, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = tag_int, } }); return i.toValue(); @@ -6917,12 +6863,12 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { const ip = &mod.intern_pool; const gpa = mod.gpa; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; if (enum_type.values.len == 0) { // Auto-numbered fields. return (try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try ip.get(gpa, .{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = field_index }, @@ -6931,7 +6877,7 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E } return (try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = enum_type.values[field_index], } })).toValue(); } @@ -6950,7 +6896,7 @@ pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .big_int = x }, } }); return i.toValue(); @@ -6958,7 +6904,7 @@ pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Valu pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .u64 = x }, } }); return i.toValue(); @@ -6966,7 +6912,7 @@ pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .i64 = x }, } }); return i.toValue(); @@ -6974,9 +6920,9 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { const i = try intern(mod, .{ .un = .{ - .ty = union_ty.ip_index, - .tag = tag.ip_index, - .val = val.ip_index, + .ty = union_ty.toIntern(), + .tag = tag.toIntern(), + .val = val.toIntern(), } }); return i.toValue(); } @@ -6993,7 +6939,7 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { else => unreachable, }; const i = try intern(mod, .{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = storage, } }); return 
i.toValue(); @@ -7001,9 +6947,9 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { const ip = &mod.intern_pool; - assert(ip.isOptionalType(opt_ty.ip_index)); + assert(ip.isOptionalType(opt_ty.toIntern())); const result = try ip.get(mod.gpa, .{ .opt = .{ - .ty = opt_ty.ip_index, + .ty = opt_ty.toIntern(), .val = .none, } }); return result.toValue(); @@ -7042,7 +6988,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(!val.isUndef(mod)); - const key = mod.intern_pool.indexToKey(val.ip_index); + const key = mod.intern_pool.indexToKey(val.toIntern()); switch (key.int.storage) { .i64 => |x| { if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); @@ -7221,19 +7167,19 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I /// * Not a struct. pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { if (ty.ip_index == .none) return null; - const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null; + const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null; return mod.structPtr(struct_index); } pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { if (ty.ip_index == .none) return null; - const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null; + const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null; return mod.unionPtr(union_index); } pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { if (ty.ip_index == .none) return null; - return mod.intern_pool.indexToFuncType(ty.ip_index); + return mod.intern_pool.indexToFuncType(ty.toIntern()); } pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { @@ -7243,7 +7189,7 @@ pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { if (ty.ip_index == .none) return .none; - return mod.intern_pool.indexToInferredErrorSetType(ty.ip_index); + return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); } pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @@ -7268,5 +7214,5 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu } pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { - return mod.intern_pool.toEnum(E, val.ip_index); + return mod.intern_pool.toEnum(E, val.toIntern()); } diff --git a/src/Sema.zig b/src/Sema.zig index d9b346e638..4478f26bf4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1866,9 +1866,9 @@ fn resolveConstMaybeUndefVal( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { - switch (val.ip_index) { + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, @@ -1887,10 +1887,10 @@ fn resolveConstValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - switch (val.ip_index) { + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, .undef => 
return sema.failWithUseOfUndef(block, src), - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, @@ -1930,7 +1930,7 @@ fn resolveMaybeUndefVal( switch (val.ip_index) { .generic_poison => return error.GenericPoison, .none => return val, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .variable => return null, else => return val, }, @@ -1950,7 +1950,7 @@ fn resolveMaybeUndefValIntable( while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, .none => break, - else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(check.toIntern())) { .variable => return null, .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return null, @@ -2007,7 +2007,6 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .const_ty => return air_datas[i].ty.toValue(), .interned => return air_datas[i].interned.toValue(), else => return null, } @@ -2490,7 +2489,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE }); try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_ty.ip_index, + .ty = ptr_ty.toIntern(), .addr = .{ .mut_decl = .{ .decl = iac.data.decl_index, .runtime_index = block.runtime_index, @@ -2988,7 +2987,7 @@ fn zirEnumDecl( if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } - incomplete_enum.setTagType(&mod.intern_pool, ty.ip_index); + incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern()); break :ty ty; } else if (fields_len == 0) { break :ty try mod.intType(.unsigned, 0); @@ -2998,7 +2997,7 @@ fn zirEnumDecl( } }; - if (small.nonexhaustive and int_tag_ty.ip_index != .comptime_int_type) { + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } @@ -3051,7 +3050,7 @@ fn zirEnumDecl( else => |e| return e, }; last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, @@ -3071,7 +3070,7 @@ fn zirEnumDecl( else try mod.intValue(int_tag_ty, 0); last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { @@ -3742,7 +3741,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.maybeQueueFuncBodyAnalysis(decl_index); sema.air_values.items[value_index] = (try 
sema.mod.intern(.{ .ptr = .{ - .ty = final_ptr_ty.ip_index, + .ty = final_ptr_ty.toIntern(), .addr = if (var_is_mut) .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, @@ -3842,7 +3841,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ - .ty = final_elem_ty.ip_index, + .ty = final_elem_ty.toIntern(), .addr = .{ .decl = new_decl_index }, } })).toValue(); // if bitcast ty ref needs to be made const, make_ptr_const @@ -4341,12 +4340,12 @@ fn validateUnionInit( block.instructions.shrinkRetainingCapacity(first_block_index); var union_val = try mod.intern(.{ .un = .{ - .ty = union_ty.ip_index, - .tag = tag_val.ip_index, - .val = val.ip_index, + .ty = union_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val.toIntern(), } }); if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ - .ty = union_ty.ip_index, + .ty = union_ty.toIntern(), .val = union_val, } }); const union_init = try sema.addConstant(union_ty, union_val.toValue()); @@ -4417,7 +4416,7 @@ fn validateStructInit( if (field_ptr != 0) continue; const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4476,15 +4475,14 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); - defer sema.gpa.free(field_values); + const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. 
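One step in the validateStructInit hunk deserves spelling out: just below, before inspecting the store instruction at all, Sema asks `typeHasOnePossibleValue` for the field type, and if the answer is non-null the field is comptime-known for free. For zero-bit types such as `u0`, `void`, or an empty struct, the value is fully determined by the type. A small user-level illustration in ordinary Zig (no compiler internals involved):

const std = @import("std");

test "zero-bit field values are determined by their type" {
    const S = struct { a: u0, b: void };
    // Every value of S is identical and occupies no storage, which is
    // why Sema can fill such fields in without looking at the
    // initializer expression.
    comptime std.debug.assert(@sizeOf(S) == 0);
    const s: S = .{ .a = 0, .b = {} };
    _ = s;
}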
const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv.ip_index; + field_values[i] = opv.toIntern(); continue; } @@ -4549,7 +4547,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val.ip_index; + field_values[i] = val.toIntern(); } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4563,7 +4561,7 @@ fn validateStructInit( } const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4583,7 +4581,7 @@ fn validateStructInit( } continue; } - field_values[i] = default_val.ip_index; + field_values[i] = default_val.toIntern(); } if (root_msg) |msg| { @@ -4607,11 +4605,11 @@ fn validateStructInit( block.instructions.shrinkRetainingCapacity(first_block_index); var struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = field_values }, } }); if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .val = struct_val, } }); const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); @@ -4659,7 +4657,7 @@ fn zirValidateArrayInit( var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = array_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4710,8 +4708,7 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. 
const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s); - defer sema.gpa.free(element_vals); + const element_vals = try sema.arena.alloc(InternPool.Index, array_len_s); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4721,13 +4718,13 @@ fn zirValidateArrayInit( if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { - element_vals[i] = opv.ip_index; + element_vals[i] = opv.toIntern(); continue; } } else { // Array has one possible value, so value is always comptime-known if (opt_opv) |opv| { - element_vals[i] = opv.ip_index; + element_vals[i] = opv.toIntern(); continue; } } @@ -4788,7 +4785,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val.ip_index; + element_vals[i] = val.toIntern(); } else { array_is_comptime = false; } @@ -4800,7 +4797,7 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .comptime_field => return, // This store was validated by the individual elem ptrs. else => {}, @@ -4813,17 +4810,17 @@ fn zirValidateArrayInit( // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val.ip_index; + element_vals[instrs.len] = sentinel_val.toIntern(); } block.instructions.shrinkRetainingCapacity(first_block_index); var array_val = try mod.intern(.{ .aggregate = .{ - .ty = array_ty.ip_index, + .ty = array_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ - .ty = array_ty.ip_index, + .ty = array_ty.toIntern(), .val = array_val, } }); const array_init = try sema.addConstant(array_ty, array_val.toValue()); @@ -5144,41 +5141,26 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air // expression of a variable declaration. 
const mod = sema.mod; const gpa = sema.gpa; - const string_bytes = &mod.string_literal_bytes; - const StringLiteralAdapter = Module.StringLiteralAdapter; - const StringLiteralContext = Module.StringLiteralContext; - try string_bytes.ensureUnusedCapacity(gpa, zir_bytes.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(gpa, zir_bytes, StringLiteralAdapter{ - .bytes = string_bytes, - }, StringLiteralContext{ - .bytes = string_bytes, + const ty = try mod.arrayType(.{ + .len = zir_bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = zir_bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, zir_bytes.len), - }; - string_bytes.appendSliceAssumeCapacity(zir_bytes); - gop.value_ptr.* = .none; - } - const decl_index = gop.value_ptr.unwrap() orelse di: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const decl_index = try anon_decl.finish( - try Type.array(anon_decl.arena(), gop.key_ptr.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), - 0, // default alignment - ); - - // Needed so that `Decl.clearValues` will additionally set the corresponding - // string literal table value back to `Decl.OptionalIndex.none`. - mod.declPtr(decl_index).owns_tv = true; + const decl_index = try anon_decl.finish(ty, val.toValue(), 0); - gop.value_ptr.* = decl_index.toOptional(); - break :di decl_index; - }; - return sema.analyzeDeclRef(decl_index); + gop.key_ptr.* = val; + gop.value_ptr.* = decl_index; + } + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6218,7 +6200,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, .ptr => |ptr| switch (ptr.addr) { @@ -6792,7 +6774,7 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), @@ -6996,7 +6978,7 @@ fn analyzeCall( } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty.ip_index; + new_fn_info.return_type = fn_ret_ty.toIntern(); const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -7289,7 +7271,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.ip_index) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet 
unresolvable // parameter or return type. @@ -7328,7 +7310,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.ip_index) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. @@ -7426,7 +7408,7 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, else => unreachable, @@ -7911,7 +7893,7 @@ fn resolveGenericInstantiationType( } new_decl.val = (try mod.intern(.{ .func = .{ - .ty = new_decl.ty.ip_index, + .ty = new_decl.ty.toIntern(), .index = new_func, } })).toValue(); new_decl.@"align" = 0; @@ -7932,7 +7914,7 @@ fn resolveGenericInstantiationType( fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { const mod = sema.mod; - const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| tuple, else => return, }; @@ -7957,7 +7939,7 @@ fn emitDbgInline( if (old_func == new_func) return; try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ - .ty = new_func_ty.ip_index, + .ty = new_func_ty.toIntern(), .index = new_func, } })).toValue()); _ = try block.addInst(.{ @@ -8019,7 +8001,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try mod.vectorType(.{ .len = len, - .child = elem_type.ip_index, + .child = elem_type.toIntern(), }); return sema.addType(vector_type); } @@ -8129,7 +8111,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const kv = try sema.mod.getErrorValue(name); const error_set_type = try mod.singleErrorSetType(kv.key); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), } })).toValue()); } @@ -8149,7 +8131,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addConstant(Type.err_int, try mod.intValue( Type.err_int, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, @@ -8240,7 +8222,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. 
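The early return just below encodes a language rule that is easy to check from user code: `||` on error sets is set union, and `anyerror` is the universal set, so merging any error set with it yields `anyerror` itself (the hunk returns `Air.Inst.Ref.anyerror_type` directly). For instance:

const std = @import("std");

test "merging an error set with anyerror yields anyerror" {
    const Merged = error{ OutOfMemory, Overflow } || anyerror;
    // anyerror is the superset of all error sets, so the union
    // collapses back to anyerror itself.
    comptime std.debug.assert(Merged == anyerror);
}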
- if (lhs_ty.ip_index == .anyerror_type or rhs_ty.ip_index == .anyerror_type) { + if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } @@ -8445,8 +8427,8 @@ fn analyzeOptionalPayloadPtr( _ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ - .ty = child_pointer.ip_index, - .addr = .{ .opt_payload = ptr_val.ip_index }, + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { @@ -8455,8 +8437,8 @@ fn analyzeOptionalPayloadPtr( } // The same Value represents the pointer to the optional and the payload. return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ - .ty = child_pointer.ip_index, - .addr = .{ .opt_payload = ptr_val.ip_index }, + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, } })).toValue()); } } @@ -8565,7 +8547,7 @@ fn analyzeErrUnionPayload( } return sema.addConstant( payload_ty, - mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(), + mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload.toValue(), ); } @@ -8633,8 +8615,8 @@ fn analyzeErrUnionPayloadPtr( _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ - .ty = operand_pointer_ty.ip_index, - .addr = .{ .eu_payload = ptr_val.ip_index }, + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { @@ -8642,8 +8624,8 @@ fn analyzeErrUnionPayloadPtr( return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ - .ty = operand_pointer_ty.ip_index, - .addr = .{ .eu_payload = ptr_val.ip_index }, + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, } })).toValue()); } } @@ -8828,7 +8810,7 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.ip_index == .type_type) { + if (dest_ty.toIntern() == .type_type) { return Value.generic_poison_type; } else { return Value.generic_poison; @@ -9183,7 +9165,7 @@ fn funcCommon( if (is_extern) { return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ - .ty = fn_ty.ip_index, + .ty = fn_ty.toIntern(), .decl = sema.owner_decl_index, .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( gpa, @@ -9223,7 +9205,7 @@ fn funcCommon( .is_noinline = is_noinline, }; return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ - .ty = fn_ty.ip_index, + .ty = fn_ty.toIntern(), .index = new_func_index, } })).toValue()); } @@ -10151,16 +10133,16 @@ fn zirSwitchCapture( .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = union_val.ip_index, + .base = union_val.toIntern(), .index = field_index, } }, } })).toValue()); } return sema.addConstant( field_ty, - mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(), + mod.intern_pool.indexToKey(union_val.toIntern()).un.val.toValue(), ); } if (is_ref) { @@ -10256,9 +10238,9 @@ fn zirSwitchCapture( if (try sema.resolveDefinedValue(block, 
operand_src, operand_ptr)) |op_ptr_val| { return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ - .ty = field_ty_ptr.ip_index, + .ty = field_ty_ptr.toIntern(), .addr = .{ .field = .{ - .base = op_ptr_val.ip_index, + .base = op_ptr_val.toIntern(), .index = first_field_index, } }, } })).toValue()); @@ -11502,7 +11484,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError cases_len += 1; const item_val = try mod.intern(.{ .err = .{ - .ty = operand_ty.ip_index, + .ty = operand_ty.toIntern(), .name = error_name_ip, } }); const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); @@ -11802,7 +11784,7 @@ fn validateSwitchItemError( const ip = &sema.mod.intern_pool; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do i need to typecheck here? - const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name); + const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.toIntern()).err.name); const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -12035,7 +12017,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ip = &mod.intern_pool; const has_field = hf: { - switch (ip.indexToKey(ty.ip_index)) { + switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => { if (mem.eql(u8, field_name, "ptr")) break :hf true; @@ -12160,17 +12142,23 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1]; - - // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at + // TODO instead of using `.bytes`, create a new value tag for pointing at // a `*Module.EmbedFile`. The purpose of this would be: // - If only the length is read and the bytes are not inspected by comptime code, // there can be an optimization where the codegen backend does a copy_file_range // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. + const ty = try mod.arrayType(.{ + .len = embed_file.bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), + ty, + (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = embed_file.bytes }, + } })).toValue(), 0, // default alignment ); @@ -12186,7 +12174,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const kv = try mod.getErrorValue(err_name); const error_set_type = try mod.singleErrorSetType(kv.key); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = mod.intern_pool.getString(kv.key).unwrap().?, } })).toValue()); } @@ -12597,15 +12585,15 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod); + elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod)).intern(scalar_type, mod); } - return sema.addConstant( - operand_type, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_type, (try mod.intern(.{ .aggregate = .{ + .ty = operand_type.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod); return sema.addConstant(operand_type, result_val); @@ -12652,22 +12640,22 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, mod).ip_index; + types[i] = lhs_ty.structFieldType(i, mod).toIntern(); const default_val = lhs_ty.structFieldDefaultValue(i, mod); - values[i] = default_val.ip_index; + values[i] = default_val.toIntern(); const operand_src = lhs_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i] = .none; } } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index; + types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern(); const default_val = rhs_ty.structFieldDefaultValue(i, mod); - values[i + lhs_len] = default_val.ip_index; + values[i + lhs_len] = default_val.toIntern(); const operand_src = rhs_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i + lhs_len] = .none; } @@ -12824,34 +12812,32 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else rhs_val; - const final_len_including_sent = result_len + @boolToInt(res_sent_val != null); - const element_vals = try sema.arena.alloc(Value, final_len_including_sent); + const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: usize = 0; while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); - element_vals[elem_i] = coerced_elem_val; + element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod); } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_ty = if (rhs_is_tuple) 
rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type; const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); - element_vals[elem_i] = coerced_elem_val; - } - if (res_sent_val) |sent_val| { - element_vals[result_len] = sent_val; + element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod); } - const val = try Value.Tag.aggregate.create(sema.arena, element_vals); - return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null); + return sema.addConstantMaybeRef(block, result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue(), ptr_addrspace != null); } else break :rs rhs_src; } else lhs_src; @@ -12978,8 +12964,8 @@ fn analyzeTupleMul( const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (0..tuple_len) |i| { - types[i] = operand_ty.structFieldType(i, mod).ip_index; - values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index; + types[i] = operand_ty.structFieldType(i, mod).toIntern(); + values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern(); const operand_src = lhs_src; // TODO better source location if (values[i] == .unreachable_value) { runtime_src = operand_src; @@ -13086,8 +13072,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_len == 1) { const elem_val = try lhs_sub_val.elemValue(mod, 0); break :v try mod.intern(.{ .aggregate = .{ - .ty = result_ty.ip_index, - .storage = .{ .repeated_elem = elem_val.ip_index }, + .ty = result_ty.toIntern(), + .storage = .{ .repeated_elem = elem_val.toIntern() }, } }); } @@ -13097,16 +13083,15 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); - assert(elem_val.ip_index != .none); - element_vals[elem_i] = elem_val.ip_index; + element_vals[elem_i] = elem_val.toIntern(); elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { - element_vals[result_len] = sent_val.ip_index; + element_vals[result_len] = sent_val.toIntern(); } break :v try mod.intern(.{ .aggregate = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); }; @@ -13998,8 +13983,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
else => unreachable, }; const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ - .ty = resolved_type.ip_index, - .storage = .{ .repeated_elem = scalar_zero.ip_index }, + .ty = resolved_type.toIntern(), + .storage = .{ .repeated_elem = scalar_zero.toIntern() }, } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } @@ -14079,14 +14064,17 @@ fn intRem( ) CompileError!Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intRemScalar(lhs, rhs, ty); } @@ -14517,11 +14505,13 @@ fn zirOverflowArithmetic( } if (result.inst == .none) { - const values = try sema.arena.alloc(Value, 2); - values[0] = result.wrapped; - values[1] = result.overflow_bit; - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + return sema.addConstant(tuple_ty, (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.toIntern(), + .storage = .{ .elems = &.{ + result.wrapped.toIntern(), + result.overflow_bit.toIntern(), + } }, + } })).toValue()); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); @@ -14534,8 +14524,8 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) != .Vector) return val; const repeated = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = val.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = val.toIntern() }, } }); return repeated.toValue(); } @@ -14547,7 +14537,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { .child = .u1_type, }) else Type.u1; - const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index }; + const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = &types, @@ -15731,7 +15721,7 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) { + if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15759,7 +15749,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { + if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15789,7 +15779,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value) { + if (tv.val.toIntern() == .unreachable_value) { assert(block.is_typeof); // We need a dummy 
runtime instruction with the correct type. return block.addTy(.alloc, tv.ty); @@ -15840,10 +15830,17 @@ fn zirBuiltinSrc( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const name = mem.span(fn_owner_decl.name); - const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :blk try mod.intern(.{ .ptr = .{ @@ -15857,9 +15854,17 @@ fn zirBuiltinSrc( defer anon_decl.deinit(); // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena()); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :blk try mod.intern(.{ .ptr = .{ @@ -15877,13 +15882,13 @@ fn zirBuiltinSrc( // line: u32, try mod.intern(.{ .runtime_value = .{ .ty = .u32_type, - .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index, + .val = (try mod.intValue(Type.u32, extra.line + 1)).toIntern(), } }), // column: u32, - (try mod.intValue(Type.u32, extra.column + 1)).ip_index, + (try mod.intValue(Type.u32, extra.column + 1)).toIntern(), }; return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ - .ty = src_loc_ty.ip_index, + .ty = src_loc_ty.toIntern(), .storage = .{ .elems = &fields }, } })).toValue()); } @@ -15908,8 +15913,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Null, .EnumLiteral, => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).toIntern(), .val = .void_value, } })).toValue()), .Fn => { @@ -15941,8 +15946,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_info_decl = mod.declPtr(param_info_decl_index); const param_info_ty = param_info_decl.val.toType(); - const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len); - defer gpa.free(param_vals); + const param_vals = try sema.arena.alloc(InternPool.Index, info.param_types.len); for (param_vals, info.param_types, 0..) 
|*param_val, param_ty, i| { const is_generic = param_ty == .generic_poison_type; const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ @@ -15957,40 +15961,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic).ip_index, + Value.makeBool(is_generic).toIntern(), // is_noalias: bool, - Value.makeBool(is_noalias).ip_index, + Value.makeBool(is_noalias).toIntern(), // type: ?type, param_ty_val, }; param_val.* = try mod.intern(.{ .aggregate = .{ - .ty = param_info_ty.ip_index, + .ty = param_info_ty.toIntern(), .storage = .{ .elems = ¶m_fields }, } }); } const args_val = v: { const args_slice_ty = try mod.ptrType(.{ - .elem_type = param_info_ty.ip_index, + .elem_type = param_info_ty.toIntern(), .size = .Slice, .is_const = true, }); const new_decl = try params_anon_decl.finish( try mod.arrayType(.{ .len = param_vals.len, - .child = param_info_ty.ip_index, + .child = param_info_ty.toIntern(), .sentinel = .none, }), (try mod.intern(.{ .aggregate = .{ - .ty = args_slice_ty.ip_index, + .ty = args_slice_ty.toIntern(), .storage = .{ .elems = param_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = args_slice_ty.ip_index, + .ty = args_slice_ty.toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), } }); }; @@ -16003,43 +16007,55 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // calling_convention: CallingConvention, - (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index, + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index, + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(), // is_generic: bool, - Value.makeBool(info.is_generic).ip_index, + Value.makeBool(info.is_generic).toIntern(), // is_var_args: bool, - Value.makeBool(info.is_var_args).ip_index, + Value.makeBool(info.is_var_args).toIntern(), // return_type: ?type, ret_ty_opt, // args: []const Fn.Param, args_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = fn_info_ty.ip_index, + .ty = fn_info_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); }, .Int => { + const int_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Int", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index); + try sema.ensureDeclAnalyzed(int_info_decl_index); + const int_info_decl = mod.declPtr(int_info_decl_index); + const int_info_ty = int_info_decl.val.toType(); + const signedness_ty = try sema.getBuiltinType("Signedness"); const info = ty.intInfo(mod); - const field_values = try sema.arena.alloc(Value, 2); - // signedness: Signedness, - field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness)); - // bits: u16, - field_values[1] = try mod.intValue(Type.u16, info.bits); - - return 
sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // signedness: Signedness, + try (try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness))).intern(signedness_ty, mod), + // bits: u16, + (try mod.intValue(Type.u16, info.bits)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = int_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Float => { const float_info_decl_index = (try sema.namespaceLookup( @@ -16051,17 +16067,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); try sema.ensureDeclAnalyzed(float_info_decl_index); const float_info_decl = mod.declPtr(float_info_decl_index); - const float_ty = float_info_decl.val.toType(); + const float_info_ty = float_info_decl.val.toType(); const field_vals = .{ // bits: u16, - (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index, + (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = float_ty.ip_index, + .ty = float_info_ty.toIntern(), .storage = .{ .elems = &field_vals }, } }), } })).toValue()); @@ -16099,80 +16115,121 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([8]Value); - field_values.* = .{ + const field_values = .{ // size: Size, - try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)), + try (try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size))).intern(ptr_size_ty, mod), // is_const: bool, - Value.makeBool(!info.mutable), + Value.makeBool(!info.mutable).toIntern(), // is_volatile: bool, - Value.makeBool(info.@"volatile"), + Value.makeBool(info.@"volatile").toIntern(), // alignment: comptime_int, - alignment, + alignment.toIntern(), // address_space: AddressSpace - try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")), + try (try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace"))).intern(addrspace_ty, mod), // child: type, - info.pointee_type.toValue(), + info.pointee_type.toIntern(), // is_allowzero: bool, - Value.makeBool(info.@"allowzero"), + Value.makeBool(info.@"allowzero").toIntern(), // sentinel: ?*const anyopaque, - try sema.optRefValue(block, info.pointee_type, info.sentinel), + (try sema.optRefValue(block, info.pointee_type, info.sentinel)).toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try 
mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = pointer_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Array => { - const info = ty.arrayInfo(mod); - const field_values = try sema.arena.alloc(Value, 3); - // len: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, info.len); - // child: type, - field_values[1] = info.elem_type.toValue(); - // sentinel: ?*const anyopaque, - field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel); + const array_field_ty = t: { + const array_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Array", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); + try sema.ensureDeclAnalyzed(array_field_ty_decl_index); + const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); + break :t array_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.elem_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = array_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Vector => { - const info = ty.arrayInfo(mod); - const field_values = try sema.arena.alloc(Value, 2); - // len: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, info.len); - // child: type, - field_values[1] = info.elem_type.toValue(); + const vector_field_ty = t: { + const vector_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Vector", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); + try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); + const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); + break :t vector_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = vector_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + 
} })).toValue()); }, .Optional => { - const field_values = try sema.arena.alloc(Value, 1); - // child: type, - field_values[0] = ty.optionalChild(mod).toValue(); + const optional_field_ty = t: { + const optional_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Optional", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index); + try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); + const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); + break :t optional_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // child: type, + ty.optionalChild(mod).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = optional_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .ErrorSet => { var fields_anon_decl = try block.startAnonDecl(); @@ -16202,21 +16259,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Value can be zero-length slice otherwise const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const names = ty.errorSetNames(mod); - const vals = try gpa.alloc(InternPool.Index, names.len); - defer gpa.free(vals); + const vals = try sema.arena.alloc(InternPool.Index, names.len); for (vals, names) |*field_val, name_ip| { const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 ..
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16226,7 +16289,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai name_val, }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = error_field_ty.ip_index, + .ty = error_field_ty.toIntern(), .storage = .{ .elems = &error_field_fields }, } }); } @@ -16236,39 +16299,39 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our ?[]const Error value const slice_errors_ty = try mod.ptrType(.{ - .elem_type = error_field_ty.ip_index, + .elem_type = error_field_ty.toIntern(), .size = .Slice, .is_const = true, }); - const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { const array_errors_ty = try mod.arrayType(.{ .len = vals.len, - .child = error_field_ty.ip_index, + .child = error_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_errors_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_errors_ty.ip_index, + .ty = array_errors_ty.toIntern(), .storage = .{ .elems = vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = slice_errors_ty.ip_index, + .ty = slice_errors_ty.toIntern(), .addr = .{ .decl = new_decl }, } }); } else .none; const errors_val = try mod.intern(.{ .opt = .{ - .ty = opt_slice_errors_ty.ip_index, + .ty = opt_slice_errors_ty.toIntern(), .val = errors_payload_val, } }); // Construct Type{ .ErrorSet = errors_val } return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).toIntern(), .val = errors_val, } })).toValue()); }, @@ -16288,22 +16351,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // error_set: type, - ty.errorUnionSet(mod).ip_index, + ty.errorUnionSet(mod).toIntern(), // payload: type, - ty.errorUnionPayload(mod).ip_index, + ty.errorUnionPayload(mod).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = error_union_field_ty.ip_index, + .ty = error_union_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. 
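// For reference, the ErrorSet/ErrorUnion hunks above change only the internal
// representation (interned unions/aggregates instead of arena Values); the
// language-level result of @typeInfo is unchanged. A minimal user-level sketch
// of that behavior (not part of this patch; assumes the 2023-era
// std.builtin.Type layout, where the ErrorSet payload is `?[]const Error`):
//
//     const std = @import("std");
//     const MyError = error{ Alpha, Beta };
//     comptime {
//         const errs = @typeInfo(MyError).ErrorSet.?; // null would mean `anyerror`
//         std.debug.assert(errs.len == 2);
//         // declaration order assumed here
//         std.debug.assert(std.mem.eql(u8, errs[0].name, "Alpha"));
//     }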
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); @@ -16323,23 +16386,28 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len); - defer gpa.free(enum_field_vals); - + const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { const name_ip = enum_type.names[i]; const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16348,10 +16416,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // value: comptime_int, - (try mod.intValue(Type.comptime_int, i)).ip_index, + (try mod.intValue(Type.comptime_int, i)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = enum_field_ty.ip_index, + .ty = enum_field_ty.toIntern(), .storage = .{ .elems = &enum_field_fields }, } }); } @@ -16359,23 +16427,23 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const fields_array_ty = try mod.arrayType(.{ .len = enum_field_vals.len, - .child = enum_field_ty.ip_index, + .child = enum_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( fields_array_ty, (try mod.intern(.{ .aggregate = .{ - .ty = fields_array_ty.ip_index, + .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = enum_field_ty.ip_index, + .elem_type = enum_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, } }); }; @@ -16403,13 +16471,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive.ip_index, + is_exhaustive.toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_enum_ty.ip_index, + .ty = type_enum_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16460,14 +16528,21 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16481,12 +16556,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - field.ty.ip_index, + field.ty.toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).ip_index, + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = union_field_ty.ip_index, + .ty = union_field_ty.toIntern(), .storage = .{ .elems = &union_field_fields }, } }); } @@ -16494,33 +16569,33 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const array_fields_ty = try mod.arrayType(.{ .len = union_field_vals.len, - .child = union_field_ty.ip_index, + .child = union_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_fields_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_fields_ty.ip_index, + .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = union_field_ty.ip_index, + .elem_type = union_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); const enum_tag_ty_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).ip_index, - .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none, + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, } }); const container_layout_ty = t: { @@ -16538,7 +16613,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, @@ -16548,10 +16623,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai decls_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).toIntern(), 
.val = try mod.intern(.{ .aggregate = .{ - .ty = type_union_ty.ip_index, + .ty = type_union_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16595,7 +16670,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); fv: { - const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |tuple| { struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for ( @@ -16611,16 +16686,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // https://github.com/ziglang/zig/issues/15709 @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); + try std.fmt.allocPrint(sema.arena, "{d}", .{i}); + const new_decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(), } }); }; @@ -16633,14 +16716,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, field_ty, // default_value: ?*const anyopaque, - default_val_ptr.ip_index, + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(is_comptime).ip_index, + Value.makeBool(is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index, + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(), }; struct_field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = struct_field_ty.ip_index, + .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } @@ -16660,20 +16743,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; - const opt_default_val = if (field.default_val.ip_index == .unreachable_value) + const opt_default_val = if (field.default_val.toIntern() == .unreachable_value) null else field.default_val; @@ -16684,16 +16774,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - field.ty.ip_index, + field.ty.toIntern(), // default_value: ?*const anyopaque, - default_val_ptr.ip_index, + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(field.is_comptime).ip_index, + Value.makeBool(field.is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).ip_index, + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = struct_field_ty.ip_index, + .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } @@ -16702,37 +16792,37 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const array_fields_ty = try mod.arrayType(.{ .len = struct_field_vals.len, - .child = struct_field_ty.ip_index, + .child = struct_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_fields_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_fields_ty.ip_index, + .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = struct_field_ty.ip_index, + .elem_type = struct_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); const backing_integer_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).ip_index, + .ty = (try mod.optionalType(.type_type)).toIntern(), .val = if (layout == .Packed) val: { const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - break :val struct_obj.backing_int_ty.ip_index; + break :val struct_obj.backing_int_ty.toIntern(); } else .none, } }); @@ -16751,7 +16841,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16759,13 +16849,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple(mod)).ip_index, + 
Value.makeBool(struct_ty.isTuple(mod)).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_struct_ty.ip_index, + .ty = type_struct_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16794,10 +16884,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai decls_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_opaque_ty.ip_index, + .ty = type_opaque_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16845,25 +16935,25 @@ fn typeInfoDecls( const array_decl_ty = try mod.arrayType(.{ .len = decl_vals.items.len, - .child = declaration_ty.ip_index, + .child = declaration_ty.toIntern(), .sentinel = .none, }); const new_decl = try decls_anon_decl.finish( array_decl_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_decl_ty.ip_index, + .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } })).toValue(), 0, // default alignment ); return try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = declaration_ty.ip_index, + .elem_type = declaration_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), } }); } @@ -16892,16 +16982,24 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); + const name = mem.span(decl.name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16909,10 +17007,10 @@ fn typeInfoNamespaceDecls( //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub).ip_index, + Value.makeBool(decl.is_pub).toIntern(), }; try decl_vals.append(try mod.intern(.{ .aggregate = .{ - .ty = declaration_ty.ip_index, + .ty = declaration_ty.toIntern(), .storage = .{ .elems = &fields }, } })); } @@ -16985,7 +17083,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return mod.vectorType(.{ .len = operand.vectorLen(mod), - .child = log2_elem_ty.ip_index, + .child = log2_elem_ty.toIntern(), }); }, else => {}, @@ -17527,7 +17625,7 @@ fn zirRetErrValue( const kv = try mod.getErrorValue(err_name); const error_set_type = try mod.singleErrorSetType(err_name); const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), } })).toValue()); return sema.analyzeRet(block, result_inst, src); @@ -17854,9 +17952,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
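// The lazy_align check below exists so that `*align(@alignOf(T)) T` can be
// canonicalized to default (0) alignment without forcing T's layout to be
// resolved. A minimal user-level sketch of the observable rule (not part of
// this patch):
//
//     const std = @import("std");
//     const S = struct { x: u32 };
//     comptime {
//         // An explicit alignment equal to the natural one is the default,
//         // so both spellings name the same pointer type.
//         std.debug.assert(*align(@alignOf(S)) S == *S);
//     }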
- switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { - .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none, + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none, else => {}, }, else => {}, @@ -17985,7 +18083,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com } } return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ - .ty = obj_ty.ip_index, + .ty = obj_ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue()); } @@ -18021,10 +18119,11 @@ fn unionInit( const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = init_val, - })); + return sema.addConstant(union_ty, (try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try init_val.intern(field.ty, mod), + } })).toValue()); } try sema.requireRuntimeBlock(block, init_src, null); @@ -18125,12 +18224,12 @@ fn zirStructInit( const init_inst = try sema.resolveInst(item.data.init); if (try sema.resolveMaybeUndefVal(init_inst)) |val| { - return sema.addConstantMaybeRef( - block, - resolved_ty, - try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), - is_ref, - ); + const field = resolved_ty.unionFields(mod).values()[field_index]; + return sema.addConstantMaybeRef(block, resolved_ty, (try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try val.intern(field.ty, mod), + } })).toValue(), is_ref); } if (is_ref) { @@ -18171,7 +18270,7 @@ fn finishStructInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct| { for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| { if (field_inits[i] != .none) continue; @@ -18204,7 +18303,7 @@ fn finishStructInit( for (struct_obj.fields.values(), 0..) |field, i| { if (field_inits[i] != .none) continue; - if (field.default_val.ip_index == .unreachable_value) { + if (field.default_val.toIntern() == .unreachable_value) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; @@ -18250,7 +18349,7 @@ fn finishStructInit( .intern(struct_ty.structFieldType(field_i, mod), mod); } const struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = elems }, } }); return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref); @@ -18331,7 +18430,7 @@ fn zirStructInitAnon( gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init).ip_index; + field_ty.* = sema.typeOf(init).toIntern(); if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); @@ -18464,15 +18563,19 @@ fn zirArrayInit( } else null; const runtime_index = opt_runtime_index orelse { - const elem_vals = try sema.arena.alloc(Value, resolved_args.len); - - for (resolved_args, 0..) 
|arg, i| { + const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len); + for (elem_vals, resolved_args, 0..) |*val, arg, i| { + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) + else + array_ty.elemType2(mod); // We checked that all args are comptime above. - elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?; + val.* = try ((sema.resolveMaybeUndefVal(arg) catch unreachable).?).intern(elem_ty, mod); } - - const array_val = try Value.Tag.aggregate.create(sema.arena, elem_vals); - return sema.addConstantMaybeRef(block, array_ty, array_val, is_ref); + return sema.addConstantMaybeRef(block, array_ty, (try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = elem_vals }, + } })).toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { @@ -18548,7 +18651,7 @@ fn zirArrayInitAnon( for (operands, 0..) |operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem).ip_index; + types[i] = sema.typeOf(elem).toIntern(); if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); @@ -18560,7 +18663,7 @@ fn zirArrayInitAnon( return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val.ip_index; + values[i] = val.toIntern(); } else { values[i] = .none; runtime_src = operand_src; @@ -18676,7 +18779,7 @@ fn fieldType( const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; switch (cur_ty.zigTypeTag(mod)) { - .Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); return sema.addType(anon_struct.types[field_index].toType()); @@ -18698,7 +18801,7 @@ fn fieldType( .Optional => { // Struct/array init through optional requires the child type to not be a pointer. // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = mod.intern_pool.indexToKey(cur_ty.ip_index).opt_type.toType(); + cur_ty = mod.intern_pool.indexToKey(cur_ty.toIntern()).opt_type.toType(); continue; }, .ErrorUnion => { @@ -18776,7 +18879,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name; + const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; const bytes = sema.mod.intern_pool.stringToSlice(err_name); return sema.addStrLit(block, bytes); } @@ -18820,21 +18923,21 @@ fn zirUnaryMath( const vec_len = operand_ty.vectorLen(mod); const result_ty = try mod.vectorType(.{ .len = vec_len, - .child = scalar_ty.ip_index, + .child = scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(sema.mod, i); - elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); + elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -18867,7 +18970,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const tag_name = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; const bytes = mod.intern_pool.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, @@ -18956,7 +19059,7 @@ fn zirReify( .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); @@ -18966,7 +19069,7 @@ fn zirReify( return sema.addType(ty); }, .Vector => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); @@ -18977,12 +19080,12 @@ fn zirReify( const ty = try mod.vectorType(.{ .len = len, - .child = child_ty.ip_index, + .child = child_ty.toIntern(), }); return sema.addType(ty); }, .Float => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18997,7 +19100,7 @@ fn zirReify( return sema.addType(ty); }, .Pointer => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); const is_volatile_val = try union_val.val.fieldValue(mod, fields.getIndex("is_volatile").?); @@ -19088,7 +19191,7 @@ fn zirReify( return sema.addType(ty); }, .Array => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); @@ -19107,7 +19210,7 @@ fn zirReify( return sema.addType(ty); }, .Optional => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = 
ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19116,7 +19219,7 @@ fn zirReify( return sema.addType(ty); }, .ErrorUnion => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); @@ -19155,7 +19258,7 @@ fn zirReify( return sema.addType(ty); }, .Struct => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); @@ -19176,7 +19279,7 @@ fn zirReify( return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); }, .Enum => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); @@ -19517,7 +19620,7 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Fn => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); @@ -19571,7 +19674,7 @@ fn zirReify( const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - param_type.* = param_type_val.ip_index; + param_type.* = param_type_val.toIntern(); if (arg_is_noalias) { if (!param_type.toType().isPtrAtRuntime(mod)) { @@ -19733,7 +19836,7 @@ fn reifyStruct( opt_val; break :blk try payload_val.copy(new_decl_arena_allocator); } else Value.@"unreachable"; - if (is_comptime_val.toBool(mod) and default_val.ip_index == .unreachable_value) { + if (is_comptime_val.toBool(mod) and default_val.toIntern() == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -19956,9 +20059,17 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); + const decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), 0, // default alignment ); @@ -20125,8 +20236,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat break :disjoint false; } - if (!ip.isInferredErrorSetType(dest_ty.ip_index) and - !ip.isInferredErrorSetType(operand_ty.ip_index)) + if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and + !ip.isInferredErrorSetType(operand_ty.toIntern())) { break :disjoint true; } @@ -20157,7 +20268,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name); + const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.toIntern()).err.name); if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( @@ -20257,11 +20368,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (dest_ty.zigTypeTag(mod) == .Optional) { var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod); + break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, mod, dest_ptr_info), mod); } else { var dest_ptr_info = dest_ty.ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info); + break :blk try Type.ptr(sema.arena, mod, dest_ptr_info); } }; @@ -20279,10 +20390,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air errdefer msg.destroy(sema.gpa); try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ - operand_ty.fmt(sema.mod), operand_align, + operand_ty.fmt(mod), operand_align, }); try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{ - dest_ty.fmt(sema.mod), dest_align, + dest_ty.fmt(mod), dest_align, }); try sema.errNote(block, src, msg, "consider using '@alignCast'", .{}); @@ -20296,11 +20407,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { - return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .val = operand_val.toIntern(), } })).toValue()); } @@ -20335,7 +20446,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData var ptr_info = operand_ty.ptrInfo(mod); ptr_info.mutable = true; - const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + const dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20356,7 +20467,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD var ptr_info = operand_ty.ptrInfo(mod); ptr_info.@"volatile" = false; - const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + const dest_ty = try 
Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20382,7 +20493,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_ty = if (is_vector) try mod.vectorType(.{ .len = operand_ty.vectorLen(mod), - .child = dest_scalar_ty.ip_index, + .child = dest_scalar_ty.toIntern(), }) else dest_scalar_ty; @@ -20405,7 +20516,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (operand_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ - @tagName(dest_info.signedness), operand_ty.fmt(sema.mod), + @tagName(dest_info.signedness), operand_ty.fmt(mod), }); } if (operand_info.bits < dest_info.bits) { @@ -20414,7 +20525,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, "destination type '{}' has more bits than source type '{}'", - .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) }, + .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ @@ -20434,18 +20545,18 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!is_vector) { return sema.addConstant( dest_ty, - try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod), + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), ); } - const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod)); + const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -20466,7 +20577,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var ptr_info = ptr_ty.ptrInfo(mod); ptr_info.@"align" = dest_align; - var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + var dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (ptr_ty.zigTypeTag(mod) == .Optional) { dest_ty = try mod.optionalType(dest_ty.toIntern()); } @@ -20531,22 +20642,22 @@ fn zirBitCount( const vec_len = operand_ty.vectorLen(mod); const result_ty = try mod.vectorType(.{ .len = vec_len, - .child = result_scalar_ty.ip_index, + .child = result_scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); + const elem_val = try val.elemValue(mod, i); const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = try mod.intValue(scalar_ty, count); + elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern(); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_ty, operand); } @@ -20580,7 +20691,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, operand_src, "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits", - .{ scalar_ty.fmt(sema.mod), bits }, + .{ scalar_ty.fmt(mod), bits }, ); } @@ -20605,15 +20716,15 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.byteSwap(operand_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20653,15 +20764,15 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..)
|*elem, i| {
-                    const elem_val = try val.elemValue(sema.mod, i);
-                    elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena);
+                    const elem_val = try val.elemValue(mod, i);
+                    elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                 }
-                return sema.addConstant(
-                    operand_ty,
-                    try Value.Tag.aggregate.create(sema.arena, elems),
-                );
+                return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{
+                    .ty = operand_ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue());
             } else operand_src;
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -20699,7 +20810,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
         .Struct => {},
         else => {
             const msg = msg: {
-                const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)});
+                const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)});
                 errdefer msg.destroy(sema.gpa);
                 try sema.addDeclaredHereNote(msg, ty);
                 break :msg msg;
@@ -20738,7 +20849,7 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com
     const mod = sema.mod;
     switch (ty.zigTypeTag(mod)) {
         .Struct, .Enum, .Union, .Opaque => return,
-        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}),
     }
 }
@@ -20748,7 +20859,7 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr
     switch (try ty.zigTypeTagOrPoison(mod)) {
         .ComptimeInt => return true,
         .Int => return false,
-        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}),
     }
 }
@@ -20807,7 +20918,7 @@ fn checkPtrOperand(
                 block,
                 ty_src,
                 "expected pointer, found '{}'",
-                .{ty.fmt(sema.mod)},
+                .{ty.fmt(mod)},
             );
             errdefer msg.destroy(sema.gpa);
@@ -20820,7 +20931,7 @@ fn checkPtrOperand(
         .Optional => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
-    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
+    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
 }
 
 fn checkPtrType(
@@ -20838,7 +20949,7 @@ fn checkPtrType(
                 block,
                 ty_src,
                 "expected pointer type, found '{}'",
-                .{ty.fmt(sema.mod)},
+                .{ty.fmt(mod)},
             );
             errdefer msg.destroy(sema.gpa);
@@ -20851,7 +20962,7 @@ fn checkPtrType(
         .Optional => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
-    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
+    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
 }
 
 fn checkVectorElemType(
@@ -20865,7 +20976,7 @@ fn checkVectorElemType(
         .Int, .Float, .Bool => return,
         else => if (ty.isPtrAtRuntime(mod)) return,
     }
-    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)});
+    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)});
 }
 
 fn checkFloatType(
@@ -20877,7 +20988,7 @@ fn checkFloatType(
     const mod = sema.mod;
     switch (ty.zigTypeTag(mod)) {
         .ComptimeInt, .ComptimeFloat, .Float => {},
-        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}),
     }
 }
@@ -20894,7 +21005,7 @@ fn checkNumericType(
             .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
             else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
         },
-        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}),
     }
 }
@@ -20928,7 +21039,7 @@ fn checkAtomicPtrOperand(
             block,
             elem_ty_src,
             "expected bool, integer, float, enum, or pointer type; found '{}'",
-            .{elem_ty.fmt(sema.mod)},
+            .{elem_ty.fmt(mod)},
         ),
     };
@@ -20943,7 +21054,7 @@ fn checkAtomicPtrOperand(
     const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
         .Pointer => ptr_ty.ptrInfo(mod),
         else => {
-            const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
+            const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
             unreachable;
         },
@@ -20953,7 +21064,7 @@ fn checkAtomicPtrOperand(
     wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero";
     wanted_ptr_data.@"volatile" = ptr_data.@"volatile";
 
-    const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
+    const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data);
     const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
 
     return casted_ptr;
@@ -21016,12 +21127,12 @@ fn checkIntOrVector(
             switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
-                    elem_ty.fmt(sema.mod),
+                    elem_ty.fmt(mod),
                 }),
             }
         },
         else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
-            operand_ty.fmt(sema.mod),
+            operand_ty.fmt(mod),
         }),
     }
 }
@@ -21040,12 +21151,12 @@ fn checkIntOrVectorAllowComptime(
             switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
-                    elem_ty.fmt(sema.mod),
+                    elem_ty.fmt(mod),
                 }),
             }
         },
         else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
-            operand_ty.fmt(sema.mod),
+            operand_ty.fmt(mod),
        }),
    }
 }
@@ -21054,7 +21165,7 @@ fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp
     const mod = sema.mod;
     switch (ty.zigTypeTag(mod)) {
         .ErrorSet => return,
-        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(mod)}),
     }
 }
@@ -21142,7 +21253,7 @@ fn checkVectorizableBinaryOperands(
     } else {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{
-                lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
+                lhs_ty.fmt(mod), rhs_ty.fmt(mod),
             });
             errdefer msg.destroy(sema.gpa);
             if (lhs_is_vector) {
@@ -21161,7 +21272,7 @@ fn checkVectorizableBinaryOperands(
 fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
     if (base_src == .unneeded) return .unneeded;
     const mod = sema.mod;
-    return mod.optionsSrc(sema.mod.declPtr(block.src_decl), base_src, wanted);
+    return mod.optionsSrc(mod.declPtr(block.src_decl), base_src, wanted);
 }
 
 fn resolveExportOptions(
@@ -21282,7 +21393,7 @@ fn zirCmpxchg(
             block,
             elem_ty_src,
             "expected bool, integer, enum, or pointer type; found '{}'",
-            .{elem_ty.fmt(sema.mod)},
+            .{elem_ty.fmt(mod)},
         );
     }
     const uncasted_ptr = try sema.resolveInst(extra.ptr);
@@ -21322,8 +21433,8 @@ fn zirCmpxchg(
             const ptr_ty = sema.typeOf(ptr);
             const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
             const result_val = try mod.intern(.{ .opt = .{
-                .ty = result_ty.ip_index,
-                .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
+                .ty = result_ty.toIntern(),
+                .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: {
                     try sema.storePtr(block, src, ptr, new_value);
                     break :blk .none;
                 } else stored_val.toIntern(),
@@ -21363,7 +21474,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     try sema.checkVectorElemType(block, scalar_src, scalar_ty);
     const vector_ty = try mod.vectorType(.{
         .len = len,
-        .child = scalar_ty.ip_index,
+        .child = scalar_ty.toIntern(),
     });
     if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
         if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty);
@@ -21489,7 +21600,7 @@ fn analyzeShuffle(
     const res_ty = try mod.vectorType(.{
         .len = mask_len,
-        .child = elem_ty.ip_index,
+        .child = elem_ty.toIntern(),
     });
 
     var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
@@ -21516,11 +21627,11 @@ fn analyzeShuffle(
     const a_ty = try mod.vectorType(.{
         .len = a_len,
-        .child = elem_ty.ip_index,
+        .child = elem_ty.toIntern(),
     });
     const b_ty = try mod.vectorType(.{
         .len = b_len,
-        .child = elem_ty.ip_index,
+        .child = elem_ty.toIntern(),
     });
 
     if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
@@ -21567,25 +21678,21 @@ fn analyzeShuffle(
     if (try sema.resolveMaybeUndefVal(a)) |a_val| {
         if (try sema.resolveMaybeUndefVal(b)) |b_val| {
-            const values = try sema.arena.alloc(Value, mask_len);
-
-            i = 0;
-            while (i < mask_len) : (i += 1) {
-                const mask_elem_val = try mask.elemValue(sema.mod, i);
+            const values = try sema.arena.alloc(InternPool.Index, mask_len);
+            for (values, 0..) |*value, elem_i| {
+                const mask_elem_val = try mask.elemValue(sema.mod, elem_i);
                 if (mask_elem_val.isUndef(mod)) {
-                    values[i] = Value.undef;
+                    value.* = try mod.intern(.{ .undef = elem_ty.toIntern() });
                     continue;
                 }
                 const int = mask_elem_val.toSignedInt(mod);
                 const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
-                if (int >= 0) {
-                    values[i] = try a_val.elemValue(sema.mod, unsigned);
-                } else {
-                    values[i] = try b_val.elemValue(sema.mod, unsigned);
-                }
+                value.* = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod);
             }
-            const res_val = try Value.Tag.aggregate.create(sema.arena, values);
-            return sema.addConstant(res_ty, res_val);
+            return sema.addConstant(res_ty, (try mod.intern(.{ .aggregate = .{
+                .ty = res_ty.toIntern(),
+                .storage = .{ .elems = values },
+            } })).toValue());
         }
     }
@@ -21599,22 +21706,25 @@ fn analyzeShuffle(
     const max_src = if (a_len > b_len) a_src else b_src;
     const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
 
-    const expand_mask_values = try sema.arena.alloc(Value, max_len);
+    const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
     i = 0;
     while (i < min_len) : (i += 1) {
-        expand_mask_values[i] = try mod.intValue(Type.comptime_int, i);
+        expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
     }
     while (i < max_len) : (i += 1) {
-        expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1);
+        expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
     }
-    const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values);
+    const expand_mask = try mod.intern(.{ .aggregate = .{
+        .ty = (try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(),
+        .storage = .{ .elems = expand_mask_values },
+    } });
 
     if (a_len < b_len) {
         const undef = try sema.addConstUndef(a_ty);
-        a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask, @intCast(u32, max_len));
+        a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len));
     } else {
         const undef = try sema.addConstUndef(b_ty);
-        b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask, @intCast(u32, max_len));
+        b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len));
     }
@@ -21651,7 +21761,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
         .Vector, .Array => pred_ty.arrayLen(mod),
-        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
+        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}),
     };
     const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
@@ -21663,7 +21773,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const vec_ty = try mod.vectorType(.{
         .len = vec_len,
-        .child = elem_ty.ip_index,
+        .child = elem_ty.toIntern(),
     });
     const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
     const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);
@@ -21681,21 +21791,17 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
             if (maybe_b) |b_val| {
                 if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
 
-                const elems = try sema.gpa.alloc(Value, vec_len);
+                const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
                 for (elems, 0..) |*elem, i| {
-                    const pred_elem_val = try pred_val.elemValue(sema.mod, i);
+                    const pred_elem_val = try pred_val.elemValue(mod, i);
                     const should_choose_a = pred_elem_val.toBool(mod);
-                    if (should_choose_a) {
-                        elem.* = try a_val.elemValue(sema.mod, i);
-                    } else {
-                        elem.* = try b_val.elemValue(sema.mod, i);
-                    }
+                    elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod);
                 }
 
-                return sema.addConstant(
-                    vec_ty,
-                    try Value.Tag.aggregate.create(sema.arena, elems),
-                );
+                return sema.addConstant(vec_ty, (try mod.intern(.{ .aggregate = .{
+                    .ty = vec_ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue());
             } else {
                 break :rs b_src;
             }
@@ -22019,7 +22125,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const args = try sema.resolveInst(extra.args);
     const args_ty = sema.typeOf(args);
-    if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) {
+    if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) {
         return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)});
     }
@@ -22102,7 +22208,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
 
     if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
-        const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) {
+        const field = switch (mod.intern_pool.indexToKey(field_ptr_val.toIntern())) {
             .ptr => |ptr| switch (ptr.addr) {
                 .field => |field| field,
                 else => null,
@@ -22244,16 +22350,16 @@ fn analyzeMinMax(
                 cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
                 continue;
             };
-            const elems = try sema.arena.alloc(Value, vec_len);
+            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
             for (elems, 0..) |*elem, i| {
                 const lhs_elem_val = try cur_val.elemValue(mod, i);
                 const rhs_elem_val = try operand_val.elemValue(mod, i);
-                elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod);
+                elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod);
             }
-            cur_minmax = try sema.addConstant(
-                simd_op.result_ty,
-                try Value.Tag.aggregate.create(sema.arena, elems),
-            );
+            cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{
+                .ty = simd_op.result_ty.toIntern(),
+                .storage = .{ .elems = elems },
+            } })).toValue());
         } else {
             runtime_known.unset(operand_idx);
             cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val);
@@ -22292,7 +22398,7 @@ fn analyzeMinMax(
             const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
             break :blk try mod.vectorType(.{
                 .len = len,
-                .child = refined_elem_ty.ip_index,
+                .child = refined_elem_ty.toIntern(),
             });
         } else blk: {
             if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
@@ -22377,7 +22483,7 @@ fn analyzeMinMax(
     const final_ty = if (is_vector)
         try mod.vectorType(.{
             .len = unrefined_ty.vectorLen(mod),
-            .child = final_elem_ty.ip_index,
+            .child = final_elem_ty.toIntern(),
         })
     else
         final_elem_ty;
@@ -22765,7 +22871,7 @@ fn zirVarExtended(
     try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
 
     return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{
-        .ty = var_ty.ip_index,
+        .ty = var_ty.toIntern(),
         .init = init_val.toIntern(),
         .decl = sema.owner_decl_index,
         .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString(
@@ -23239,7 +23345,7 @@ fn zirBuiltinExtern(
     {
         const new_var = try mod.intern(.{ .variable = .{
-            .ty = ty.ip_index,
+            .ty = ty.toIntern(),
             .init = .none,
             .decl = sema.owner_decl_index,
             .is_extern = true,
@@ -23264,7 +23370,7 @@ fn zirBuiltinExtern(
     try sema.ensureDeclAnalyzed(new_decl_index);
 
     const ref = try mod.intern(.{ .ptr = .{
-        .ty = (try mod.singleConstPtrType(ty)).ip_index,
+        .ty = (try mod.singleConstPtrType(ty)).toIntern(),
         .addr = .{ .decl = new_decl_index },
     } });
     return sema.addConstant(ty, ref.toValue());
@@ -24207,7 +24313,7 @@ fn fieldVal(
             switch (try child_type.zigTypeTagOrPoison(mod)) {
                 .ErrorSet => {
                     const name = try ip.getOrPutString(gpa, field_name);
-                    switch (ip.indexToKey(child_type.ip_index)) {
+                    switch (ip.indexToKey(child_type.toIntern())) {
                        .error_set_type => |error_set_type| blk: {
                            if (error_set_type.nameIndex(ip, name) != null) break :blk;
                            const msg = msg: {
@@ -24232,7 +24338,7 @@ fn fieldVal(
                    else
                        try mod.singleErrorSetTypeNts(name);
                    return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
-                        .ty = error_set_type.ip_index,
+                        .ty = error_set_type.toIntern(),
                        .name = name,
                    } })).toValue());
                },
@@ -24376,9 +24482,9 @@ fn fieldPtr(
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                     return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{
-                        .ty = result_ty.ip_index,
+                        .ty = result_ty.toIntern(),
                         .addr = .{ .field = .{
-                            .base = val.ip_index,
+                            .base = val.toIntern(),
                             .index = Value.slice_ptr_index,
                         } },
                     } })).toValue());
@@ -24396,9 +24502,9 @@ fn fieldPtr(
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                     return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{
-                        .ty = result_ty.ip_index,
+                        .ty = result_ty.toIntern(),
                         .addr = .{ .field = .{
-                            .base = val.ip_index,
+                            .base = val.toIntern(),
                             .index = Value.slice_len_index,
                         } },
                     } })).toValue());
@@ -24429,7 +24535,7 @@ fn fieldPtr(
             switch (child_type.zigTypeTag(mod)) {
                 .ErrorSet => {
                     const name = try ip.getOrPutString(gpa, field_name);
-                    switch (ip.indexToKey(child_type.ip_index)) {
+                    switch (ip.indexToKey(child_type.toIntern())) {
                         .error_set_type => |error_set_type| blk: {
                             if (error_set_type.nameIndex(ip, name) != null) {
                                 break :blk;
@@ -24454,7 +24560,7 @@ fn fieldPtr(
                     return sema.analyzeDeclRef(try anon_decl.finish(
                         error_set_type,
                         (try mod.intern(.{ .err = .{
-                            .ty = error_set_type.ip_index,
+                            .ty = error_set_type.toIntern(),
                             .name = name,
                         } })).toValue(),
                         0, // default alignment
@@ -24722,9 +24828,9 @@ fn finishFieldCallBind(
     if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
         const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
-            .ty = ptr_field_ty.ip_index,
+            .ty = ptr_field_ty.toIntern(),
             .addr = .{ .field = .{
-                .base = struct_ptr_val.ip_index,
+                .base = struct_ptr_val.toIntern(),
                 .index = field_index,
             } },
         } })).toValue());
@@ -24908,7 +25014,7 @@ fn structFieldPtrByIndex(
     if (field.is_comptime) {
         const val = try mod.intern(.{ .ptr = .{
-            .ty = ptr_field_ty.ip_index,
+            .ty = ptr_field_ty.toIntern(),
             .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) },
         } });
         return sema.addConstant(ptr_field_ty, val.toValue());
@@ -24916,7 +25022,7 @@ fn structFieldPtrByIndex(
     if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
         const val = try mod.intern(.{ .ptr = .{
-            .ty = ptr_field_ty.ip_index,
+            .ty = ptr_field_ty.toIntern(),
             .addr = .{ .field = .{
                 .base = try struct_ptr_val.intern(struct_ptr_ty, mod),
                 .index = field_index,
@@ -24942,7 +25048,7 @@ fn structFieldVal(
     assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);
 
     const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
-    switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+    switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
         .struct_type => |struct_type| {
             const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
             if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
@@ -25116,9 +25222,9 @@ fn unionFieldPtr(
             .Packed, .Extern => {},
         }
         return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
-            .ty = ptr_field_ty.ip_index,
+            .ty = ptr_field_ty.toIntern(),
             .addr = .{ .field = .{
-                .base = union_ptr_val.ip_index,
+                .base = union_ptr_val.toIntern(),
                 .index = field_index,
             } },
         } })).toValue());
@@ -25413,16 +25519,16 @@ fn tupleFieldPtr(
     if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
         return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
-            .ty = ptr_field_ty.ip_index,
-            .addr = .{ .comptime_field = default_val.ip_index },
+            .ty = ptr_field_ty.toIntern(),
+            .addr = .{ .comptime_field = default_val.toIntern() },
         } })).toValue());
     }
 
     if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| {
         return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
-            .ty = ptr_field_ty.ip_index,
+            .ty = ptr_field_ty.toIntern(),
             .addr = .{ .field = .{
-                .base = tuple_ptr_val.ip_index,
+                .base = tuple_ptr_val.toIntern(),
                 .index = field_index,
             } },
         } })).toValue());
@@ -25787,11 +25893,11 @@ fn coerceExtra(
     var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
     if (in_memory_result == .ok) {
         if (maybe_inst_val) |val| {
-            if (val.ip_index == .none or val.ip_index == .null_value) {
+            if (val.ip_index == .none) {
                 // Keep the comptime Value representation; take the new type.
                 return sema.addConstant(dest_ty, val);
             } else {
-                const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index);
+                const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
                 return sema.addConstant(dest_ty, new_val.toValue());
             }
         }
@@ -25816,7 +25922,7 @@ fn coerceExtra(
             // cast from ?*T and ?[*]T to ?*anyopaque
             // but don't do it if the source type is a double pointer
             if (dest_ty.isPtrLikeOptional(mod) and
-                dest_ty.elemType2(mod).ip_index == .anyopaque_type and
+                dest_ty.elemType2(mod).toIntern() == .anyopaque_type and
                 inst_ty.isPtrAtRuntime(mod)) anyopaque_check:
             {
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
@@ -25954,7 +26060,7 @@ fn coerceExtra(
             // cast from *T and [*]T to *anyopaque
             // but don't do it if the source type is a double pointer
-            if (dest_info.pointee_type.ip_index == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
+            if (dest_info.pointee_type.toIntern() == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
                 const elem_ty = inst_ty.elemType2(mod);
                 if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
@@ -26084,12 +26190,12 @@ fn coerceExtra(
                     // Optional slice is represented with a null pointer so
                     // we use a dummy pointer value with the required alignment.
                     return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{
-                        .ty = dest_ty.ip_index,
+                        .ty = dest_ty.toIntern(),
                         .addr = .{ .int = (if (dest_info.@"align" != 0)
                             try mod.intValue(Type.usize, dest_info.@"align")
                         else
-                            try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index },
-                        .len = (try mod.intValue(Type.usize, 0)).ip_index,
+                            try dest_info.pointee_type.lazyAbiAlignment(mod)).toIntern() },
+                        .len = (try mod.intValue(Type.usize, 0)).toIntern(),
                     } })).toValue());
                 }
@@ -26166,7 +26272,7 @@ fn coerceExtra(
                     if (!opts.report_err) return error.NotCoercible;
                     return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
                 }
-                const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index);
+                const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
                 return try sema.addConstant(dest_ty, new_val.toValue());
             }
             if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
@@ -26258,7 +26364,7 @@ fn coerceExtra(
             .EnumLiteral => {
                 // enum literal to enum
                 const val = try sema.resolveConstValue(block, .unneeded, inst, "");
-                const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal;
+                const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal;
                 const bytes = mod.intern_pool.stringToSlice(string);
                 const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse {
                     const msg = msg: {
@@ -26294,14 +26400,14 @@ fn coerceExtra(
         .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
             .ErrorUnion => eu: {
                 if (maybe_inst_val) |inst_val| {
-                    switch (inst_val.ip_index) {
+                    switch (inst_val.toIntern()) {
                         .undef => return sema.addConstUndef(dest_ty),
-                        else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) {
+                        else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) {
                             .error_union => |error_union| switch (error_union.val) {
                                 .err_name => |err_name| {
                                     const error_set_ty = inst_ty.errorUnionSet(mod);
                                     const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{
-                                        .ty = error_set_ty.ip_index,
+                                        .ty = error_set_ty.toIntern(),
                                         .name = err_name,
                                     } })).toValue());
                                     return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src);
@@ -26597,7 +26703,7 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .array_sentinel => |sentinel| {
-                if (sentinel.actual.ip_index != .unreachable_value) {
+                if (sentinel.actual.toIntern() != .unreachable_value) {
                     try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
                         sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
                     });
@@ -26724,7 +26830,7 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .ptr_sentinel => |sentinel| {
-                if (sentinel.actual.ip_index != .unreachable_value) {
+                if (sentinel.actual.toIntern() != .unreachable_value) {
                     try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
                         sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
                     });
@@ -27016,9 +27122,9 @@ fn coerceInMemoryAllowedErrorSets(
         const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
         // We will make an effort to return `ok` without resolving either error set, to
         // avoid unnecessary "unable to resolve error set" dependency loop errors.
-        switch (src_ty.ip_index) {
+        switch (src_ty.toIntern()) {
             .anyerror_type => {},
-            else => switch (ip.indexToKey(src_ty.ip_index)) {
+            else => switch (ip.indexToKey(src_ty.toIntern())) {
                 .inferred_error_set_type => |src_index| {
                     // If both are inferred error sets of functions, and
                     // the dest includes the source function, the coercion is OK.
@@ -27054,15 +27160,15 @@ fn coerceInMemoryAllowedErrorSets(
     var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
     defer missing_error_buf.deinit();
 
-    switch (src_ty.ip_index) {
-        .anyerror_type => switch (ip.indexToKey(dest_ty.ip_index)) {
+    switch (src_ty.toIntern()) {
+        .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) {
             .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above.
             .simple_type => unreachable, // filtered out above
             .error_set_type => return .from_anyerror,
             else => unreachable,
         },
 
-        else => switch (ip.indexToKey(src_ty.ip_index)) {
+        else => switch (ip.indexToKey(src_ty.toIntern())) {
             .inferred_error_set_type => |src_index| {
                 const src_data = mod.inferredErrorSetPtr(src_index);
@@ -27520,9 +27626,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
     // allocations is relevant to this function, or why it would have
     // different behavior depending on whether the types were inferred.
     // Something seems wrong here.
-    if (prev_ptr_ty.ip_index == .none) {
-        if (prev_ptr_ty.ip_index == .inferred_alloc_mut_type) return null;
-        if (prev_ptr_ty.ip_index == .inferred_alloc_const_type) return null;
+    switch (prev_ptr_ty.ip_index) {
+        .inferred_alloc_mut_type, .inferred_alloc_const_type => return null,
+        else => {},
     }
 
     const prev_ptr_child_ty = prev_ptr_ty.childType(mod);
@@ -27554,11 +27660,11 @@ fn storePtrVal(
 ) !void {
     const mod = sema.mod;
     var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty);
-    try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut);
+    try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl);
 
     switch (mut_kit.pointee) {
         .direct => |val_ptr| {
-            if (mut_kit.decl_ref_mut.runtime_index == .comptime_field_ptr) {
+            if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) {
                 if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) {
                     // TODO use failWithInvalidComptimeFieldStore
                     return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
@@ -27601,7 +27707,7 @@ fn storePtrVal(
 }
 
 const ComptimePtrMutationKit = struct {
-    decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl,
+    mut_decl: InternPool.Key.Ptr.Addr.MutDecl,
     pointee: union(enum) {
         /// The pointer type matches the actual comptime Value so a direct
         /// modification is possible.
@@ -27627,12 +27733,12 @@ const ComptimePtrMutationKit = struct {
     decl_arena: std.heap.ArenaAllocator = undefined,
 
     fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator {
-        const decl = mod.declPtr(self.decl_ref_mut.decl);
+        const decl = mod.declPtr(self.mut_decl.decl);
         return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena);
     }
 
     fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void {
-        const decl = mod.declPtr(self.decl_ref_mut.decl);
+        const decl = mod.declPtr(self.mut_decl.decl);
         decl.value_arena.?.release(&self.decl_arena);
         self.decl_arena = undefined;
     }
@@ -27645,99 +27751,85 @@ fn beginComptimePtrMutation(
     ptr_val: Value,
     ptr_elem_ty: Type,
 ) CompileError!ComptimePtrMutationKit {
-    if (true) unreachable;
     const mod = sema.mod;
-    switch (ptr_val.tag()) {
-        .decl_ref_mut => {
-            const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data;
-            const decl = sema.mod.declPtr(decl_ref_mut.decl_index);
-            return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut);
+    const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
+    switch (ptr.addr) {
+        .decl => unreachable, // isComptimeMutablePtr has been checked already
+        .mut_decl => |mut_decl| {
+            const decl = mod.declPtr(mut_decl.decl);
+            return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl);
         },
-        .comptime_field_ptr => {
-            const payload = ptr_val.castTag(.comptime_field_ptr).?.data;
+        .comptime_field => |comptime_field| {
             const duped = try sema.arena.create(Value);
-            duped.* = payload.field_val;
-            return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{
-                .decl_index = @intToEnum(Module.Decl.Index, 0),
+            duped.* = comptime_field.toValue();
+            return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(ptr_val.toIntern()).toType(), duped, ptr_elem_ty, .{
+                .decl = undefined,
                 .runtime_index = .comptime_field_ptr,
             });
         },
-        .elem_ptr => {
-            const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-            var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty);
-
-            switch (parent.pointee) {
-                .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
-                    .Array, .Vector => {
-                        const check_len = parent.ty.arrayLenIncludingSentinel(mod);
-                        if (elem_ptr.index >= check_len) {
-                            // TODO have the parent include the decl so we can say "declared here"
-                            return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
-                                elem_ptr.index, check_len,
-                            });
-                        }
-                        const elem_ty = parent.ty.childType(mod);
-
-                        // We might have a pointer to multiple elements of the array (e.g. a pointer
-                        // to a sub-array). In this case, we just have to reinterpret the relevant
-                        // bytes of the whole array rather than any single element.
-                        const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
-                        if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) {
-                            const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
-                            return .{
-                                .decl_ref_mut = parent.decl_ref_mut,
-                                .pointee = .{ .reinterpret = .{
-                                    .val_ptr = val_ptr,
-                                    .byte_offset = elem_abi_size * elem_ptr.index,
-                                } },
-                                .ty = parent.ty,
-                            };
-                        }
-
-                        switch (val_ptr.ip_index) {
-                            .undef => {
-                                // An array has been initialized to undefined at comptime and now we
-                                // are for the first time setting an element. We must change the representation
-                                // of the array from `undef` to `array`.
-                                const arena = parent.beginArena(sema.mod);
-                                defer parent.finishArena(sema.mod);
-
-                                const array_len_including_sentinel =
-                                    try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
-                                const elems = try arena.alloc(Value, array_len_including_sentinel);
-                                @memset(elems, Value.undef);
-
-                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+        else => unreachable,
+    }
+    if (true) unreachable;
+    switch (ptr_val.toIntern()) {
+        .none => switch (ptr_val.tag()) {
+            .decl_ref_mut => {
+                const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data;
+                const decl = sema.mod.declPtr(decl_ref_mut.decl_index);
+                return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut);
+            },
+            .comptime_field_ptr => {
+                const payload = ptr_val.castTag(.comptime_field_ptr).?.data;
+                const duped = try sema.arena.create(Value);
+                duped.* = payload.field_val;
+                return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{
+                    .decl_index = @intToEnum(Module.Decl.Index, 0),
+                    .runtime_index = .comptime_field_ptr,
+                });
+            },
+            .elem_ptr => {
+                const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+                var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty);
+
+                switch (parent.pointee) {
+                    .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
+                        .Array, .Vector => {
+                            const check_len = parent.ty.arrayLenIncludingSentinel(mod);
+                            if (elem_ptr.index >= check_len) {
+                                // TODO have the parent include the decl so we can say "declared here"
+                                return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
+                                    elem_ptr.index, check_len,
+                                });
+                            }
+                            const elem_ty = parent.ty.childType(mod);
+
+                            // We might have a pointer to multiple elements of the array (e.g. a pointer
+                            // to a sub-array). In this case, we just have to reinterpret the relevant
+                            // bytes of the whole array rather than any single element.
+                            const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
+                            if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) {
+                                const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
+                                return .{
+                                    .decl_ref_mut = parent.decl_ref_mut,
+                                    .pointee = .{ .reinterpret = .{
+                                        .val_ptr = val_ptr,
+                                        .byte_offset = elem_abi_size * elem_ptr.index,
+                                    } },
+                                    .ty = parent.ty,
+                                };
+                            }
+
+                            switch (val_ptr.toIntern()) {
+                                .undef => {
+                                    // An array has been initialized to undefined at comptime and now we
+                                    // are for the first time setting an element. We must change the representation
+                                    // of the array from `undef` to `array`.
                                     const arena = parent.beginArena(sema.mod);
                                     defer parent.finishArena(sema.mod);
 
+                                    const array_len_including_sentinel =
+                                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
+                                    const elems = try arena.alloc(Value, array_len_including_sentinel);
+                                    @memset(elems, Value.undef);
 
                                     val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
 
@@ -27751,392 +27843,383 @@ fn beginComptimePtrMutation(
                                         parent.decl_ref_mut,
                                     );
                                 },
-                            .str_lit => {
-                                // An array is memory-optimized to store a slice of bytes, but we are about
-                                // to modify an individual field and the representation has to change.
-                                // If we wanted to avoid this, there would need to be special detection
-                                // elsewhere to identify when writing a value to an array element that is stored
-                                // using the `str_lit` tag, and handle it without making a call to this function.
-                                const arena = parent.beginArena(sema.mod);
-                                defer parent.finishArena(sema.mod);
-
-                                const str_lit = val_ptr.castTag(.str_lit).?.data;
-                                const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
-                                const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                                const elems = try arena.alloc(Value, @intCast(usize, dest_len));
-                                for (bytes, 0..) |byte, i| {
-                                    elems[i] = try mod.intValue(elem_ty, byte);
-                                }
-                                if (parent.ty.sentinel(mod)) |sent_val| {
-                                    assert(elems.len == bytes.len + 1);
-                                    elems[bytes.len] = sent_val;
-                                }
-
-                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
-
-                                return beginComptimePtrMutationInner(
+                                .none => switch (val_ptr.tag()) {
+                                    .bytes => {
+                                        // An array is memory-optimized to store a slice of bytes, but we are about
+                                        // to modify an individual field and the representation has to change.
+                                        // If we wanted to avoid this, there would need to be special detection
+                                        // elsewhere to identify when writing a value to an array element that is stored
+                                        // using the `bytes` tag, and handle it without making a call to this function.
+                                        const arena = parent.beginArena(sema.mod);
+                                        defer parent.finishArena(sema.mod);
+
+                                        const bytes = val_ptr.castTag(.bytes).?.data;
+                                        const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
+                                        // bytes.len may be one greater than dest_len because of the case when
+                                        // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
+                                        assert(bytes.len >= dest_len);
+                                        const elems = try arena.alloc(Value, @intCast(usize, dest_len));
+                                        for (elems, 0..) |*elem, i| {
+                                            elem.* = try mod.intValue(elem_ty, bytes[i]);
+                                        }
+
+                                        val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+
+                                        return beginComptimePtrMutationInner(
+                                            sema,
+                                            block,
+                                            src,
+                                            elem_ty,
+                                            &elems[elem_ptr.index],
+                                            ptr_elem_ty,
+                                            parent.decl_ref_mut,
+                                        );
+                                    },
+                                    .str_lit => {
+                                        // An array is memory-optimized to store a slice of bytes, but we are about
+                                        // to modify an individual field and the representation has to change.
+                                        // If we wanted to avoid this, there would need to be special detection
+                                        // elsewhere to identify when writing a value to an array element that is stored
+                                        // using the `str_lit` tag, and handle it without making a call to this function.
+                                        const arena = parent.beginArena(sema.mod);
+                                        defer parent.finishArena(sema.mod);
+
+                                        const str_lit = val_ptr.castTag(.str_lit).?.data;
+                                        const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
+                                        const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+                                        const elems = try arena.alloc(Value, @intCast(usize, dest_len));
+                                        for (bytes, 0..) |byte, i| {
+                                            elems[i] = try mod.intValue(elem_ty, byte);
+                                        }
+                                        if (parent.ty.sentinel(mod)) |sent_val| {
+                                            assert(elems.len == bytes.len + 1);
+                                            elems[bytes.len] = sent_val;
+                                        }
+
+                                        val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+
+                                        return beginComptimePtrMutationInner(
+                                            sema,
+                                            block,
+                                            src,
+                                            elem_ty,
+                                            &elems[elem_ptr.index],
+                                            ptr_elem_ty,
+                                            parent.decl_ref_mut,
+                                        );
+                                    },
+                                    .repeated => {
+                                        // An array is memory-optimized to store only a single element value, and
+                                        // that value is understood to be the same for the entire length of the array.
+                                        // However, now we want to modify an individual field and so the
+                                        // representation has to change. If we wanted to avoid this, there would
+                                        // need to be special detection elsewhere to identify when writing a value to an
+                                        // array element that is stored using the `repeated` tag, and handle it
+                                        // without making a call to this function.
+                                        const arena = parent.beginArena(sema.mod);
+                                        defer parent.finishArena(sema.mod);
+
+                                        const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
+                                        const array_len_including_sentinel =
+                                            try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
+                                        const elems = try arena.alloc(Value, array_len_including_sentinel);
+                                        if (elems.len > 0) elems[0] = repeated_val;
+                                        for (elems[1..]) |*elem| {
+                                            elem.* = try repeated_val.copy(arena);
+                                        }
+
+                                        val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+
+                                        return beginComptimePtrMutationInner(
+                                            sema,
+                                            block,
+                                            src,
+                                            elem_ty,
+                                            &elems[elem_ptr.index],
+                                            ptr_elem_ty,
+                                            parent.decl_ref_mut,
+                                        );
+                                    },
+
+                                    .aggregate => return beginComptimePtrMutationInner(
                                         sema,
                                         block,
                                         src,
                                         elem_ty,
-                                        &elems[elem_ptr.index],
+                                        &val_ptr.castTag(.aggregate).?.data[elem_ptr.index],
                                         ptr_elem_ty,
                                         parent.decl_ref_mut,
-                                    );
-                            },
-                            .repeated => {
-                                // An array is memory-optimized to store only a single element value, and
-                                // that value is understood to be the same for the entire length of the array.
-                                // However, now we want to modify an individual field and so the
-                                // representation has to change. If we wanted to avoid this, there would
-                                // need to be special detection elsewhere to identify when writing a value to an
-                                // array element that is stored using the `repeated` tag, and handle it
-                                // without making a call to this function.
-                                const arena = parent.beginArena(sema.mod);
-                                defer parent.finishArena(sema.mod);
-
-                                const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
-                                const array_len_including_sentinel =
-                                    try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
-                                const elems = try arena.alloc(Value, array_len_including_sentinel);
-                                if (elems.len > 0) elems[0] = repeated_val;
-                                for (elems[1..]) |*elem| {
-                                    elem.* = try repeated_val.copy(arena);
-                                }
-
-                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
-
-                                return beginComptimePtrMutationInner(
-                                    sema,
-                                    block,
-                                    src,
-                                    elem_ty,
-                                    &elems[elem_ptr.index],
-                                    ptr_elem_ty,
-                                    parent.decl_ref_mut,
-                                );
-                            },
-
-                            .aggregate => return beginComptimePtrMutationInner(
-                                sema,
-                                block,
-                                src,
-                                elem_ty,
-                                &val_ptr.castTag(.aggregate).?.data[elem_ptr.index],
-                                ptr_elem_ty,
-                                parent.decl_ref_mut,
-                            ),
-
-                            .the_only_possible_value => {
-                                const duped = try sema.arena.create(Value);
-                                duped.* = Value.initTag(.the_only_possible_value);
-                                return beginComptimePtrMutationInner(
-                                    sema,
-                                    block,
-                                    src,
-                                    elem_ty,
-                                    duped,
-                                    ptr_elem_ty,
-                                    parent.decl_ref_mut,
-                                );
-                            },
-
-                            else => unreachable,
-                        },
-                        else => unreachable,
-                    }
-                },
-                else => {
-                    if (elem_ptr.index != 0) {
-                        // TODO include a "declared here" note for the decl
-                        return sema.fail(block, src, "out of bounds comptime store of index {d}", .{
-                            elem_ptr.index,
-                        });
-                    }
-                    return beginComptimePtrMutationInner(
-                        sema,
-                        block,
-                        src,
-                        parent.ty,
-                        val_ptr,
-                        ptr_elem_ty,
-                        parent.decl_ref_mut,
-                    );
-                },
-            },
-            .reinterpret => |reinterpret| {
-                if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) {
-                    // Even though the parent value type has well-defined memory layout, our
-                    // pointer type does not.
-                    return ComptimePtrMutationKit{
-                        .decl_ref_mut = parent.decl_ref_mut,
-                        .pointee = .bad_ptr_ty,
-                        .ty = elem_ptr.elem_ty,
-                    };
-                }
-
-                const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
-                const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
-                return ComptimePtrMutationKit{
-                    .decl_ref_mut = parent.decl_ref_mut,
-                    .pointee = .{ .reinterpret = .{
-                        .val_ptr = reinterpret.val_ptr,
-                        .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index,
-                    } },
-                    .ty = parent.ty,
-                };
-            },
-            .bad_decl_ty, .bad_ptr_ty => return parent,
-            }
-        },
-        .field_ptr => {
-            const field_ptr = ptr_val.castTag(.field_ptr).?.data;
-            const field_index = @intCast(u32, field_ptr.field_index);
-
-            var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty);
-            switch (parent.pointee) {
-                .direct => |val_ptr| switch (val_ptr.ip_index) {
-                    .undef => {
-                        // A struct or union has been initialized to undefined at comptime and now we
-                        // are for the first time setting a field. We must change the representation
-                        // of the struct/union from `undef` to `struct`/`union`.
-                        const arena = parent.beginArena(sema.mod);
-                        defer parent.finishArena(sema.mod);
-
-                        switch (parent.ty.zigTypeTag(mod)) {
-                            .Struct => {
-                                const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
-                                @memset(fields, Value.undef);
-
-                                val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
-
-                                return beginComptimePtrMutationInner(
-                                    sema,
-                                    block,
-                                    src,
-                                    parent.ty.structFieldType(field_index, mod),
-                                    &fields[field_index],
-                                    ptr_elem_ty,
-                                    parent.decl_ref_mut,
-                                );
-                            },
-                            .Union => {
-                                const payload = try arena.create(Value.Payload.Union);
-                                const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
-                                payload.* = .{ .data = .{
-                                    .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
-                                    .val = Value.undef,
-                                } };
-
-                                val_ptr.* = Value.initPayload(&payload.base);
-
-                                return beginComptimePtrMutationInner(
-                                    sema,
-                                    block,
-                                    src,
-                                    parent.ty.structFieldType(field_index, mod),
-                                    &payload.data.val,
-                                    ptr_elem_ty,
-                                    parent.decl_ref_mut,
-                                );
-                            },
-                            .Pointer => {
-                                assert(parent.ty.isSlice(mod));
-                                val_ptr.* = try Value.Tag.slice.create(arena, .{
-                                    .ptr = Value.undef,
-                                    .len = Value.undef,
-                                });
-
-                                switch (field_index) {
-                                    Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
-                                        sema,
-                                        block,
-                                        src,
-                                        parent.ty.slicePtrFieldType(mod),
-                                        &val_ptr.castTag(.slice).?.data.ptr,
-                                        ptr_elem_ty,
-                                        parent.decl_ref_mut,
-                                    ),
-                                    Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
-                                        sema,
-                                        block,
-                                        src,
-                                        Type.usize,
-                                        &val_ptr.castTag(.slice).?.data.len,
-                                        ptr_elem_ty,
-                                        parent.decl_ref_mut,
                                     ),
 
+                                    .the_only_possible_value => {
+                                        const duped = try sema.arena.create(Value);
+                                        duped.* = Value.initTag(.the_only_possible_value);
+                                        return beginComptimePtrMutationInner(
+                                            sema,
+                                            block,
+                                            src,
+                                            elem_ty,
+                                            duped,
+                                            ptr_elem_ty,
+                                            parent.decl_ref_mut,
+                                        );
+                                    },
+
                                     else => unreachable,
-                                }
-                            },
-                            else => unreachable,
-                        }
-                    },
-                    .empty_struct => {
-                        const duped = try sema.arena.create(Value);
-                        duped.* = Value.initTag(.the_only_possible_value);
-                        return beginComptimePtrMutationInner(
-                            sema,
-                            block,
-                            src,
-                            parent.ty.structFieldType(field_index, mod),
-                            duped,
-                            ptr_elem_ty,
-                            parent.decl_ref_mut,
-                        );
-                    },
-                    .none => switch (val_ptr.tag()) {
-                        .aggregate => return beginComptimePtrMutationInner(
-                            sema,
-                            block,
-                            src,
-                            parent.ty.structFieldType(field_index, mod),
-                            &val_ptr.castTag(.aggregate).?.data[field_index],
-                            ptr_elem_ty,
-                            parent.decl_ref_mut,
-                        ),
-                        .repeated => {
-                            const arena = parent.beginArena(sema.mod);
-                            defer parent.finishArena(sema.mod);
-
-                            const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
-                            @memset(elems, val_ptr.castTag(.repeated).?.data);
-                            val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
-
-                            return beginComptimePtrMutationInner(
-                                sema,
-                                block,
-                                src,
-                                parent.ty.structFieldType(field_index, mod),
-                                &elems[field_index],
-                                ptr_elem_ty,
-                                parent.decl_ref_mut,
-                            );
+                                },
+                                else => unreachable,
+                            }
                         },
-                        .@"union" => {
-                            // We need to set the active field of the union.
-                            const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod);
-
-                            const payload = &val_ptr.castTag(.@"union").?.data;
-                            payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index);
-
+                        else => {
+                            if (elem_ptr.index != 0) {
+                                // TODO include a "declared here" note for the decl
+                                return sema.fail(block, src, "out of bounds comptime store of index {d}", .{
+                                    elem_ptr.index,
+                                });
+                            }
                             return beginComptimePtrMutationInner(
                                 sema,
                                 block,
                                 src,
-                                parent.ty.structFieldType(field_index, mod),
-                                &payload.val,
+                                parent.ty,
+                                val_ptr,
                                 ptr_elem_ty,
                                 parent.decl_ref_mut,
                             );
                         },
-                        .slice => switch (field_index) {
-                            Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
+                    },
+                    .reinterpret => |reinterpret| {
+                        if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) {
+                            // Even though the parent value type has well-defined memory layout, our
+                            // pointer type does not.
+                            return ComptimePtrMutationKit{
+                                .decl_ref_mut = parent.decl_ref_mut,
+                                .pointee = .bad_ptr_ty,
+                                .ty = elem_ptr.elem_ty,
+                            };
+                        }
+
+                        const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty);
+                        const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64);
+                        return ComptimePtrMutationKit{
+                            .decl_ref_mut = parent.decl_ref_mut,
+                            .pointee = .{ .reinterpret = .{
+                                .val_ptr = reinterpret.val_ptr,
+                                .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index,
+                            } },
+                            .ty = parent.ty,
+                        };
+                    },
+                    .bad_decl_ty, .bad_ptr_ty => return parent,
+                }
+            },
+            .field_ptr => {
+                const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+                const field_index = @intCast(u32, field_ptr.field_index);
+
+                var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty);
+                switch (parent.pointee) {
+                    .direct => |val_ptr| switch (val_ptr.toIntern()) {
+                        .undef => {
+                            // A struct or union has been initialized to undefined at comptime and now we
+                            // are for the first time setting a field. We must change the representation
+                            // of the struct/union from `undef` to `struct`/`union`.
+                            const arena = parent.beginArena(sema.mod);
+                            defer parent.finishArena(sema.mod);
+
+                            switch (parent.ty.zigTypeTag(mod)) {
+                                .Struct => {
+                                    const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
+                                    @memset(fields, Value.undef);
+
+                                    val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
+
+                                    return beginComptimePtrMutationInner(
+                                        sema,
+                                        block,
+                                        src,
+                                        parent.ty.structFieldType(field_index, mod),
+                                        &fields[field_index],
+                                        ptr_elem_ty,
+                                        parent.decl_ref_mut,
+                                    );
+                                },
+                                .Union => {
+                                    const payload = try arena.create(Value.Payload.Union);
+                                    const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
+                                    payload.* = .{ .data = .{
+                                        .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
+                                        .val = Value.undef,
+                                    } };
+
+                                    val_ptr.* = Value.initPayload(&payload.base);
+
+                                    return beginComptimePtrMutationInner(
+                                        sema,
+                                        block,
+                                        src,
+                                        parent.ty.structFieldType(field_index, mod),
+                                        &payload.data.val,
+                                        ptr_elem_ty,
+                                        parent.decl_ref_mut,
+                                    );
+                                },
+                                .Pointer => {
+                                    assert(parent.ty.isSlice(mod));
+                                    val_ptr.* = try Value.Tag.slice.create(arena, .{
+                                        .ptr = Value.undef,
+                                        .len = Value.undef,
+                                    });
+
+                                    switch (field_index) {
+                                        Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
+                                            sema,
+                                            block,
+                                            src,
+                                            parent.ty.slicePtrFieldType(mod),
+                                            &val_ptr.castTag(.slice).?.data.ptr,
+                                            ptr_elem_ty,
+                                            parent.decl_ref_mut,
+                                        ),
+                                        Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
+                                            sema,
+                                            block,
+                                            src,
+                                            Type.usize,
+                                            &val_ptr.castTag(.slice).?.data.len,
+                                            ptr_elem_ty,
+                                            parent.decl_ref_mut,
+                                        ),
+
+                                        else => unreachable,
+                                    }
+                                },
+                                else => unreachable,
+                            }
+                        },
+                        .empty_struct => {
+                            const duped = try sema.arena.create(Value);
+                            duped.* = Value.initTag(.the_only_possible_value);
+                            return beginComptimePtrMutationInner(
                                 sema,
                                 block,
                                 src,
-                                parent.ty.slicePtrFieldType(mod),
-                                &val_ptr.castTag(.slice).?.data.ptr,
+                                parent.ty.structFieldType(field_index, mod),
+                                duped,
                                 ptr_elem_ty,
                                 parent.decl_ref_mut,
-                            ),
-
-                            Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
+                            );
+                        },
+                        .none => switch (val_ptr.tag()) {
+                            .aggregate => return beginComptimePtrMutationInner(
                                 sema,
                                 block,
                                 src,
-                                Type.usize,
-                                &val_ptr.castTag(.slice).?.data.len,
+                                parent.ty.structFieldType(field_index, mod),
+                                &val_ptr.castTag(.aggregate).?.data[field_index],
                                 ptr_elem_ty,
                                 parent.decl_ref_mut,
                             ),
-
+                            .repeated => {
+                                const arena = parent.beginArena(sema.mod);
+                                defer parent.finishArena(sema.mod);
+
+                                const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
+                                @memset(elems, val_ptr.castTag(.repeated).?.data);
+                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+
+                                return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    parent.ty.structFieldType(field_index, mod),
+                                    &elems[field_index],
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                );
+                            },
+                            .@"union" => {
+                                // We need to set the active field of the union.
+                                const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod);
+
+                                const payload = &val_ptr.castTag(.@"union").?.data;
+                                payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index);
+
+                                return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    parent.ty.structFieldType(field_index, mod),
+                                    &payload.val,
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                );
+                            },
+                            .slice => switch (field_index) {
+                                Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    parent.ty.slicePtrFieldType(mod),
+                                    &val_ptr.castTag(.slice).?.data.ptr,
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                ),
+
+                                Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    Type.usize,
+                                    &val_ptr.castTag(.slice).?.data.len,
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                ),
+
+                                else => unreachable,
+                            },
                             else => unreachable,
                         },
                         else => unreachable,
                     },
-                    else => unreachable,
-                },
-                .reinterpret => |reinterpret| {
-                    const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod);
-                    const field_offset = try sema.usizeCast(block, src, field_offset_u64);
-                    return ComptimePtrMutationKit{
-                        .decl_ref_mut = parent.decl_ref_mut,
-                        .pointee = .{ .reinterpret = .{
-                            .val_ptr = reinterpret.val_ptr,
-                            .byte_offset = reinterpret.byte_offset + field_offset,
-                        } },
-                        .ty = parent.ty,
-                    };
-                },
-                .bad_decl_ty, .bad_ptr_ty => return parent,
-            }
-        },
-        .eu_payload_ptr => {
-            const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
-            var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty);
-            switch (parent.pointee) {
-                .direct => |val_ptr| {
-                    const payload_ty = parent.ty.errorUnionPayload(mod);
-                    if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
-                        return ComptimePtrMutationKit{
-                            .decl_ref_mut = parent.decl_ref_mut,
-                            .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
-                            .ty = payload_ty,
-                        };
-                    } else {
-                        // An error union has been initialized to undefined at comptime and now we
-                        // are for the first time setting the payload. We must change the
-                        // representation of the error union from `undef` to `opt_payload`.
-                        const arena = parent.beginArena(sema.mod);
-                        defer parent.finishArena(sema.mod);
-
-                        const payload = try arena.create(Value.Payload.SubValue);
-                        payload.* = .{
-                            .base = .{ .tag = .eu_payload },
-                            .data = Value.undef,
+                    .reinterpret => |reinterpret| {
+                        const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod);
+                        const field_offset = try sema.usizeCast(block, src, field_offset_u64);
+                        return ComptimePtrMutationKit{
+                            .decl_ref_mut = parent.decl_ref_mut,
+                            .pointee = .{ .reinterpret = .{
+                                .val_ptr = reinterpret.val_ptr,
+                                .byte_offset = reinterpret.byte_offset + field_offset,
+                            } },
+                            .ty = parent.ty,
                         };
-
-                        val_ptr.* = Value.initPayload(&payload.base);
-
-@@ -28147,39 +28230,84 @@ fn beginComptimePtrMutation(
-                            .pointee = .{ .direct = &payload.data },
-                            .ty = payload_ty,
-                        };
-                    },
-                    .none => switch (val_ptr.tag()) {
-                        .opt_payload => return ComptimePtrMutationKit{
-                            .decl_ref_mut = parent.decl_ref_mut,
-                            .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
-                            .ty = payload_ty,
+                    },
+                    .bad_decl_ty, .bad_ptr_ty => return parent,
+                }
+            },
+            .eu_payload_ptr => {
+                const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
+                var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty);
+                switch (parent.pointee) {
+                    .direct => |val_ptr| {
+                        const payload_ty = parent.ty.errorUnionPayload(mod);
+                        if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
+                            return ComptimePtrMutationKit{
+                                .decl_ref_mut = parent.decl_ref_mut,
+                                .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
+                                .ty = payload_ty,
+                            };
+                        } else {
+                            // An error union has been initialized to undefined at comptime and now we
+                            // are for the first time setting the payload. We must change the
+                            // representation of the error union from `undef` to `opt_payload`.
+                            const arena = parent.beginArena(sema.mod);
+                            defer parent.finishArena(sema.mod);
+
+                            const payload = try arena.create(Value.Payload.SubValue);
+                            payload.* = .{
+                                .base = .{ .tag = .eu_payload },
+                                .data = Value.undef,
+                            };
+
+                            val_ptr.* = Value.initPayload(&payload.base);
+
+@@ -28147,39 +28230,84 @@ fn beginComptimePtrMutation(
+                                .pointee = .{ .direct = &payload.data },
+                                .ty = payload_ty,
+                            };
+                        }
+                    },
+                    .bad_decl_ty, .bad_ptr_ty => return parent,
+                    // Even though the parent value type has well-defined memory layout, our
+                    // pointer type does not.
+                    .reinterpret => return ComptimePtrMutationKit{
+                        .decl_ref_mut = parent.decl_ref_mut,
+                        .pointee = .bad_ptr_ty,
+                        .ty = eu_ptr.container_ty,
+                    },
+                }
+            },
+            .opt_payload_ptr => {
+                const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else {
+                    return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod));
+                };
+                var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty);
+                switch (parent.pointee) {
+                    .direct => |val_ptr| {
+                        const payload_ty = parent.ty.optionalChild(mod);
+                        switch (val_ptr.toIntern()) {
+                            .undef, .null_value => {
+                                // An optional has been initialized to undefined at comptime and now we
+                                // are for the first time setting the payload. We must change the
+                                // representation of the optional from `undef` to `opt_payload`.
+                                const arena = parent.beginArena(sema.mod);
+                                defer parent.finishArena(sema.mod);
+
+                                const payload = try arena.create(Value.Payload.SubValue);
+                                payload.* = .{
+                                    .base = .{ .tag = .opt_payload },
+                                    .data = Value.undef,
+                                };
+
+                                val_ptr.* = Value.initPayload(&payload.base);
+
+                                return ComptimePtrMutationKit{
+                                    .decl_ref_mut = parent.decl_ref_mut,
+                                    .pointee = .{ .direct = &payload.data },
+                                    .ty = payload_ty,
+                                };
+                            },
+                            .none => switch (val_ptr.tag()) {
+                                .opt_payload => return ComptimePtrMutationKit{
+                                    .decl_ref_mut = parent.decl_ref_mut,
+                                    .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
+                                    .ty = payload_ty,
+                                },
+                                else => return ComptimePtrMutationKit{
+                                    .decl_ref_mut = parent.decl_ref_mut,
+                                    .pointee = .{ .direct = val_ptr },
+                                    .ty = payload_ty,
+                                },
+                            },
                             else => return ComptimePtrMutationKit{
                                 .decl_ref_mut = parent.decl_ref_mut,
                                 .pointee = .{ .direct = val_ptr },
                                 .ty = payload_ty,
                             },
-                        },
-                        else => return ComptimePtrMutationKit{
-                            .decl_ref_mut = parent.decl_ref_mut,
-                            .pointee = .{ .direct = val_ptr },
-                            .ty = payload_ty,
-                        },
-                    }
-                },
-                .bad_decl_ty, .bad_ptr_ty => return parent,
-                // Even though the parent value type has well-defined memory layout, our
-                // pointer type does not.
-                .reinterpret => return ComptimePtrMutationKit{
-                    .decl_ref_mut = parent.decl_ref_mut,
-                    .pointee = .bad_ptr_ty,
-                    .ty = opt_ptr.container_ty,
-                },
-            }
+                        }
+                    },
+                    .bad_decl_ty, .bad_ptr_ty => return parent,
+                    // Even though the parent value type has well-defined memory layout, our
+                    // pointer type does not.
+                    .reinterpret => return ComptimePtrMutationKit{
+                        .decl_ref_mut = parent.decl_ref_mut,
+                        .pointee = .bad_ptr_ty,
+                        .ty = opt_ptr.container_ty,
+                    },
+                }
+            },
+            .decl_ref => unreachable, // isComptimeMutablePtr has been checked already
+            else => unreachable,
+        },
+        else => switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr) {
+            else => unreachable,
         },
-        .decl_ref => unreachable, // isComptimeMutablePtr has been checked already
-        else => unreachable,
     }
 }
@@ -28190,13 +28318,13 @@ fn beginComptimePtrMutationInner(
     decl_ty: Type,
     decl_val: *Value,
     ptr_elem_ty: Type,
-    decl_ref_mut: Value.Payload.DeclRefMut.Data,
+    mut_decl: InternPool.Key.Ptr.Addr.MutDecl,
 ) CompileError!ComptimePtrMutationKit {
     const mod = sema.mod;
     const target = mod.getTarget();
     const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;
 
-    const decl = mod.declPtr(decl_ref_mut.decl_index);
+    const decl = mod.declPtr(mut_decl.decl);
     var decl_arena: std.heap.ArenaAllocator = undefined;
     const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena);
     defer decl.value_arena.?.release(&decl_arena);
@@ -28204,7 +28332,7 @@ fn beginComptimePtrMutationInner(
     if (coerce_ok) {
         return ComptimePtrMutationKit{
-            .decl_ref_mut = decl_ref_mut,
+            .mut_decl = mut_decl,
             .pointee = .{ .direct = decl_val },
             .ty = decl_ty,
         };
@@ -28215,7 +28343,7 @@ fn beginComptimePtrMutationInner(
         const decl_elem_ty = decl_ty.childType(mod);
         if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
             return ComptimePtrMutationKit{
-                .decl_ref_mut = decl_ref_mut,
+                .mut_decl = mut_decl,
                 .pointee = .{ .direct = decl_val },
                 .ty = decl_ty,
             };
@@ -28224,20 +28352,20 @@ fn beginComptimePtrMutationInner(
     if (!decl_ty.hasWellDefinedLayout(mod)) {
         return ComptimePtrMutationKit{
-            .decl_ref_mut = decl_ref_mut,
-            .pointee = .{ .bad_decl_ty = {} },
+            .mut_decl = mut_decl,
+            .pointee = .bad_decl_ty,
             .ty = decl_ty,
         };
     }
     if (!ptr_elem_ty.hasWellDefinedLayout(mod)) {
         return ComptimePtrMutationKit{
-            .decl_ref_mut = decl_ref_mut,
-            .pointee = .{ .bad_ptr_ty = {} },
+            .mut_decl = mut_decl,
+            .pointee = .bad_ptr_ty,
             .ty = ptr_elem_ty,
         };
     }
     return ComptimePtrMutationKit{
-        .decl_ref_mut = decl_ref_mut,
+        .mut_decl = mut_decl,
         .pointee = .{ .reinterpret = .{
             .val_ptr = decl_val,
             .byte_offset = 0,
@@ -28282,7 +28410,7 @@ fn beginComptimePtrLoad(
     const mod = sema.mod;
     const target = mod.getTarget();
 
-    var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+    var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
         .ptr => |ptr| switch (ptr.addr) {
             .decl, .mut_decl => blk: {
                 const decl_index = switch (ptr.addr) {
@@ -28319,7 +28447,7 @@ fn beginComptimePtrLoad(
                     (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
                     (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
                 if (coerce_in_mem_ok) {
-                    const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+                    const payload_val = switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
                         .error_union => |error_union| switch (error_union.val) {
                             .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}),
                             .payload => |payload| payload,
@@ -28462,7 +28590,7 @@ fn beginComptimePtrLoad(
                     },
                     Value.slice_len_index => TypedValue{
                         .ty = Type.usize,
-                        .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(),
+                        .val = mod.intern_pool.indexToKey(tv.val.toIntern()).ptr.len.toValue(),
                     },
                     else => unreachable,
                 };
@@ -28565,9 +28693,9 @@ fn coerceArrayPtrToSlice(
         const ptr_array_ty = sema.typeOf(inst);
         const array_ty = ptr_array_ty.childType(mod);
         const slice_val = try mod.intern(.{ .ptr = .{
-            .ty = dest_ty.ip_index,
-            .addr = mod.intern_pool.indexToKey(val.ip_index).ptr.addr,
-            .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).ip_index,
+            .ty = dest_ty.toIntern(),
+            .addr = mod.intern_pool.indexToKey(val.toIntern()).ptr.addr,
+            .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(),
         } });
         return sema.addConstant(dest_ty, slice_val.toValue());
     }
@@ -28643,7 +28771,7 @@ fn coerceCompatiblePtrs(
         return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced(
             sema.gpa,
             try val.intern(inst_ty, mod),
-            dest_ty.ip_index,
+            dest_ty.toIntern(),
         )).toValue());
     }
     try sema.requireRuntimeBlock(block, inst_src, null);
@@ -28840,7 +28968,7 @@ fn coerceAnonStructToUnion(
         return sema.failWithOwnedErrorMsg(msg);
     }
 
-    const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
+    const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type;
     const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]);
     const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty);
     return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src);
@@ -28916,23 +29044,20 @@ fn coerceArrayLike(
         return block.addBitCast(dest_ty, inst);
     }
 
-    const element_vals = try sema.arena.alloc(Value, dest_len);
+    const element_vals = try sema.arena.alloc(InternPool.Index, dest_len);
     const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
     var runtime_src: ?LazySrcLoc = null;
 
-    for (element_vals, 0..) |*elem, i| {
-        const index_ref = try sema.addConstant(
-            Type.usize,
-            try mod.intValue(Type.usize, i),
-        );
+    for (element_vals, element_refs, 0..) |*val, *ref, i| {
+        const index_ref = try sema.addConstant(Type.usize, try mod.intValue(Type.usize, i));
         const src = inst_src; // TODO better source location
         const elem_src = inst_src; // TODO better source location
         const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
         const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
-        element_refs[i] = coerced;
+        ref.* = coerced;
         if (runtime_src == null) {
             if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| {
-                elem.* = elem_val;
+                val.* = try elem_val.intern(dest_elem_ty, mod);
             } else {
                 runtime_src = elem_src;
             }
@@ -28944,10 +29069,10 @@ fn coerceArrayLike(
         return block.addAggregateInit(dest_ty, element_refs);
     }
 
-    return sema.addConstant(
-        dest_ty,
-        try Value.Tag.aggregate.create(sema.arena, element_vals),
-    );
+    return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{
+        .ty = dest_ty.toIntern(),
+        .storage = .{ .elems = element_vals },
+    } })).toValue());
 }
 
 /// If the lengths match, coerces element-wise.
@@ -28978,25 +29103,26 @@ fn coerceTupleToArray(
     }
 
     const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod));
-    const element_vals = try sema.arena.alloc(Value, dest_elems);
+    const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems);
     const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
     const dest_elem_ty = dest_ty.childType(mod);
 
     var runtime_src: ?LazySrcLoc = null;
-    for (element_vals, 0..) |*elem, i_usize| {
+    for (element_vals, element_refs, 0..) |*val, *ref, i_usize| {
         const i = @intCast(u32, i_usize);
         if (i_usize == inst_len) {
-            elem.* = dest_ty.sentinel(mod).?;
-            element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*);
+            const sentinel_val = dest_ty.sentinel(mod).?;
+            val.* = sentinel_val.toIntern();
+            ref.* = try sema.addConstant(dest_elem_ty, sentinel_val);
             break;
         }
         const elem_src = inst_src; // TODO better source location
         const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i);
         const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
-        element_refs[i] = coerced;
+        ref.* = coerced;
        if (runtime_src == null) {
            if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| {
-                elem.* = elem_val;
+                val.* = try elem_val.intern(dest_elem_ty, mod);
            } else {
                runtime_src = elem_src;
            }
@@ -29008,10 +29134,10 @@ fn coerceTupleToArray(
         return block.addAggregateInit(dest_ty, element_refs);
     }
 
-    return sema.addConstant(
-        dest_ty,
-        try Value.Tag.aggregate.create(sema.arena, element_vals),
-    );
+    return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{
+        .ty = dest_ty.toIntern(),
+        .storage = .{ .elems = element_vals },
+    } })).toValue());
 }
 
 /// If the lengths match, coerces element-wise.
@@ -29079,7 +29205,7 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; var runtime_src: ?LazySrcLoc = null; for (0..anon_struct.types.len) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); @@ -29105,8 +29231,7 @@ fn coerceTupleToStruct( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - assert(field_val.ip_index != .none); - field_vals[field_index] = field_val.ip_index; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -29123,7 +29248,7 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.ip_index == .unreachable_value) { + if (field.default_val.toIntern() == .unreachable_value) { const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -29134,8 +29259,7 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - assert(field.default_val.ip_index != .none); - field_vals[i] = field.default_val.ip_index; + field_vals[i] = field.default_val.toIntern(); } else { field_ref.* = try sema.addConstant(field.ty, field.default_val); } @@ -29152,9 +29276,8 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - assert(struct_ty.ip_index != .none); const struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); errdefer mod.intern_pool.remove(struct_val); @@ -29170,13 +29293,13 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type; + const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type; const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const src_tuple = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; @@ -29209,7 +29332,7 @@ fn coerceTupleToTuple( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val.ip_index; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -29269,7 +29392,7 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, (try mod.intern(.{ .aggregate = .{ - .ty = tuple_ty.ip_index, + .ty = tuple_ty.toIntern(), .storage = .{ .elems = field_vals }, } })).toValue(), ); @@ -29349,7 +29472,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { try sema.maybeQueueFuncBodyAnalysis(decl); try mod.declareDeclDependency(sema.owner_decl_index, decl); const result = try mod.intern(.{ .ptr = .{ - .ty = (try mod.singleConstPtrType(ty)).ip_index, + .ty = (try mod.singleConstPtrType(ty)).toIntern(), .addr = .{ .decl = decl }, } }); return result.toValue(); @@ -29360,8 +29483,8 @@ fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { 
const val = opt_val orelse return Value.null; const ptr_val = try sema.refValue(block, ty, val); const result = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index, - .val = ptr_val.ip_index, + .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).toIntern())).toIntern(), + .val = ptr_val.toIntern(), } }); return result.toValue(); } @@ -29382,7 +29505,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); const ptr_ty = try mod.ptrType(.{ - .elem_type = decl_tv.ty.ip_index, + .elem_type = decl_tv.ty.toIntern(), .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, .address_space = decl.@"addrspace", @@ -29391,7 +29514,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo try sema.maybeQueueFuncBodyAnalysis(decl_index); } return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.ip_index, + .ty = ptr_ty.toIntern(), .addr = .{ .decl = decl_index }, } })).toValue()); } @@ -29415,13 +29538,10 @@ fn analyzeRef( const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - switch (val.ip_index) { - .none => {}, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { - .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), - .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), - else => {}, - }, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), + else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -29617,9 +29737,9 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. 
const set_ty = operand_ty.errorUnionSet(mod); - switch (set_ty.ip_index) { + switch (set_ty.toIntern()) { .anyerror_type => {}, - else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) { .error_set_type => |error_set_type| { if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; }, @@ -30027,7 +30147,7 @@ fn analyzeSlice( return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( sema.gpa, try new_ptr_val.intern(new_ptr_ty, mod), - return_ty.ip_index, + return_ty.toIntern(), )).toValue()); } @@ -30546,8 +30666,8 @@ fn wrapOptional( ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ - .ty = dest_ty.ip_index, - .val = val.ip_index, + .ty = dest_ty.toIntern(), + .val = val.toIntern(), } })).toValue()); } @@ -30567,8 +30687,8 @@ fn wrapErrorUnionPayload( const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ - .ty = dest_ty.ip_index, - .val = .{ .payload = val.ip_index }, + .ty = dest_ty.toIntern(), + .val = .{ .payload = val.toIntern() }, } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30588,17 +30708,17 @@ fn wrapErrorUnionSet( const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.ip_index) { + switch (dest_err_set_ty.toIntern()) { .anyerror_type => {}, - else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { + else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { .error_set_type => |error_set_type| ok: { - const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |ies_index| ok: { const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. @@ -31252,34 +31372,31 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. 
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (val.ip_index) { - .none => {}, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => {}, - .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), - }, - .ptr => |ptr| { - switch (ptr.addr) { - .decl, .mut_decl => {}, - .int => |int| try sema.resolveLazyValue(int.toValue()), - .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), - .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), - .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), - } - if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => {}, - .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), - .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), - }, - .un => |un| { - try sema.resolveLazyValue(un.tag.toValue()); - try sema.resolveLazyValue(un.val.toValue()); - }, - else => {}, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl, .mut_decl => {}, + .int => |int| try sema.resolveLazyValue(int.toValue()), + .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), + .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), + .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + } + if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => {}, + .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), + .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), }, + .un => |un| { + try sema.resolveLazyValue(un.tag.toValue()); + try sema.resolveLazyValue(un.val.toValue()); + }, + else => {}, } } @@ -31617,9 +31734,9 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -31776,7 +31893,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => return sema.resolveStructFully(ty), .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { @@ -31869,7 +31986,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { const mod = sema.mod; - switch (ty.ip_index) { + switch (ty.toIntern()) { .var_args_param_type => unreachable, .none => unreachable, @@ -31960,7 +32077,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .call_modifier_type => return 
sema.getBuiltinType("CallModifier"), .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), - _ => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + _ => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty; try sema.resolveTypeFieldsStruct(ty, struct_obj); @@ -32605,7 +32722,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. union_obj.tag_ty = provided_ty; - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; @@ -32698,7 +32815,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk val; }; - enum_field_vals[field_i] = copied_val.ip_index; + enum_field_vals[field_i] = copied_val.toIntern(); const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{ .ty = int_tag_ty, .mod = mod, @@ -32960,7 +33077,7 @@ fn generateUnionTagTypeSimple( .tag_ty = if (enum_field_names.len == 0) .noreturn_type else - (try mod.smallestUnsignedInt(enum_field_names.len - 1)).ip_index, + (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, .values = &.{}, .tag_mode = .auto, @@ -33053,9 +33170,9 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => Value.empty_struct, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -33074,13 +33191,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { inline .array_type, .vector_type => |seq_type| { if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue(); if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = opv.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, } })).toValue(); } return null; @@ -33169,7 +33286,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // This TODO is repeated in the redundant implementation of // one-possible-value in type.zig. const empty = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } }); return empty.toValue(); @@ -33182,7 +33299,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // In this case the struct has all comptime-known fields and // therefore has one possible value. 
return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = tuple.values }, } })).toValue(); }, @@ -33208,9 +33325,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse return null; const only = try mod.intern(.{ .un = .{ - .ty = resolved_ty.ip_index, - .tag = tag_val.ip_index, - .val = val_val.ip_index, + .ty = resolved_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), } }); return only.toValue(); }, @@ -33221,8 +33338,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, - .int = int_opv.ip_index, + .ty = ty.toIntern(), + .int = int_opv.toIntern(), } }); return only.toValue(); } @@ -33234,7 +33351,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { 1 => { if (enum_type.values.len == 0) { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try mod.intern(.{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = 0 }, @@ -33285,21 +33402,13 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - if (ty.ip_index != .none) { - if (@enumToInt(ty.ip_index) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index)); - try sema.air_instructions.append(sema.gpa, .{ - .tag = .interned, - .data = .{ .interned = ty.ip_index }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - } else { - try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - } + if (@enumToInt(ty.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.toIntern())); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .interned, + .data = .{ .interned = ty.toIntern() }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -33313,12 +33422,12 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; - if (val.ip_index != .none and val.ip_index != .null_value) { - if (@enumToInt(val.ip_index) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index)); + if (val.ip_index != .none) { + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ .tag = .interned, - .data = .{ .interned = val.ip_index }, + .data = .{ .interned = val.toIntern() }, }); const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); // This assertion can be removed when the `ty` parameter is removed from @@ -33417,7 +33526,7 @@ fn analyzeComptimeAlloc( try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_type.ip_index, + .ty = ptr_type.toIntern(), .addr = .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, @@ -33589,7 +33698,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This logic must be kept in sync with 
`Type.isPtrLikeOptional`. fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => null, .C => ptr_type.elem_type.toType(), @@ -33624,10 +33733,10 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => return false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -33873,7 +33982,7 @@ fn anonStructFieldIndex( field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; - const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; for (anon_struct.names, 0..) |name, i| { if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { return @intCast(u32, i); @@ -33891,14 +34000,17 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intAddScalar(lhs, rhs, ty); } @@ -33945,14 +34057,17 @@ fn numberAddWrapScalar( fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intSubScalar(lhs, rhs, ty); } @@ -34004,18 +34119,26 @@ fn intSubWithOverflow( ) !Value.OverflowArithmeticResult { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - for (result_data, 0..) 
|*scalar, i| { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); + const result_data = try sema.arena.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) |*of, *scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); + of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return Value.OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } return sema.intSubWithOverflowScalar(lhs, rhs, ty); @@ -34057,13 +34180,17 @@ fn floatToInt( ) CompileError!Value { const mod = sema.mod; if (float_ty.zigTypeTag(mod) == .Vector) { - const elem_ty = float_ty.childType(mod); - const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod)); + const elem_ty = float_ty.scalarType(mod); + const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod)); + const scalar_ty = int_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(sema.mod, i); - scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod)); + scalar.* = try (try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod))).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = int_ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.floatToIntScalar(block, src, val, float_ty, int_ty); } @@ -34139,16 +34266,16 @@ fn intFitsInType( vector_index: ?*usize, ) CompileError!bool { const mod = sema.mod; - if (ty.ip_index == .comptime_int_type) return true; + if (ty.toIntern() == .comptime_int_type) return true; const info = ty.intInfo(mod); - switch (val.ip_index) { + switch (val.toIntern()) { .undef, .zero, .zero_usize, .zero_u8, => return true, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable, .extern_func, .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); @@ -34219,12 +34346,12 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { /// Asserts the type is an enum. fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const mod = sema.mod; - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; assert(enum_type.tag_mode != .nonexhaustive); // The `tagValueIndex` function call below relies on the type being the integer tag type. 
// `getCoerced` assumes the value will fit the new type. if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; - const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty); + const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.toIntern(), enum_type.tag_ty); return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null; } @@ -34237,18 +34364,26 @@ fn intAddWithOverflow( ) !Value.OverflowArithmeticResult { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - for (result_data, 0..) |*scalar, i| { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); + const result_data = try sema.arena.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) |*of, *scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); + of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return Value.OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } return sema.intAddWithOverflowScalar(lhs, rhs, ty); @@ -34340,14 +34475,17 @@ fn compareVector( ) !Value { const mod = sema.mod; assert(ty.zigTypeTag(mod) == .Vector); - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); - scalar.* = Value.makeBool(res_bool); + scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .u1_type })).toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } /// Returns the type of a pointer to an element. 
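A note on the recurring pattern in the Sema hunks above: aggregate results that were previously built with `Value.Tag.aggregate.create(sema.arena, ...)` are now interned via `mod.intern(.{ .aggregate = .{ .ty = ..., .storage = .{ .elems = ... } } })`, the element buffers change from `sema.arena.alloc(Value, n)` to `sema.arena.alloc(InternPool.Index, n)` with each element interned before storage, and direct reads of `val.ip_index` become `val.toIntern()`. Below is a minimal, self-contained sketch of that interning idea, not the compiler's actual InternPool API: `Pool`, `Key`, and `Index` are illustrative stand-ins for `InternPool`, `InternPool.Key`, and `InternPool.Index`, and the builtins deliberately use the same pre-0.11 spellings (`@intToEnum`, two-argument `@intCast`) as the surrounding diff.

// Toy model of the interning pattern this commit migrates Sema toward.
// NOT the compiler's actual InternPool API; names are illustrative only.
// Values are deduplicated into a shared pool, and code passes around
// 32-bit indices instead of arena-allocated Value payloads.
const std = @import("std");

const Index = enum(u32) { none = std.math.maxInt(u32), _ };

const Key = union(enum) {
    int: i64,
    boolean: bool,
};

const Pool = struct {
    items: std.ArrayList(Key),
    map: std.AutoHashMap(Key, Index),

    fn init(gpa: std.mem.Allocator) Pool {
        return .{
            .items = std.ArrayList(Key).init(gpa),
            .map = std.AutoHashMap(Key, Index).init(gpa),
        };
    }

    fn deinit(pool: *Pool) void {
        pool.items.deinit();
        pool.map.deinit();
    }

    // Analogous to `mod.intern(...)` in the diff: deduplicate or append,
    // returning a small stable index.
    fn intern(pool: *Pool, key: Key) !Index {
        const gop = try pool.map.getOrPut(key);
        if (!gop.found_existing) {
            gop.value_ptr.* = @intToEnum(Index, @intCast(u32, pool.items.items.len));
            try pool.items.append(key);
        }
        return gop.value_ptr.*;
    }

    // Analogous to `mod.intern_pool.indexToKey(...)`: recover the full
    // value from its index.
    fn indexToKey(pool: *const Pool, index: Index) Key {
        return pool.items.items[@enumToInt(index)];
    }
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var pool = Pool.init(gpa.allocator());
    defer pool.deinit();

    const a = try pool.intern(.{ .int = 42 });
    const b = try pool.intern(.{ .int = 42 });
    std.debug.assert(a == b); // equal values intern to the same index
    std.debug.assert(pool.indexToKey(a).int == 42);
}

Because `intern` deduplicates, equal values always map to the same `Index`, which is what allows types and values to be compared by a single integer. Incidentally, in the `intSubWithOverflow` and `intAddWithOverflow` hunks above, the `overflow_bit` aggregate is interned with `ty.toIntern()` while the `wrapped_result` aggregate receives the freshly built `u1` vector type; the two type arguments appear transposed, though the surrounding logic is otherwise symmetric.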
diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 2222c1060e..d82fb72dea 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -103,10 +103,26 @@ pub fn print( return writer.writeAll(" }"); }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(elem_tv, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); }, // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index faf158e2a4..16b103c898 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -846,7 +846,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -6169,7 +6168,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 778662fe86..f0a44b72a8 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -830,7 +830,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -6117,7 +6116,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a9cd130fa8..7f4715a451 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -660,7 +660,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -2571,7 +2570,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index dc086dc00f..9f44dc0e8a 100644 --- a/src/arch/sparc64/CodeGen.zig +++ 
b/src/arch/sparc64/CodeGen.zig @@ -680,7 +680,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -4567,7 +4566,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst), } } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 66c0399343..85fc8346f8 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1833,7 +1833,6 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { .constant => unreachable, - .const_ty => unreachable, .interned => unreachable, .add => func.airBinOp(inst, .add), @@ -6903,28 +6902,12 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { .child = .u8_type, .sentinel = .zero_u8, }); - const string_bytes = &mod.string_literal_bytes; - try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, @as([]const u8, tag_name), Module.StringLiteralAdapter{ - .bytes = string_bytes, - }, Module.StringLiteralContext{ - .bytes = string_bytes, - }); - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, tag_name.len), - }; - string_bytes.appendSliceAssumeCapacity(tag_name); - gop.value_ptr.* = .none; - } - var name_val_payload: Value.Payload.StrLit = .{ - .base = .{ .tag = .str_lit }, - .data = gop.key_ptr.*, - }; - const name_val = Value.initPayload(&name_val_payload.base); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = tag_name }, + } }); const tag_sym_index = try func.bin_file.lowerUnnamedConst( - .{ .ty = name_ty, .val = name_val }, + .{ .ty = name_ty, .val = name_val.toValue() }, enum_decl_index, ); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4a5532a239..f2ac985844 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1923,7 +1923,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), @@ -2099,7 +2098,7 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. 
fn processDeath(self: *Self, inst: Air.Inst.Index) void { switch (self.air.instructions.items(.tag)[inst]) { - .constant, .const_ty => unreachable, + .constant => unreachable, else => self.inst_tracking.getPtr(inst).?.die(self, inst), } } @@ -11593,7 +11592,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { })); break :tracking gop.value_ptr; }, - .const_ty => unreachable, else => self.inst_tracking.getPtr(inst).?, }.short; switch (mcv) { @@ -11608,7 +11606,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { .constant => &self.const_tracking, - .const_ty => unreachable, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { diff --git a/src/codegen.zig b/src/codegen.zig index b9b7dac90f..f343f0441d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -204,150 +204,6 @@ pub fn generateSymbol( return .ok; } - if (typed_value.val.ip_index == .none) switch (typed_value.ty.zigTypeTag(mod)) { - .Array => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); - // The bytes payload already includes the sentinel, if any - try code.ensureUnusedCapacity(len); - code.appendSliceAssumeCapacity(bytes[0..len]); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try code.ensureUnusedCapacity(bytes.len + 1); - code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - code.appendAssumeCapacity(byte); - } - return Result.ok; - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for array type value: {s}", - .{@tagName(typed_value.val.tag())}, - ), - }, - }, - .Struct => { - if (typed_value.ty.containerLayout(mod) == .Packed) { - const struct_obj = mod.typeToStruct(typed_value.ty).?; - const fields = struct_obj.fields.values(); - const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - const current_pos = code.items.len; - try code.resize(current_pos + abi_size); - var bits: u16 = 0; - - for (field_vals, 0..) |field_val, index| { - const field_ty = fields[index].ty; - // pointer may point to a decl which must be marked used - // but can also result in a relocation. Therefore we handle those seperately. 
- if (field_ty.zigTypeTag(mod) == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; - var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); - defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, &tmp_list, debug_output, reloc_info)) { - .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), - .fail => |em| return Result{ .fail = em }, - } - } else { - field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; - } - bits += @intCast(u16, field_ty.bitSize(mod)); - } - - return Result.ok; - } - - const struct_begin = code.items.len; - const field_vals = typed_value.val.castTag(.aggregate).?.data; - for (field_vals, 0..) |field_val, index| { - const field_ty = typed_value.ty.structFieldType(index, mod); - if (!field_ty.hasRuntimeBits(mod)) continue; - - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - const unpadded_field_end = code.items.len - struct_begin; - - // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); - const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - else => unreachable, - }, - .Frame, - .AnyFrame, - => return .{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO generateSymbol for type {}", - .{typed_value.ty.fmt(mod)}, - ) }, - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { .int_type, .ptr_type, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1bb8130b1f..76533b4284 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -870,7 +870,7 @@ pub const DeclGen = struct { } // First try specific tag representations for more efficiency. 
- switch (val.ip_index) { + switch (val.toIntern()) { .undef => { const ai = ty.arrayInfo(mod); try writer.writeByte('{'); @@ -893,24 +893,6 @@ pub const DeclGen = struct { try writer.writeByte('}'); return; }, - .none => switch (val.tag()) { - .bytes, .str_lit => |t| { - const bytes = switch (t) { - .bytes => val.castTag(.bytes).?.data, - .str_lit => bytes: { - const str_lit = val.castTag(.str_lit).?.data; - break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - }, - else => unreachable, - }; - const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; - try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), - }); - return; - }, - else => {}, - }, else => {}, } // Fall back to generic implementation. @@ -2909,7 +2891,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .arg => try airArg(f, inst), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f8ddddad1c..e54b951aa6 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1501,7 +1501,7 @@ pub const Object = struct { } const ip = &mod.intern_pool; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len); defer gpa.free(enumerators); @@ -1697,7 +1697,7 @@ pub const Object = struct { return ptr_di_ty; }, .Opaque => { - if (ty.ip_index == .anyopaque_type) { + if (ty.toIntern() == .anyopaque_type) { const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -1981,7 +1981,7 @@ pub const Object = struct { break :blk fwd_decl; }; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; defer di_fields.deinit(gpa); @@ -2466,7 +2466,7 @@ pub const DeclGen = struct { global.setGlobalConstant(.True); break :init_val decl.val; }; - if (init_val.ip_index != .unreachable_value) { + if (init_val.toIntern() != .unreachable_value) { const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); @@ -2802,12 +2802,12 @@ pub const DeclGen = struct { return dg.context.pointerType(llvm_addrspace); }, .Opaque => { - if (t.ip_index == .anyopaque_type) return dg.context.intType(8); + if (t.toIntern() == .anyopaque_type) return dg.context.intType(8); const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; + const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; const name = try mod.opaqueFullyQualifiedName(opaque_type); defer gpa.free(name); @@ -2897,7 +2897,7 @@ pub const DeclGen = struct { const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) { .anon_struct_type 
=> |tuple| { const llvm_struct_ty = dg.context.structCreateNamed(""); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -3199,7 +3199,7 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); var tv = arg_tv; - switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .runtime_value => |rt| tv.val = rt.val.toValue(), else => {}, } @@ -3208,284 +3208,7 @@ pub const DeclGen = struct { return llvm_type.getUndef(); } - if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) { - .Array => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - else => unreachable, - }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const gpa = dg.gpa; - - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); - - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(mod, i), - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; - - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, i), - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } - - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, field_and_index.index), - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) 
|*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - - switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -3553,7 +3276,7 @@ pub const DeclGen = struct { const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, .val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.ip_index }), + .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), .payload => |payload| payload, }.toValue(), }); @@ -3700,7 +3423,7 @@ pub const DeclGen = struct { fields_buf[0] = try dg.lowerValue(.{ .ty = payload_ty, .val = switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), else => |payload| payload, }.toValue(), }); @@ -3711,7 +3434,7 @@ pub const DeclGen = struct { } return dg.context.constStruct(&fields_buf, llvm_field_count, .False); }, - .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { .array_type => switch (aggregate.storage) { .bytes => |bytes| return dg.context.constString( bytes.ptr, @@ -3802,7 +3525,7 @@ pub const DeclGen = struct { const llvm_struct_ty = try dg.lowerType(tv.ty); const gpa = dg.gpa; - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { .anon_struct_type => |tuple| { var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; defer llvm_fields.deinit(gpa); @@ -3967,9 +3690,9 @@ pub const DeclGen = struct { }, .un => { const llvm_union_ty = try dg.lowerType(tv.ty); - const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { + const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { .none => tv.val.castTag(.@"union").?.data, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, else => unreachable, }, @@ -4107,7 +3830,7 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .int => |int| dg.lowerIntAsPtr(int), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), @@ -4799,7 +4522,6 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), .constant => unreachable, - .const_ty => unreachable, .interned => unreachable, .unreach => self.airUnreach(inst), @@ -6108,7 +5830,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = 
self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.ip_index, + .elem_type = llvm_field.ty.toIntern(), .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); if (isByRef(field_ty, mod)) { @@ -6984,7 +6706,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.ip_index, + .elem_type = llvm_field.ty.toIntern(), .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); return self.load(field_ptr, field_ptr_ty); @@ -8915,7 +8637,7 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl); @@ -8988,7 +8710,7 @@ pub const FuncGen = struct { fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl); @@ -10529,7 +10251,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { var offset: u64 = 0; var big_align: u32 = 0; - const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var llvm_field_index: c_uint = 0; for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { @@ -10927,7 +10649,7 @@ const ParamTypeIterator = struct { .riscv32, .riscv64 => { it.zig_index += 1; it.llvm_index += 1; - if (ty.ip_index == .f16_type) { + if (ty.toIntern() == .f16_type) { return .as_u16; } switch (riscv_c_abi.classifyType(ty, mod)) { @@ -11146,7 +10868,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Struct => { // Packed structs are represented to LLVM as integers. 
if (ty.containerLayout(mod) == .Packed) return false; - const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; for (tuple.types, tuple.values) |field_ty, field_val| { @@ -11261,7 +10983,7 @@ fn backendSupportsF128(target: std.Target) bool { /// LLVM does not support all relevant intrinsics for all targets, so we /// may need to manually generate a libc call fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { - return switch (scalar_ty.ip_index) { + return switch (scalar_ty.toIntern()) { .f16_type => backendSupportsF16(target), .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 96c723989a..2b04e03a5a 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -616,7 +616,7 @@ pub const DeclGen = struct { const mod = dg.module; var val = arg_val; - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |rt| val = rt.val.toValue(), else => {}, } @@ -626,75 +626,7 @@ pub const DeclGen = struct { return try self.addUndef(size); } - if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { - .Array => switch (val.tag()) { - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try self.addBytes(bytes); - if (ty.sentinel(mod)) |sentinel| { - try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); - } - }, - .bytes => { - const bytes = val.castTag(.bytes).?.data; - try self.addBytes(bytes); - }, - else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}), - }, - .Struct => { - if (ty.isSimpleTupleOrAnonStruct(mod)) { - unreachable; // TODO - } else { - const struct_ty = mod.typeToStruct(ty).?; - - if (struct_ty.layout == .Packed) { - return dg.todo("packed struct constants", .{}); - } - - const struct_begin = self.size; - const field_vals = val.castTag(.aggregate).?.data; - for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; - try self.lower(field.ty, field_vals[i]); - - // Add padding if required. - // TODO: Add to type generation as well? 
- const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, mod); - const padding = padded_field_end - unpadded_field_end; - try self.addUndef(padding); - } - } - }, - .Vector, - .Frame, - .AnyFrame, - => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -1876,7 +1808,6 @@ pub const DeclGen = struct { .breakpoint => return, .cond_br => return self.airCondBr(inst), .constant => unreachable, - .const_ty => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 9169a88bbc..58e4029543 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -95,7 +95,7 @@ const Writer = struct { for (w.air.instructions.items(.tag), 0..) |tag, i| { const inst = @intCast(Air.Inst.Index, i); switch (tag) { - .constant, .const_ty, .interned => { + .constant, .interned => { try w.writeInst(s, inst); try s.writeByte('\n'); }, @@ -226,7 +226,6 @@ const Writer = struct { .save_err_return_trace_index, => try w.writeNoOp(s, inst), - .const_ty, .alloc, .ret_ptr, .err_return_trace, diff --git a/src/value.zig b/src/value.zig index 47215e588c..ef3a3f6be1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -37,8 +37,9 @@ pub const Value = struct { /// A slice of u8 whose memory is managed externally. bytes, - /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`. - str_lit, + /// This value is repeated some number of times. The amount of times to repeat + /// is stored externally. + repeated, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. 
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -57,9 +58,9 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .bytes => Payload.Bytes, + .repeated => Payload.SubValue, - .str_lit => Payload.StrLit, + .bytes => Payload.Bytes, .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, @@ -171,7 +172,18 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), + .repeated => { + const payload = self.cast(Payload.SubValue).?; + const new_payload = try arena.create(Payload.SubValue); + new_payload.* = .{ + .base = payload.base, + .data = try payload.data.copy(arena), + }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; + }, .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -187,7 +199,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .@"union" => { const tag_and_val = self.castTag(.@"union").?.data; const new_payload = try arena.create(Payload.Union); @@ -203,7 +214,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, } @@ -237,7 +247,7 @@ pub const Value = struct { ) !void { comptime assert(fmt.len == 0); if (start_val.ip_index != .none) { - try out_stream.print("(interned: {})", .{start_val.ip_index}); + try out_stream.print("(interned: {})", .{start_val.toIntern()}); return; } var val = start_val; @@ -249,11 +259,9 @@ pub const Value = struct { return out_stream.writeAll("(union value)"); }, .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return out_stream.print("(.str_lit index={d} len={d})", .{ - str_lit.index, str_lit.len, - }); + .repeated => { + try out_stream.writeAll("(repeated) "); + val = val.castTag(.repeated).?.data; }, .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), @@ -274,40 +282,24 @@ pub const Value = struct { /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => { - const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); - const adjusted_bytes = bytes[0..adjusted_len]; - return allocator.dupe(u8, adjusted_bytes); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return allocator.dupe(u8, bytes); - }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), - .ptr => |ptr| switch (ptr.len) { - .none => unreachable, - else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| try allocator.dupe(u8, bytes), - .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), - .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; }, - else => unreachable, }, - } + else => unreachable, + }; } fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { @@ -320,13 +312,13 @@ pub const Value = struct { } pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); + if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), ty.toIntern()); switch (val.tag()) { .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); - const ty_key = mod.intern_pool.indexToKey(ty.ip_index); + const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| new_elem.* = try old_elem.intern(switch (ty_key) { .struct_type => ty.structFieldType(field_i, mod), @@ -335,14 +327,14 @@ pub const Value = struct { else => unreachable, }, mod); return mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = new_elems }, } }); }, .@"union" => { const pl = val.castTag(.@"union").?.data; return mod.intern(.{ .un = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .tag = try pl.tag.intern(ty.unionTagTypeHypothetical(mod), mod), .val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod), } }); @@ -353,13 +345,15 @@ pub const Value = struct { pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { if (val.ip_index == .none) return val; - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| return Tag.bytes.create(arena, try arena.dupe(u8, bytes)), .elems => |old_elems| { const new_elems = try arena.alloc(Value, old_elems.len); for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, + .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), }, else => return val, } @@ -372,40 +366,38 @@ pub const Value = struct { /// Asserts that the value is representable as a type. pub fn toType(self: Value) Type { - return self.ip_index.toType(); + return self.toIntern().toType(); } pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { const ip = &mod.intern_pool; - switch (val.ip_index) { - else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; - return switch (ip.indexToKey(ty.ip_index)) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - enum_type.values[field_index].toValue() - else // Field index and integer values are the same. - mod.intValue(enum_type.tag_ty.toType(), field_index), - else => unreachable, - }; - }, - .enum_type => |enum_type| (try ip.getCoerced( - mod.gpa, - val.ip_index, - enum_type.tag_ty, - )).toValue(), - else => unreachable, + return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + return switch (ip.indexToKey(ty.toIntern())) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; }, - } + .enum_type => |enum_type| (try ip.getCoerced( + mod.gpa, + val.toIntern(), + enum_type.tag_ty, + )).toValue(), + else => unreachable, + }; } pub fn tagName(val: Value, mod: *Module) []const u8 { const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.ip_index)) { + const enum_tag = switch (ip.indexToKey(val.toIntern())) { .un => |un| ip.indexToKey(un.tag).enum_tag, .enum_tag => |x| x, .enum_literal => |name| return ip.stringToSlice(name), @@ -413,7 +405,7 @@ pub const Value = struct { }; const enum_type = ip.indexToKey(enum_tag.ty).enum_type; const field_index = field_index: { - const field_index = enum_type.tagValueIndex(ip, val.ip_index).?; + const field_index = enum_type.tagValueIndex(ip, val.toIntern()).?; break :field_index @intCast(u32, field_index); }; const field_name = enum_type.names[field_index]; @@ -432,12 +424,12 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), .int => |int| switch (int.storage) { .u64, .i64, .big_int => int.storage.toBigInt(space), @@ -475,18 +467,18 @@ pub const Value = struct { } pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex { - return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none; + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none; } pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { .extern_func => |extern_func| extern_func, else => null, } else null; } pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable, else => null, } else null; @@ -501,11 +493,11 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, @@ -531,11 +523,11 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *Module) i64 { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, @@ -549,7 +541,7 @@ pub const Value = struct { } pub fn toBool(val: Value, _: *const Module) bool { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_true => true, .bool_false => false, else => unreachable, @@ -558,7 +550,7 @@ pub const Value = struct { fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) { + while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return true, .eu_payload, .opt_payload => |index| check = index.toValue(), @@ -644,7 +636,7 @@ pub const Value = struct { .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const name = switch (mod.intern_pool.indexToKey(val.ip_index)) { + const name = switch (mod.intern_pool.indexToKey(val.toIntern())) { .err => |err| err.name, .error_union => |error_union| error_union.val.err_name, else => unreachable, @@ -718,7 +710,7 @@ pub const Value = struct { if (abi_size == 0) return; if (abi_size <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.ip_index); + const ip_key = mod.intern_pool.indexToKey(int_val.toIntern()); const int: u64 = switch (ip_key.int.storage) { .u64 => |x| x, .i64 => |x| @bitCast(u64, x), @@ -847,7 +839,7 @@ pub const Value = struct { } }, .Float => return (try mod.intern(.{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) }, 32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) }, @@ -860,13 +852,16 @@ pub const Value = struct { .Array => { const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { - elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena); + elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); offset += @intCast(usize, elem_size); } - return Tag.aggregate.create(arena, elems); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue(); }, .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes @@ -878,13 +873,16 @@ pub const Value = struct { .Auto => unreachable, // Sema is supposed to have emitted a compile 
error already .Extern => { const fields = ty.structFields(mod).values(); - const field_vals = try arena.alloc(Value, fields.len); - for (fields, 0..) |field, i| { + const field_vals = try arena.alloc(InternPool.Index, fields.len); + for (field_vals, fields, 0..) |*field_val, field, i| { const off = @intCast(usize, ty.structFieldOffset(i, mod)); - const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod)); - field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); + const sz = @intCast(usize, field.ty.abiSize(mod)); + field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod); } - return Tag.aggregate.create(arena, field_vals); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); }, .Packed => { const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; @@ -897,7 +895,7 @@ pub const Value = struct { const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); const name = mod.error_name_list.items[@intCast(usize, int)]; return (try mod.intern(.{ .err = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .name = mod.intern_pool.getString(name).unwrap().?, } })).toValue(); }, @@ -961,7 +959,7 @@ pub const Value = struct { } }, .Float => return (try mod.intern(.{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, 32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) }, @@ -973,17 +971,20 @@ pub const Value = struct { } })).toValue(), .Vector => { const elem_ty = ty.childType(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); var bits: u16 = 0; const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); for (elems, 0..) |_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; - elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena); + elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod); bits += elem_bit_size; } - return Tag.aggregate.create(arena, elems); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue(); }, .Struct => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already @@ -991,13 +992,16 @@ pub const Value = struct { .Packed => { var bits: u16 = 0; const fields = ty.structFields(mod).values(); - const field_vals = try arena.alloc(Value, fields.len); + const field_vals = try arena.alloc(InternPool.Index, fields.len); for (fields, 0..) |field, i| { const field_bits = @intCast(u16, field.ty.bitSize(mod)); - field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena); + field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod); bits += field_bits; } - return Tag.aggregate.create(arena, field_vals); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); }, }, .Pointer => { @@ -1015,7 +1019,7 @@ pub const Value = struct { /// Asserts that the value is a float or an integer. 
pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), inline .u64, .i64 => |x| { @@ -1119,7 +1123,7 @@ pub const Value = struct { pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value { const target = mod.getTarget(); return (try mod.intern(.{ .float = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .storage = switch (dest_ty.floatBits(target)) { 16 => .{ .f16 = self.toFloat(f16, mod) }, 32 => .{ .f32 = self.toFloat(f32, mod) }, @@ -1133,7 +1137,7 @@ pub const Value = struct { /// Asserts the value is a float pub fn floatHasFraction(self: Value, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(self.ip_index)) { + return switch (mod.intern_pool.indexToKey(self.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| @rem(x, 1) != 0, }, @@ -1150,10 +1154,10 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - return switch (lhs.ip_index) { + return switch (lhs.toIntern()) { .bool_false => .eq, .bool_true => .gt, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => .gt, .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), @@ -1212,8 +1216,8 @@ pub const Value = struct { const lhs_tag = lhs.tag(); const rhs_tag = rhs.tag(); if (lhs_tag == rhs_tag) { - const lhs_storage = mod.intern_pool.indexToKey(lhs.ip_index).float.storage; - const rhs_storage = mod.intern_pool.indexToKey(rhs.ip_index).float.storage; + const lhs_storage = mod.intern_pool.indexToKey(lhs.toIntern()).float.storage; + const rhs_storage = mod.intern_pool.indexToKey(rhs.toIntern()).float.storage; const lhs128: f128 = switch (lhs_storage) { inline else => |x| x, }; @@ -1336,46 +1340,20 @@ pub const Value = struct { } } - switch (lhs.ip_index) { - .none => switch (lhs.tag()) { - .aggregate => { - for (lhs.castTag(.aggregate).?.data) |elem_val| { - if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; - } - return true; - }, - .str_lit => { - const str_lit = lhs.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - .bytes => { - const bytes = lhs.castTag(.bytes).?.data; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - else => {}, + switch (mod.intern_pool.indexToKey(lhs.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| if (std.math.isNan(x)) return op == .neq, }, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { - .float => |float| switch (float.storage) { - inline else => |x| if (std.math.isNan(x)) return op == .neq, - }, - .aggregate => |aggregate| return switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| { - if (!std.math.order(byte, 0).compare(op)) break false; - } else true, - .elems => |elems| for (elems) |elem| { - if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; - } else true, - .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), - }, - 
else => {}, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), }, + else => {}, } return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); } @@ -1412,7 +1390,7 @@ pub const Value = struct { const b_field_vals = b.castTag(.aggregate).?.data; assert(a_field_vals.len == b_field_vals.len); - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct| { assert(anon_struct.types.len == a_field_vals.len); for (anon_struct.types, 0..) |field_ty, i| { @@ -1577,7 +1555,7 @@ pub const Value = struct { // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.ip_index); + std.hash.autoHash(hasher, val.toIntern()); return; } const zig_ty_tag = ty.zigTypeTag(mod); @@ -1663,7 +1641,7 @@ pub const Value = struct { // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.ip_index); + std.hash.autoHash(hasher, val.toIntern()); return; } @@ -1703,7 +1681,7 @@ pub const Value = struct { }, .Union => { hasher.update(val.tagName(mod)); - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .un => |un| { const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); @@ -1746,7 +1724,7 @@ pub const Value = struct { }; pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .mut_decl, .comptime_field => true, .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod), @@ -1758,8 +1736,8 @@ pub const Value = struct { } pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { - return val.isComptimeMutablePtr(mod) or switch (val.ip_index) { - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + return val.isComptimeMutablePtr(mod) or switch (val.toIntern()) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => false, .payload => |payload| payload.toValue().canMutateComptimeVarState(mod), @@ -1785,7 +1763,7 @@ pub const Value = struct { /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.decl, .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, @@ -1811,35 +1789,19 @@ pub const Value = struct { pub const slice_len_index = 1; pub fn slicePtr(val: Value, mod: *Module) Value { - return mod.intern_pool.slicePtr(val.ip_index).toValue(); + return mod.intern_pool.slicePtr(val.toIntern()).toValue(); } pub fn sliceLen(val: Value, mod: *Module) u64 { - return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); + return mod.intern_pool.sliceLen(val.toIntern()).toValue().toUnsignedInt(mod); } /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - switch (val.ip_index) { + switch (val.toIntern()) { .undef => return Value.undef, - .none => switch (val.tag()) { - .bytes => { - const byte = val.castTag(.bytes).?.data[index]; - return mod.intValue(Type.u8, byte); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const byte = bytes[index]; - return mod.intValue(Type.u8, byte); - }, - - .aggregate => return val.castTag(.aggregate).?.data[index], - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -1871,26 +1833,26 @@ pub const Value = struct { } pub fn isLazyAlign(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| int.storage == .lazy_align, else => false, }; } pub fn isLazySize(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| int.storage == .lazy_size, else => false, }; } pub fn isRuntimeValue(val: Value, mod: *Module) bool { - return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value; + return mod.intern_pool.indexToKey(val.toIntern()) == .runtime_value; } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => true, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1913,7 +1875,7 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.is_threadlocal, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1943,55 +1905,30 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - 
return Tag.str_lit.create(arena, .{ - .index = @intCast(u32, str_lit.index + start), - .len = @intCast(u32, end - start), - }); - }, + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), + .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), - .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), - else => unreachable, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = mod.intern_pool.typeOf(val.toIntern()), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, }, - .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ - .ty = mod.intern_pool.typeOf(val.ip_index), - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, - }, - } })).toValue(), - else => unreachable, - }, + } })).toValue(), + else => unreachable, }; } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - switch (val.ip_index) { + switch (val.toIntern()) { .undef => return Value.undef, - .none => switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - return field_values[index]; - }, - .@"union" => { - const payload = val.castTag(.@"union").?.data; - // TODO assert the tag is correct - return payload.val; - }, - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -2000,6 +1937,8 @@ pub const Value = struct { .elems => |elems| elems[index], .repeated_elem => |elem| elem, }.toValue(), + // TODO assert the tag is correct + .un => |un| un.val.toValue(), else => unreachable, }, } @@ -2007,7 +1946,7 @@ pub const Value = struct { pub fn unionTag(val: Value, mod: *Module) Value { if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef, .enum_tag => val, .un => |un| un.tag.toValue(), else => unreachable, @@ -2022,12 +1961,12 @@ pub const Value = struct { mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) { + const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if 
(mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -2043,9 +1982,9 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .elem = .{ - .base = ptr_val.ip_index, + .base = ptr_val.toIntern(), .index = index, } }, } })).toValue(); @@ -2053,7 +1992,7 @@ pub const Value = struct { pub fn isUndef(val: Value, mod: *Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, else => false, @@ -2070,15 +2009,9 @@ pub const Value = struct { /// Returns true if any value contained in `self` is undefined. pub fn anyUndef(val: Value, mod: *Module) !bool { if (val.ip_index == .none) return false; - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => true, - .none => switch (val.tag()) { - .aggregate => for (val.castTag(.aggregate).?.data) |field| { - if (try field.anyUndef(mod)) break true; - } else false, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, .ptr => |ptr| switch (ptr.len) { @@ -2098,13 +2031,13 @@ pub const Value = struct { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. pub fn isNull(val: Value, mod: *Module) bool { - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => unreachable, .unreachable_value => unreachable, .null_value => true, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => { var buf: BigIntSpace = undefined; return val.toBigInt(&buf, mod).eqZero(); @@ -2120,7 +2053,7 @@ pub const Value = struct { /// something is an error or not because it works without having to figure out the /// string. pub fn getError(self: Value, mod: *const Module) ?[]const u8 { - return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) { + return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.toIntern())) { .err => |err| err.name.toOptional(), .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| err_name.toOptional(), @@ -2133,12 +2066,12 @@ pub const Value = struct { /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { - return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload; + return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload; } /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) { + return switch (mod.intern_pool.indexToKey(val.toIntern()).opt.val) { .none => null, else => |index| index.toValue(), }; @@ -2146,14 +2079,9 @@ pub const Value = struct { /// Valid for all types. Asserts the value is not undefined. 
pub fn isFloat(self: Value, mod: *const Module) bool { - return switch (self.ip_index) { + return switch (self.toIntern()) { .undef => unreachable, - .none => switch (self.tag()) { - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(self.ip_index)) { + else => switch (mod.intern_pool.indexToKey(self.toIntern())) { .float => true, else => false, }, @@ -2169,21 +2097,24 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema); + scalar.* = try (try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intToFloatScalar(val, float_ty, mod, opt_sema); } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => val, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| { const float = bigIntToFloat(big_int.limbs, big_int.positive); @@ -2217,7 +2148,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .storage = storage, } })).toValue(); } @@ -2245,14 +2176,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intAddSatScalar(lhs, rhs, ty, arena, mod); } @@ -2292,14 +2226,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intSubSatScalar(lhs, rhs, ty, arena, mod); } @@ -2338,19 +2275,26 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const vec_len = ty.vectorLen(mod); + const overflowed_data = try arena.alloc(InternPool.Index, vec_len); + const result_data = try arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { + for (overflowed_data, result_data, 0..) |*of, *scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(arena, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(arena, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); @@ -2400,13 +2344,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return numberMulWrapScalar(lhs, rhs, ty, arena, mod); } @@ -2442,13 +2390,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intMulSatScalar(lhs, rhs, ty, arena, mod);
     }
@@ -2515,12 +2467,16 @@ pub const Value = struct {
     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return bitwiseNotScalar(val, ty, arena, mod);
     }
@@ -2552,13 +2508,17 @@ pub const Value = struct {
     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
+                scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return bitwiseAndScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2586,13 +2546,17 @@ pub const Value = struct {
     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
     }
@@ -2609,13 +2573,17 @@ pub const Value = struct {
     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
+                scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return bitwiseOrScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2642,14 +2610,17 @@ pub const Value = struct {
     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return bitwiseXorScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2676,14 +2647,17 @@ pub const Value = struct {
 
     pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intDivScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2715,14 +2689,17 @@ pub const Value = struct {
 
     pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intDivFloorScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2754,14 +2731,17 @@ pub const Value = struct {
 
     pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intModScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2794,7 +2774,7 @@ pub const Value = struct {
     /// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
     pub fn isNan(val: Value, mod: *const Module) bool {
         if (val.ip_index == .none) return false;
-        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .float => |float| switch (float.storage) {
                 inline else => |x| std.math.isNan(x),
             },
@@ -2805,7 +2785,7 @@ pub const Value = struct {
 
     /// Returns true if the value is a floating point type and is infinite. Returns false otherwise.
     pub fn isInf(val: Value, mod: *const Module) bool {
         if (val.ip_index == .none) return false;
-        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .float => |float| switch (float.storage) {
                 inline else => |x| std.math.isInf(x),
             },
@@ -2815,7 +2795,7 @@ pub const Value = struct {
 
     pub fn isNegativeInf(val: Value, mod: *const Module) bool {
         if (val.ip_index == .none) return false;
-        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .float => |float| switch (float.storage) {
                 inline else => |x| std.math.isNegativeInf(x),
             },
@@ -2825,13 +2805,17 @@ pub const Value = struct {
 
     pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatRemScalar(lhs, rhs, float_type, mod);
     }
@@ -2847,20 +2831,24 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatModScalar(lhs, rhs, float_type, mod);
     }
@@ -2876,21 +2864,24 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intMulScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -2918,13 +2909,16 @@ pub const Value = struct {
 
     pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod);
+                scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intTruncScalar(val, ty, allocator, signedness, bits, mod);
     }
@@ -2939,14 +2933,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
                 const bits_elem = try bits.elemValue(mod, i);
-                scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod);
+                scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod);
     }
@@ -2976,14 +2973,17 @@ pub const Value = struct {
 
     pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -3015,18 +3015,26 @@ pub const Value = struct {
         mod: *Module,
    ) !OverflowArithmeticResult {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod));
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
-            for (result_data, 0..) |*scalar, i| {
+            const vec_len = ty.vectorLen(mod);
+            const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
+            const result_data = try allocator.alloc(InternPool.Index, vec_len);
+            const scalar_ty = ty.scalarType(mod);
+            for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
-                overflowed_data[i] = of_math_result.overflow_bit;
-                scalar.* = of_math_result.wrapped_result;
+                const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+                scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
             }
             return OverflowArithmeticResult{
-                .overflow_bit = try Value.Tag.aggregate.create(allocator, overflowed_data),
-                .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data),
+                .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                    .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                    .storage = .{ .elems = overflowed_data },
+                } })).toValue(),
+                .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = result_data },
+                } })).toValue(),
             };
         }
         return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod);
@@ -3071,13 +3079,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlSatScalar(lhs, rhs, ty, arena, mod);
     }
@@ -3117,13 +3129,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlTruncScalar(lhs, rhs, ty, arena, mod);
     }
@@ -3143,14 +3159,17 @@ pub const Value = struct {
 
     pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
             const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shrScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -3193,12 +3212,16 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatNegScalar(val, float_type, mod);
     }
@@ -3218,7 +3241,7 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3231,13 +3254,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatAddScalar(lhs, rhs, float_type, mod);
     }
@@ -3258,7 +3285,7 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3271,13 +3298,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatSubScalar(lhs, rhs, float_type, mod);
     }
@@ -3298,7 +3329,7 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3311,13 +3342,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatDivScalar(lhs, rhs, float_type, mod);
     }
@@ -3338,7 +3373,7 @@ pub const Value = struct {
             else => unreachable,
        };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3351,13 +3386,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatDivFloorScalar(lhs, rhs, float_type, mod);
     }
@@ -3378,7 +3417,7 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3391,13 +3430,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatDivTruncScalar(lhs, rhs, float_type, mod);
     }
@@ -3418,7 +3461,7 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3431,13 +3474,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod);
+                scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floatMulScalar(lhs, rhs, float_type, mod);
     }
@@ -3458,19 +3505,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return sqrtScalar(val, float_type, mod);
     }
@@ -3486,19 +3537,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return sinScalar(val, float_type, mod);
     }
@@ -3514,19 +3569,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return cosScalar(val, float_type, mod);
     }
@@ -3542,19 +3601,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return tanScalar(val, float_type, mod);
     }
@@ -3570,19 +3633,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try expScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return expScalar(val, float_type, mod);
     }
@@ -3598,19 +3665,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return exp2Scalar(val, float_type, mod);
     }
@@ -3626,19 +3697,23 @@ pub const Value = struct {
             else => unreachable,
        };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try logScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return logScalar(val, float_type, mod);
     }
@@ -3654,19 +3729,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return log2Scalar(val, float_type, mod);
     }
@@ -3682,19 +3761,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return log10Scalar(val, float_type, mod);
     }
@@ -3710,19 +3793,23 @@ pub const Value = struct {
             else => unreachable,
        };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try fabsScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return fabsScalar(val, float_type, mod);
     }
@@ -3738,19 +3825,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return floorScalar(val, float_type, mod);
     }
@@ -3766,19 +3857,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
        }
         return ceilScalar(val, float_type, mod);
     }
@@ -3794,19 +3889,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return roundScalar(val, float_type, mod);
     }
@@ -3822,19 +3921,23 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
 
     pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
-                scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), mod);
+                scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return truncScalar(val, float_type, mod);
     }
@@ -3850,7 +3953,7 @@ pub const Value = struct {
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3864,20 +3967,18 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const mulend1_elem = try mulend1.elemValue(mod, i);
                 const mulend2_elem = try mulend2.elemValue(mod, i);
                 const addend_elem = try addend.elemValue(mod, i);
-                scalar.* = try mulAddScalar(
-                    float_type.scalarType(mod),
-                    mulend1_elem,
-                    mulend2_elem,
-                    addend_elem,
-                    mod,
-                );
+                scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return mulAddScalar(float_type, mulend1, mulend2, addend, mod);
     }
@@ -3899,7 +4000,7 @@ pub const Value = struct {
             else => unreachable,
        };
         return (try mod.intern(.{ .float = .{
-            .ty = float_type.ip_index,
+            .ty = float_type.toIntern(),
             .storage = storage,
         } })).toValue();
     }
@@ -3931,22 +4032,22 @@ pub const Value = struct {
     }
 
     pub fn isGenericPoison(val: Value) bool {
-        return val.ip_index == .generic_poison;
+        return val.toIntern() == .generic_poison;
     }
 
     /// This type is not copyable since it may contain pointers to its inner data.
     pub const Payload = struct {
         tag: Tag,
 
-        pub const Bytes = struct {
+        pub const SubValue = struct {
             base: Payload,
-            /// Includes the sentinel, if any.
-            data: []const u8,
+            data: Value,
         };
 
-        pub const StrLit = struct {
+        pub const Bytes = struct {
             base: Payload,
-            data: Module.StringLiteralContext.Key,
+            /// Includes the sentinel, if any.
+            data: []const u8,
         };
 
         pub const Aggregate = struct {
-- cgit v1.2.3


From 70cc68e9994f7dca53904075e15b2b6f87342539 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Thu, 25 May 2023 19:23:01 -0400
Subject: Air: remove constant tag

Some uses have been moved to their own tag, the rest use interned.

Also, finish porting comptime mutation to be more InternPool aware.
---
 src/Air.zig                   |   18 +-
 src/InternPool.zig            |   18 +-
 src/Liveness.zig              |   22 +-
 src/Liveness/Verify.zig       |   40 +-
 src/Module.zig                |    9 +-
 src/Sema.zig                  | 1245 +++++++++++++++++++----------------
 src/TypedValue.zig            |   54 ++
 src/arch/aarch64/CodeGen.zig  |   12 +-
 src/arch/arm/CodeGen.zig      |   12 +-
 src/arch/riscv64/CodeGen.zig  |   12 +-
 src/arch/sparc64/CodeGen.zig  |   12 +-
 src/arch/wasm/CodeGen.zig     |    5 +-
 src/arch/x86_64/CodeGen.zig   |   19 +-
 src/codegen/c.zig             |    5 +-
 src/codegen/llvm.zig          |    3 +-
 src/codegen/spirv.zig         |    1 -
 src/print_air.zig             |   12 +-
 src/value.zig                 |  198 ++++---
 tools/lldb_pretty_printers.py |    6 +
 19 files changed, 863 insertions(+), 840 deletions(-)

(limited to 'src/arch/riscv64/CodeGen.zig')

diff --git a/src/Air.zig b/src/Air.zig
index 4f36cf8bc1..95ed7d33f1 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -186,6 +186,14 @@ pub const Inst = struct {
         /// Allocates stack local memory.
         /// Uses the `ty` field.
         alloc,
+        /// This is a special value that tracks a set of types that have been stored
+        /// to an inferred allocation. It does not support any of the normal value queries.
+        /// Uses the `ty_pl` field, payload is an index of `values` array.
+        inferred_alloc,
+        /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
+        /// instructions for comptime code.
+        /// Uses the `ty_pl` field, payload is an index of `values` array.
+        inferred_alloc_comptime,
         /// If the function will pass the result by-ref, this instruction returns the
         /// result pointer. Otherwise it is equivalent to `alloc`.
         /// Uses the `ty` field.
@@ -397,9 +405,6 @@ pub const Inst = struct {
         /// was executed on the operand.
         /// Uses the `ty_pl` field. Payload is `TryPtr`.
         try_ptr,
-        /// A comptime-known value. Uses the `ty_pl` field, payload is index of
-        /// `values` array.
-        constant,
         /// A comptime-known value via an index into the InternPool.
         /// Uses the `interned` field.
         interned,
@@ -1265,7 +1270,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
         .assembly,
         .block,
-        .constant,
         .struct_field_ptr,
         .struct_field_val,
         .slice_elem_ptr,
@@ -1283,6 +1287,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
         .sub_with_overflow,
         .mul_with_overflow,
         .shl_with_overflow,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ptr_add,
         .ptr_sub,
         .try_ptr,
@@ -1495,7 +1501,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
     const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index);
     const air_datas = air.instructions.items(.data);
     switch (air.instructions.items(.tag)[inst_index]) {
-        .constant => return air.values[air_datas[inst_index].ty_pl.payload],
         .interned => return air_datas[inst_index].interned.toValue(),
         else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod),
     }
@@ -1603,6 +1608,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool {
         .mul_with_overflow,
         .shl_with_overflow,
         .alloc,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ret_ptr,
         .bit_and,
         .bit_or,
@@ -1651,7 +1658,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool {
         .cmp_neq_optimized,
         .cmp_vector,
         .cmp_vector_optimized,
-        .constant,
         .interned,
         .is_null,
         .is_non_null,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 429b86a8a6..dfde352600 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -515,10 +515,12 @@ pub const Key = union(enum) {
 
     pub const ErrorUnion = struct {
         ty: Index,
-        val: union(enum) {
+        val: Value,
+
+        pub const Value = union(enum) {
             err_name: NullTerminatedString,
             payload: Index,
-        },
+        };
     };
 
     pub const EnumTag = struct {
@@ -1068,7 +1070,7 @@ pub const Key = union(enum) {
                 .false, .true => .bool_type,
                 .empty_struct => .empty_struct_type,
                 .@"unreachable" => .noreturn_type,
-                .generic_poison => unreachable,
+                .generic_poison => .generic_poison_type,
             },
         };
     }
@@ -2671,6 +2673,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .only_possible_value => {
             const ty = @intToEnum(Index, data);
             return switch (ip.indexToKey(ty)) {
+                .array_type, .vector_type => .{ .aggregate = .{
+                    .ty = ty,
+                    .storage = .{ .elems = &.{} },
+                } },
                 // TODO: migrate structs to properly use the InternPool rather
                 // than using the SegmentedList trick, then the struct type will
                 // have a slice of comptime values that can be used here for when
@@ -3184,7 +3190,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
             try ip.items.ensureUnusedCapacity(gpa, 1);
             ip.items.appendAssumeCapacity(.{
-                .tag = .ptr_elem,
+                .tag = switch (ptr.addr) {
+                    .elem => .ptr_elem,
+                    .field => .ptr_field,
+                    else => unreachable,
+                },
                 .data = try ip.addExtra(gpa, PtrBaseIndex{
                     .ty = ptr.ty,
                     .base = base_index.base,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index c30708e140..4f3d87d3c2 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -321,8 +321,9 @@ pub fn categorizeOperand(
 
         .arg,
         .alloc,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ret_ptr,
-        .constant,
         .interned,
         .trap,
         .breakpoint,
@@ -973,9 +974,7 @@ fn analyzeInst(
         .work_group_id,
         => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }),

-        .constant,
-        .interned,
-        => unreachable,
+        .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,

         .trap,
         .unreach,
@@ -1269,10 +1268,7 @@ fn analyzeOperands(
             const operand = Air.refToIndexAllowNone(op_ref) orelse continue;

             // Don't compute any liveness for constants
-            switch (inst_tags[operand]) {
-                .constant, .interned => continue,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) continue;

             _ = try data.live_set.put(gpa, operand, {});
         }
@@ -1305,10 +1301,7 @@ fn analyzeOperands(
             const operand = Air.refToIndexAllowNone(op_ref) orelse continue;

             // Don't compute any liveness for constants
-            switch (inst_tags[operand]) {
-                .constant, .interned => continue,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) continue;

             const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
@@ -1839,10 +1832,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {

             // Don't compute any liveness for constants
             const inst_tags = big.a.air.instructions.items(.tag);
-            switch (inst_tags[operand]) {
-                .constant, .interned => return,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) return;

             // If our result is unused and the instruction doesn't need to be lowered, backends will
             // skip the lowering of this instruction, so we don't want to record uses of operands.
diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig
index 703d561559..cbb7f9f143 100644
--- a/src/Liveness/Verify.zig
+++ b/src/Liveness/Verify.zig
@@ -41,8 +41,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             // no operands
             .arg,
             .alloc,
+            .inferred_alloc,
+            .inferred_alloc_comptime,
             .ret_ptr,
-            .constant,
             .interned,
             .breakpoint,
             .dbg_stmt,
@@ -554,16 +555,18 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err
 }

 fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
-    const operand = Air.refToIndexAllowNone(op_ref) orelse return;
-    switch (self.air.instructions.items(.tag)[operand]) {
-        .constant, .interned => {},
-        else => {
-            if (dies) {
-                if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
-            } else {
-                if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
-            }
-        },
+    const operand = Air.refToIndexAllowNone(op_ref) orelse {
+        assert(!dies);
+        return;
+    };
+    if (self.air.instructions.items(.tag)[operand] == .interned) {
+        assert(!dies);
+        return;
+    }
+    if (dies) {
+        if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
+    } else {
+        if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
     }
 }

@@ -576,16 +579,11 @@ fn verifyInst(
         const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
         try self.verifyOperand(inst, operand, dies);
     }
-    const tag = self.air.instructions.items(.tag);
-    switch (tag[inst]) {
-        .constant, .interned => unreachable,
-        else => {
-            if (self.liveness.isUnused(inst)) {
-                assert(!self.live.contains(inst));
-            } else {
-                try self.live.putNoClobber(self.gpa, inst, {});
-            }
-        },
+    if (self.air.instructions.items(.tag)[inst] == .interned) return;
+    if (self.liveness.isUnused(inst)) {
+        assert(!self.live.contains(inst));
+    } else {
+        try self.live.putNoClobber(self.gpa, inst, {});
     }
 }

diff --git a/src/Module.zig b/src/Module.zig
index 47f7643b9f..d3045631c5 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -764,14 +764,7 @@ pub const Decl = struct {

     pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
         if (!decl.has_tv) return error.AnalysisFail;
-        return TypedValue{
-            .ty = decl.ty,
-            .val = decl.val,
-        };
-    }
-
-    pub fn value(decl: *Decl) error{AnalysisFail}!Value {
-        return (try decl.typedValue()).val;
-    }
+        return TypedValue{ .ty = decl.ty, .val = decl.val };
     }

     pub fn isFunction(decl: Decl, mod: *const Module) !bool {
diff --git a/src/Sema.zig b/src/Sema.zig
index a7416af286..55adc2fffb 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1991,23 +1991,21 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
     const i = int - InternPool.static_len;
     const air_tags = sema.air_instructions.items(.tag);
     if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
-        if (air_tags[i] == .constant) {
-            const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
-            const val = sema.air_values.items[ty_pl.payload];
+        if (air_tags[i] == .interned) {
+            const interned = sema.air_instructions.items(.data)[i].interned;
+            const val = interned.toValue();
             if (val.getVariable(sema.mod) != null) return val;
         }
         return opv;
     }
     const air_datas = sema.air_instructions.items(.data);
     switch (air_tags[i]) {
-        .constant => {
-            const ty_pl = air_datas[i].ty_pl;
-            const val = sema.air_values.items[ty_pl.payload];
+        .interned => {
+            const val = air_datas[i].interned.toValue();
             if (val.isRuntimeValue(sema.mod)) make_runtime.* = true;
             if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true;
             return val;
         },
-        .interned => return air_datas[i].interned.toValue(),
         else => return null,
     }
 }
@@ -2440,64 +2438,64 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     const addr_space = target_util.defaultAddressSpace(target, .local);

     if (Air.refToIndex(ptr)) |ptr_inst| {
-        if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) {
-            const air_datas = sema.air_instructions.items(.data);
-            const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
-            switch (ptr_val.tag()) {
-                .inferred_alloc => {
-                    const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data;
-                    // Add the stored instruction to the set we will use to resolve peer types
-                    // for the inferred allocation.
-                    // This instruction will not make it to codegen; it is only to participate
-                    // in the `stored_inst_list` of the `inferred_alloc`.
-                    var trash_block = block.makeSubBlock();
-                    defer trash_block.instructions.deinit(sema.gpa);
-                    const operand = try trash_block.addBitCast(pointee_ty, .void_value);
-
-                    const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
-                        .pointee_type = pointee_ty,
-                        .@"align" = inferred_alloc.alignment,
-                        .@"addrspace" = addr_space,
-                    });
-                    const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
+        switch (sema.air_instructions.items(.tag)[ptr_inst]) {
+            .inferred_alloc => {
+                const air_datas = sema.air_instructions.items(.data);
+                const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
+                const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data;
+                // Add the stored instruction to the set we will use to resolve peer types
+                // for the inferred allocation.
+                // This instruction will not make it to codegen; it is only to participate
+                // in the `stored_inst_list` of the `inferred_alloc`.
+                var trash_block = block.makeSubBlock();
+                defer trash_block.instructions.deinit(sema.gpa);
+                const operand = try trash_block.addBitCast(pointee_ty, .void_value);
+
+                const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+                    .pointee_type = pointee_ty,
+                    .@"align" = inferred_alloc.alignment,
+                    .@"addrspace" = addr_space,
+                });
+                const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);

-                    try inferred_alloc.prongs.append(sema.arena, .{
-                        .stored_inst = operand,
-                        .placeholder = Air.refToIndex(bitcasted_ptr).?,
-                    });
+                try inferred_alloc.prongs.append(sema.arena, .{
+                    .stored_inst = operand,
+                    .placeholder = Air.refToIndex(bitcasted_ptr).?,
+                });

-                    return bitcasted_ptr;
-                },
-                .inferred_alloc_comptime => {
-                    const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
-                    // There will be only one coerce_result_ptr because we are running at comptime.
-                    // The alloc will turn into a Decl.
-                    var anon_decl = try block.startAnonDecl();
-                    defer anon_decl.deinit();
-                    iac.data.decl_index = try anon_decl.finish(
-                        pointee_ty,
-                        Value.undef,
-                        iac.data.alignment,
-                    );
-                    if (iac.data.alignment != 0) {
-                        try sema.resolveTypeLayout(pointee_ty);
-                    }
-                    const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
-                        .pointee_type = pointee_ty,
-                        .@"align" = iac.data.alignment,
-                        .@"addrspace" = addr_space,
-                    });
-                    try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index);
-                    return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{
-                        .ty = ptr_ty.toIntern(),
-                        .addr = .{ .mut_decl = .{
-                            .decl = iac.data.decl_index,
-                            .runtime_index = block.runtime_index,
-                        } },
-                    } })).toValue());
-                },
-                else => {},
-            }
+                return bitcasted_ptr;
+            },
+            .inferred_alloc_comptime => {
+                const air_datas = sema.air_instructions.items(.data);
+                const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
+                const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
+                // There will be only one coerce_result_ptr because we are running at comptime.
+                // The alloc will turn into a Decl.
+                var anon_decl = try block.startAnonDecl();
+                defer anon_decl.deinit();
+                iac.data.decl_index = try anon_decl.finish(
+                    pointee_ty,
+                    Value.undef,
+                    iac.data.alignment,
+                );
+                if (iac.data.alignment != 0) {
+                    try sema.resolveTypeLayout(pointee_ty);
+                }
+                const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+                    .pointee_type = pointee_ty,
+                    .@"align" = iac.data.alignment,
+                    .@"addrspace" = addr_space,
+                });
+                try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index);
+                return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{
+                    .ty = ptr_ty.toIntern(),
+                    .addr = .{ .mut_decl = .{
+                        .decl = iac.data.decl_index,
+                        .runtime_index = block.runtime_index,
+                    } },
+                } })).toValue());
+            },
+            else => {},
         }
     }

@@ -3458,6 +3456,7 @@ fn zirAllocExtended(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
+    const gpa = sema.gpa;
     const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
     const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node };
     const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node };
@@ -3487,13 +3486,19 @@ fn zirAllocExtended(
     if (small.has_type) {
         return sema.analyzeComptimeAlloc(block, var_ty, alignment);
     } else {
-        return sema.addConstant(
-            inferred_alloc_ty,
-            try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
-                .decl_index = undefined,
-                .alignment = alignment,
-            }),
-        );
+        const ty_inst = try sema.addType(inferred_alloc_ty);
+        try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
+            .decl_index = undefined,
+            .alignment = alignment,
+        }));
+        try sema.air_instructions.append(gpa, .{
+            .tag = .inferred_alloc_comptime,
+            .data = .{ .ty_pl = .{
+                .ty = ty_inst,
+                .payload = @intCast(u32, sema.air_values.items.len - 1),
+            } },
+        });
+        return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
     }
 }

@@ -3511,17 +3516,19 @@
         return block.addTy(.alloc, ptr_type);
     }

-    // `Sema.addConstant` does not add the instruction to the block because it is
-    // not needed in the case of constant values. However here, we plan to "downgrade"
-    // to a normal instruction when we hit `resolve_inferred_alloc`. So we append
-    // to the block even though it is currently a `.constant`.
-    const result = try sema.addConstant(
-        inferred_alloc_ty,
-        try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }),
-    );
-    try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
-    try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {});
-    return result;
+    const ty_inst = try sema.addType(inferred_alloc_ty);
+    try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{
+        .alignment = alignment,
+    }));
+    const result_index = try block.addInstAsIndex(.{
+        .tag = .inferred_alloc,
+        .data = .{ .ty_pl = .{
+            .ty = ty_inst,
+            .payload = @intCast(u32, sema.air_values.items.len - 1),
+        } },
+    });
+    try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {});
+    return Air.indexToRef(result_index);
 }

 fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -3616,16 +3623,24 @@ fn zirAllocInferredComptime(
     inst: Zir.Inst.Index,
     inferred_alloc_ty: Type,
 ) CompileError!Air.Inst.Ref {
+    const gpa = sema.gpa;
     const src_node = sema.code.instructions.items(.data)[inst].node;
     const src = LazySrcLoc.nodeOffset(src_node);
     sema.src = src;
-    return sema.addConstant(
-        inferred_alloc_ty,
-        try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
-            .decl_index = undefined,
-            .alignment = 0,
-        }),
-    );
+
+    const ty_inst = try sema.addType(inferred_alloc_ty);
+    try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
+        .decl_index = undefined,
+        .alignment = 0,
+    }));
+    try sema.air_instructions.append(gpa, .{
+        .tag = .inferred_alloc_comptime,
+        .data = .{ .ty_pl = .{
+            .ty = ty_inst,
+            .payload = @intCast(u32, sema.air_values.items.len - 1),
+        } },
+    });
+    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
 }

 fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -3676,31 +3691,39 @@ fn zirAllocInferred(
     const tracy = trace(@src());
     defer tracy.end();

+    const gpa = sema.gpa;
     const src_node = sema.code.instructions.items(.data)[inst].node;
     const src = LazySrcLoc.nodeOffset(src_node);
     sema.src = src;

+    const ty_inst = try sema.addType(inferred_alloc_ty);
     if (block.is_comptime) {
-        return sema.addConstant(
-            inferred_alloc_ty,
-            try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
-                .decl_index = undefined,
-                .alignment = 0,
-            }),
-        );
+        try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{
+            .decl_index = undefined,
+            .alignment = 0,
+        }));
+        try sema.air_instructions.append(gpa, .{
+            .tag = .inferred_alloc_comptime,
+            .data = .{ .ty_pl = .{
+                .ty = ty_inst,
+                .payload = @intCast(u32, sema.air_values.items.len - 1),
+            } },
+        });
+        return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
     }

-    // `Sema.addConstant` does not add the instruction to the block because it is
-    // not needed in the case of constant values. However here, we plan to "downgrade"
-    // to a normal instruction when we hit `resolve_inferred_alloc`. So we append
-    // to the block even though it is currently a `.constant`.
-    const result = try sema.addConstant(
-        inferred_alloc_ty,
-        try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }),
-    );
-    try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
-    try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {});
-    return result;
+    try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{
+        .alignment = 0,
+    }));
+    const result_index = try block.addInstAsIndex(.{
+        .tag = .inferred_alloc,
+        .data = .{ .ty_pl = .{
+            .ty = ty_inst,
+            .payload = @intCast(u32, sema.air_values.items.len - 1),
+        } },
+    });
+    try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {});
+    return Air.indexToRef(result_index);
 }

 fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -3712,7 +3735,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
     const ptr = try sema.resolveInst(inst_data.operand);
     const ptr_inst = Air.refToIndex(ptr).?;
-    assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
     const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload;
     const ptr_val = sema.air_values.items[value_index];
     const var_is_mut = switch (sema.typeOf(ptr).toIntern()) {
@@ -3722,7 +3744,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     };
     const target = sema.mod.getTarget();

-    switch (ptr_val.tag()) {
+    switch (sema.air_instructions.items(.tag)[ptr_inst]) {
         .inferred_alloc_comptime => {
             const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
             const decl_index = iac.data.decl_index;
@@ -3767,7 +3789,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com

     // Detect if the value is comptime-known.
In such a case, the
 // last 3 AIR instructions of the block will look like this:
 //
- //   %a = constant
+ //   %a = interned
 //   %b = bitcast(%a)
 //   %c = store(%b, %d)
 //
@@ -3814,7 +3836,7 @@
 const candidate = block.instructions.items[search_index];
 switch (air_tags[candidate]) {
 .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
- .constant => break candidate,
+ .interned => break candidate,
 else => break :ct,
 }
 };
@@ -4981,15 +5003,15 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
 const src: LazySrcLoc = sema.src;
 blk: {
 const ptr_inst = Air.refToIndex(ptr) orelse break :blk;
- if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk;
- const air_datas = sema.air_instructions.items(.data);
- const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
- switch (ptr_val.tag()) {
+ const air_data = sema.air_instructions.items(.data)[ptr_inst];
+ switch (sema.air_instructions.items(.tag)[ptr_inst]) {
 .inferred_alloc_comptime => {
+ const ptr_val = sema.air_values.items[air_data.ty_pl.payload];
 const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
 return sema.storeToInferredAllocComptime(block, src, operand, iac);
 },
 .inferred_alloc => {
+ const ptr_val = sema.air_values.items[air_data.ty_pl.payload];
 const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
 return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc);
 },
@@ -5009,11 +5031,10 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
 const ptr = try sema.resolveInst(bin_inst.lhs);
 const operand = try sema.resolveInst(bin_inst.rhs);
 const ptr_inst = Air.refToIndex(ptr).?;
- assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
 const air_datas = sema.air_instructions.items(.data);
 const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
- switch (ptr_val.tag()) {
+ switch (sema.air_instructions.items(.tag)[ptr_inst]) {
 .inferred_alloc_comptime => {
 const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
 return sema.storeToInferredAllocComptime(block, src, operand, iac);
@@ -6988,16 +7009,7 @@ fn analyzeCall(
 const res2: Air.Inst.Ref = res2: {
 if (should_memoize and is_comptime_call) {
 if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| {
- const ty_inst = try sema.addType(fn_ret_ty);
- try sema.air_values.append(gpa, result.val);
- sema.air_instructions.set(block_inst, .{
- .tag = .constant,
- .data = .{ .ty_pl = .{
- .ty = ty_inst,
- .payload = @intCast(u32, sema.air_values.items.len - 1),
- } },
- });
- break :res2 Air.indexToRef(block_inst);
+ break :res2 try sema.addConstant(fn_ret_ty, result.val);
 }
 }
@@ -9407,7 +9419,7 @@ fn zirParam(
 if (is_comptime) {
 // If this is a comptime parameter we can add a constant generic_poison
 // since this is also a generic parameter.
- const result = try sema.addConstant(param_ty, Value.generic_poison);
+ const result = try sema.addConstant(Type.generic_poison, Value.generic_poison);
 sema.inst_map.putAssumeCapacityNoClobber(inst, result);
 } else {
 // Otherwise we need a dummy runtime instruction.
@@ -15104,7 +15116,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -25378,8 +25390,8 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(result_ty, elem_ptr); }; const result_ty = try sema.elemPtrType(indexable_ty, null); @@ -25424,8 +25436,9 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod); - if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr_val = try indexable_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } break :rs indexable_src; @@ -25684,7 +25697,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod); + const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25740,8 +25753,9 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); - if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } runtime_src = slice_src; @@ -25800,7 +25814,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25916,7 +25930,10 @@ fn coerceExtra( // null to ?T if (inst_ty.zigTypeTag(mod) == .Null) { - return sema.addConstant(dest_ty, Value.null); + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.toIntern(), + .val = .none, + } })).toValue()); } // cast from ?*T and ?[*]T to ?*anyopaque @@ -27665,43 +27682,40 @@ fn storePtrVal( switch (mut_kit.pointee) { .direct => |val_ptr| { if (mut_kit.mut_decl.runtime_index 
== .comptime_field_ptr) { - if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { + if (!operand_val.eql(val_ptr.*, operand_ty, mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); } return; } - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - val_ptr.* = try operand_val.copy(arena); + val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue(); }, .reinterpret => |reinterpret| { const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); + const arena = mut_kit.beginArena(mod); + defer mut_kit.finishArena(mod); - reinterpret.val_ptr.* = try Value.readFromMemory(mut_kit.ty, sema.mod, buffer, arena); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, arena)).intern(mut_kit.ty, mod)).toValue(); }, .bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl // or the pointer is the problematic type - return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(sema.mod)}); + return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(mod)}); }, } } @@ -27754,7 +27768,7 @@ fn beginComptimePtrMutation( const mod = sema.mod; const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; switch (ptr.addr) { - .decl => unreachable, // isComptimeMutablePtr has been checked already + .decl, .int => unreachable, // isComptimeMutablePtr has been checked already .mut_decl => |mut_decl| { const decl = mod.declPtr(mut_decl.decl); return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); @@ -27767,546 +27781,472 @@ fn beginComptimePtrMutation( .runtime_index = .comptime_field_ptr, }); }, - else => unreachable, - } - if (true) unreachable; - switch (ptr_val.toIntern()) { - .none => switch (ptr_val.tag()) { - 
.decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); - }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; - const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), - .runtime_index = .comptime_field_ptr, - }); - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); - - switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { - .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(mod); - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - const elem_ty = parent.ty.childType(mod); - - // We might have a pointer to multiple elements of the array (e.g. a pointer - // to a sub-array). In this case, we just have to reinterpret the relevant - // bytes of the whole array rather than any single element. - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return .{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - } + .eu_payload => |eu_ptr| { + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .eu_payload }, + .data = Value.undef, + }; - switch (val_ptr.toIntern()) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + val_ptr.* = Value.initPayload(&payload.base); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = eu_ty, + }, + } + }, + .opt_payload => |opt_ptr| { + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.ip_index) { + .undef, .null_value => { + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = Value.undef, + }; - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try mod.intValue(elem_ty, bytes[i]); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try mod.intValue(elem_ty, byte); - } - if (parent.ty.sentinel(mod)) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), + val_ptr.* = Value.initPayload(&payload.base); - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + }, + .none => switch (val_ptr.tag()) { + .opt_payload => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, - else => unreachable, - }, - else => unreachable, - } + else => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, + }, }, - else => { - if (elem_ptr.index != 0) { - // TODO include a "declared here" note for the decl - return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ - elem_ptr.index, - }); - } - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty, - val_ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ); + else => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, }, - }, - .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { - 
// Even though the parent value type has well-defined memory layout, our - // pointer type does not. - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = opt_ty, + }, + } + }, + .elem => |elem_ptr| { + const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty); + + switch (parent.pointee) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { + .Array, .Vector => { + const check_len = parent.ty.arrayLenIncludingSentinel(mod); + if (elem_ptr.index >= check_len) { + // TODO have the parent include the decl so we can say "declared here" + return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ + elem_ptr.index, check_len, + }); + } + const elem_ty = parent.ty.childType(mod); + + // We might have a pointer to multiple elements of the array (e.g. a pointer + // to a sub-array). In this case, we just have to reinterpret the relevant + // bytes of the whole array rather than any single element. + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); + if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return .{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = val_ptr, + .byte_offset = elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, }; } - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); + switch (val_ptr.ip_index) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.toIntern()) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, Value.undef); - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(fields, Value.undef); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index, mod), - &fields[field_index], + elem_ty, + &elems[elem_ptr.index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.undef, - } }; + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } - val_ptr.* = Value.initPayload(&payload.base); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index, mod), - &payload.data.val, + elem_ty, + &elems[elem_ptr.index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); - - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - } - }, + + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ), + else => unreachable, - } - }, - .empty_struct => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + }, + else => unreachable, + } + }, + else => { + if (elem_ptr.index != 0) { + // TODO include a "declared here" note for the decl + return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ + elem_ptr.index, + }); + } + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty, + val_ptr, + ptr_elem_ty, + parent.mut_decl, + ); + }, + }, + .reinterpret => |reinterpret| { + if (!base_elem_ty.hasWellDefinedLayout(mod)) { + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. 
+ return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = base_elem_ty, + }; + } - const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(elems, val_ptr.castTag(.repeated).?.data); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .field => |field_ptr| { + const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + const field_index = @intCast(u32, field_ptr.index); + + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); + switch (parent.pointee) { + .direct => |val_ptr| switch (val_ptr.ip_index) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(fields, Value.undef); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &elems[field_index], + &fields[field_index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .@"union" => { - // We need to set the active field of the union. 
- const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = Value.undef, + } }; - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); + val_ptr.* = Value.initPayload(&payload.base); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &payload.val, + &payload.data.val, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), + .Pointer => { + assert(parent.ty.isSlice(mod)); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = Value.undef, + .len = Value.undef, + }); - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), + switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), - else => unreachable, + else => unreachable, + } }, - else => unreachable, - }, - else => unreachable, + } }, - .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); - const field_offset = try sema.usizeCast(block, src, field_offset_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + field_offset, - } }, - .ty = parent.ty, - }; + .empty_struct => { + const duped = try sema.arena.create(Value); + duped.* = val_ptr.*; + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + duped, + ptr_elem_ty, + parent.mut_decl, + ); }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(mod); - if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }; - } else { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. 
+ .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.mut_decl, + ), + .repeated => { const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = Value.initPayload(&payload.base); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &elems[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .@"union" => { + // We need to set the active field of the union. + const union_tag_ty = base_child_ty.unionTagTypeHypothetical(mod); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.toIntern()) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .opt_payload }, - .data = Value.undef, - }; + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &payload.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .slice => switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), - val_ptr.* = Value.initPayload(&payload.base); + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, + else => unreachable, + }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, + else => unreachable, }, - } - }, - .decl_ref => unreachable, // isComptimeMutablePtr has been checked already - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr) { - else => unreachable, + else => unreachable, + }, + .reinterpret => |reinterpret| { + const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); + const field_offset = try sema.usizeCast(block, src, field_offset_u64); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + field_offset, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } }, } } @@ -28418,6 +28358,7 @@ fn beginComptimePtrLoad( .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; + const is_mutable = ptr.addr == .mut_decl; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); if (decl.getVariable(mod) != null) return error.RuntimeLoad; @@ -28426,7 +28367,7 @@ fn beginComptimePtrLoad( break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = false, + .is_mutable = is_mutable, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, @@ -29411,7 +29352,7 @@ fn analyzeDeclVal( const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { - if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) { + if (sema.air_instructions.items(.tag)[index] == .interned and !block.is_typeof) { try sema.decl_val_table.put(sema.gpa, decl_index, result); } } @@ -30049,8 +29990,8 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod); - const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); + const elem_ptr = try ptr_val.elemPtr(try sema.elemPtrType(new_ptr_ty, sentinel_index), sentinel_index, sema.mod); + const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, .val => |v| v, @@ -33421,35 +33362,24 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; - if (val.ip_index != .none) { - if (@enumToInt(val.toIntern()) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); - try sema.air_instructions.append(gpa, .{ - .tag = .interned, - .data = .{ .interned = val.toIntern() }, - }); - const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - // This assertion can be removed when the `ty` parameter is removed from - // this function thanks to the InternPool transition being complete. 
- if (std.debug.runtime_safety) { - const val_ty = sema.typeOf(result); - if (!Type.eql(val_ty, ty, sema.mod)) { - std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ - ty.fmt(sema.mod), val_ty.fmt(sema.mod), - }); - } + + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + if (std.debug.runtime_safety) { + const val_ty = mod.intern_pool.typeOf(val.toIntern()); + if (ty.toIntern() != val_ty) { + std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ + ty.fmt(mod), val_ty.toType().fmt(mod), + }); } - return result; } - const ty_inst = try sema.addType(ty); - try sema.air_values.append(gpa, val); + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, + .tag = .interned, + .data = .{ .interned = val.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } @@ -33606,7 +33536,7 @@ pub fn analyzeAddressSpace( fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { const mod = sema.mod; const load_ty = ptr_ty.childType(mod); - const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true); + const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty); switch (res) { .runtime_load => return null, .val => |v| return v, @@ -33632,7 +33562,7 @@ const DerefResult = union(enum) { out_of_bounds: Type, }; -fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { +fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult { const mod = sema.mod; const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { @@ -33647,13 +33577,8 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value if (coerce_in_mem_ok) { // We have a Value that lines up in virtual memory exactly with what we want to load, // and it is in-memory coercible to load_ty. It may be returned without modifications. - if (deref.is_mutable and want_mutable) { - // The decl whose value we are obtaining here may be overwritten with - // a different value upon further semantic analysis, which would - // invalidate this memory. So we must copy here. - return DerefResult{ .val = try tv.val.copy(sema.arena) }; - } - return DerefResult{ .val = tv.val }; + // Move mutable decl values to the InternPool and assert other decls are already in the InternPool. + return .{ .val = (if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern()).toValue() }; } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d82fb72dea..020686f86e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -124,6 +124,60 @@ pub fn print( } return writer.writeAll(" }"); }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + const payload = val.castTag(.slice).?.data; + const elem_ty = ty.elemType2(mod); + const len = payload.len.toUnsignedInt(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + if (elem_val.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; + } + + // TODO would be nice if this had a bit of unicode awareness. + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + try print(.{ + .ty = elem_ty, + .val = elem_val, + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + }, + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(mod); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + return print(.{ .ty = ty, .val = val }, writer, level, mod); + }, // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 16b103c898..54a34e8f09 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -845,8 +845,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -919,8 +918,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -6155,15 +6153,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
const branch = &self.branch_stack.items[0];
 const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
 if (!gop.found_existing) {
- const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
+ const interned = self.air.instructions.items(.data)[inst_index].interned;
 gop.value_ptr.* = try self.genTypedValue(.{
 .ty = inst_ty,
- .val = self.air.values[ty_pl.payload],
+ .val = interned.toValue(),
 });
 }
 return gop.value_ptr.*;
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index f0a44b72a8..8f1a8fdb67 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -829,8 +829,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 .ptr_elem_val => try self.airPtrElemVal(inst),
 .ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .constant => unreachable, // excluded from function bodies
- .interned => unreachable, // excluded from function bodies
+ .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
 .unreach => self.finishAirBookkeeping(),
 .optional_payload => try self.airOptionalPayload(inst),
@@ -903,8 +902,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 /// Asserts there is already capacity to insert into top branch inst_table.
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
- const air_tags = self.air.instructions.items(.tag);
- if (air_tags[inst] == .constant) return; // Constants are immortal.
+ assert(self.air.instructions.items(.tag)[inst] != .interned);
 // When editing this function, note that the logic must synchronize with `reuseOperand`.
 const prev_value = self.getResolvedInstValue(inst);
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -6103,15 +6101,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
 });
 switch (self.air.instructions.items(.tag)[inst_index]) {
- .constant => {
+ .interned => {
 // Constants have static lifetimes, so they are always memoized in the outermost table.
 const branch = &self.branch_stack.items[0];
 const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
 if (!gop.found_existing) {
- const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
+ const interned = self.air.instructions.items(.data)[inst_index].interned;
 gop.value_ptr.* = try self.genTypedValue(.{
 .ty = inst_ty,
- .val = self.air.values[ty_pl.payload],
+ .val = interned.toValue(),
 });
 }
 return gop.value_ptr.*;
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 7f4715a451..660630503d 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -659,8 +658,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 .ptr_elem_val => try self.airPtrElemVal(inst),
 .ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .constant => unreachable, // excluded from function bodies
- .interned => unreachable, // excluded from function bodies
+ .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
 .unreach => self.finishAirBookkeeping(),
 .optional_payload => try self.airOptionalPayload(inst),
@@ -730,8 +729,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 /// Asserts there is already capacity to insert into top branch inst_table.
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
- const air_tags = self.air.instructions.items(.tag);
- if (air_tags[inst] == .constant) return; // Constants are immortal.
+ assert(self.air.instructions.items(.tag)[inst] != .interned);
 // When editing this function, note that the logic must synchronize with `reuseOperand`.
 const prev_value = self.getResolvedInstValue(inst);
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -2557,15 +2555,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
 });
 switch (self.air.instructions.items(.tag)[inst_index]) {
- .constant => {
+ .interned => {
 // Constants have static lifetimes, so they are always memoized in the outermost table.
 const branch = &self.branch_stack.items[0];
 const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
 if (!gop.found_existing) {
- const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
+ const interned = self.air.instructions.items(.data)[inst_index].interned;
 gop.value_ptr.* = try self.genTypedValue(.{
 .ty = inst_ty,
- .val = self.air.values[ty_pl.payload],
+ .val = interned.toValue(),
 });
 }
 return gop.value_ptr.*;
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 9f44dc0e8a..c7376a6eb7 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -679,8 +678,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 .ptr_elem_val => try self.airPtrElemVal(inst),
 .ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .constant => unreachable, // excluded from function bodies
- .interned => unreachable, // excluded from function bodies
+ .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
 .unreach => self.finishAirBookkeeping(),
 .optional_payload => try self.airOptionalPayload(inst),
@@ -4423,8 +4422,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
 /// Asserts there is already capacity to insert into top branch inst_table.
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
- const air_tags = self.air.instructions.items(.tag);
- if (air_tags[inst] == .constant) return; // Constants are immortal.
+ assert(self.air.instructions.items(.tag)[inst] != .interned);
 // When editing this function, note that the logic must synchronize with `reuseOperand`.
 const prev_value = self.getResolvedInstValue(inst);
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -4553,15 +4551,15 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
 if (Air.refToIndex(ref)) |inst| {
 switch (self.air.instructions.items(.tag)[inst]) {
- .constant => {
+ .interned => {
 // Constants have static lifetimes, so they are always memoized in the outermost table.
const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const interned = self.air.instructions.items(.data)[inst].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 85fc8346f8..b4e627e957 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -883,7 +883,7 @@ fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !B fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { const inst = Air.refToIndex(ref) orelse return; - if (func.air.instructions.items(.tag)[inst] == .constant) return; + assert(func.air.instructions.items(.tag)[inst] != .interned); // Branches are currently only allowed to free locals allocated // within their own branch. // TODO: Upon branch consolidation free any locals if needed. @@ -1832,8 +1832,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { - .constant => unreachable, - .interned => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .add => func.airBinOp(inst, .add), .add_sat => func.airSatBinOp(inst, .add), diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f2ac985844..48504dee8f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1922,8 +1922,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -2097,10 +2096,8 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - switch (self.air.instructions.items(.tag)[inst]) { - .constant => unreachable, - else => self.inst_tracking.getPtr(inst).?.die(self, inst), - } + assert(self.air.instructions.items(.tag)[inst] != .interned); + self.inst_tracking.getPtr(inst).?.die(self, inst); } /// Called when there are no operands, and the instruction is always unreferenced. 
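Taken together, these backend hunks make one mechanical change: `.constant` disappears, `processDeath` merely asserts that `.interned` never reaches it, and `resolveInst` reads the value straight out of the instruction's `interned` field instead of indexing `air.values`. The memoization idea they all share is sketched below as a self-contained Zig program; `Index`, `MCValue`, and `genTypedValue` are simplified stand-ins, not the compiler's real types.

const std = @import("std");

// Simplified stand-ins for the compiler's types; only the memoization
// pattern is the point of the sketch.
const Index = u32;
const MCValue = union(enum) { none, immediate: u64 };

fn genTypedValue(index: Index) MCValue {
    // Placeholder lowering of an interned constant to a machine value.
    return .{ .immediate = @as(u64, index) * 2 };
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // The outermost branch's table: an interned constant has a static
    // lifetime, so it is lowered at most once per function.
    var inst_table = std.AutoHashMap(Index, MCValue).init(gpa.allocator());
    defer inst_table.deinit();

    for ([_]Index{ 7, 7, 7 }) |inst| {
        const gop = try inst_table.getOrPut(inst);
        if (!gop.found_existing) gop.value_ptr.* = genTypedValue(inst);
        std.debug.print("{}\n", .{gop.value_ptr.*});
    }
}

Because an interned constant's lifetime spans the whole function, the outermost table is the right memoization scope, which is exactly why every backend keeps that lookup in `branch_stack.items[0]`.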
@@ -2876,8 +2873,8 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { - .constant => { - const src_val = self.air.values[air_data[inst].ty_pl.payload]; + .interned => { + const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @@ -11584,11 +11581,11 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { - .constant => tracking: { + .interned => tracking: { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = (try self.air.value(ref, mod)).?, + .val = self.air.instructions.items(.data)[inst].interned.toValue(), })); break :tracking gop.value_ptr; }, @@ -11605,7 +11602,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { - .constant => &self.const_tracking, + .interned => &self.const_tracking, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 76533b4284..f97292e510 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2890,8 +2890,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const result_value = switch (air_tags[inst]) { // zig fmt: off - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .arg => try airArg(f, inst), @@ -7783,8 +7782,8 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void { const ref_inst = Air.refToIndex(ref) orelse return; + assert(f.air.instructions.items(.tag)[ref_inst] != .interned); const c_value = (f.value_map.fetchRemove(ref_inst) orelse return).value; - if (f.air.instructions.items(.tag)[ref_inst] == .constant) return; const local_index = switch (c_value) { .local, .new_local => |l| l, else => return, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0c12faf751..46b126ad84 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4530,8 +4530,7 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), - .constant => unreachable, - .interned => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.airUnreach(inst), .dbg_stmt => self.airDbgStmt(inst), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 2b04e03a5a..1a19bbdf91 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1807,7 +1807,6 @@ pub const DeclGen = struct { .br => return self.airBr(inst), .breakpoint => return, .cond_br => return self.airCondBr(inst), - .constant => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 58e4029543..204f5ddeb9 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -93,14 +93,10 @@ const Writer = struct { fn writeAllConstants(w: 
*Writer, s: anytype) @TypeOf(s).Error!void { for (w.air.instructions.items(.tag), 0..) |tag, i| { + if (tag != .interned) continue; const inst = @intCast(Air.Inst.Index, i); - switch (tag) { - .constant, .interned => { - try w.writeInst(s, inst); - try s.writeByte('\n'); - }, - else => continue, - } + try w.writeInst(s, inst); + try s.writeByte('\n'); } } @@ -304,7 +300,7 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), - .constant => try w.writeConstant(s, inst), + .inferred_alloc, .inferred_alloc_comptime => try w.writeConstant(s, inst), .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), diff --git a/src/value.zig b/src/value.zig index 02f4422dda..9244e33ad5 100644 --- a/src/value.zig +++ b/src/value.zig @@ -35,6 +35,22 @@ pub const Value = struct { // The first section of this enum are tags that require no payload. // After this, the tag requires a payload. + /// When the type is error union: + /// * If the tag is `.@"error"`, the error union is an error. + /// * If the tag is `.eu_payload`, the error union is a payload. + /// * A nested error such as `anyerror!(anyerror!T)` in which the outer error union + /// is non-error, but the inner error union is an error, is represented as + /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. + eu_payload, + /// When the type is optional: + /// * If the tag is `.null_value`, the optional is null. + /// * If the tag is `.opt_payload`, the optional is a payload. + /// * A nested optional such as `??T` in which the outer optional + /// is non-null, but the inner optional is null, is represented as + /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. + opt_payload, + /// Pointer and length as sub `Value` objects. + slice, /// A slice of u8 whose memory is managed externally. bytes, /// This value is repeated some number of times.
The amount of times to repeat @@ -58,14 +74,16 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .repeated => Payload.SubValue, - + .eu_payload, + .opt_payload, + .repeated, + => Payload.SubValue, + .slice => Payload.Slice, .bytes => Payload.Bytes, - - .inferred_alloc => Payload.InferredAlloc, - .inferred_alloc_comptime => Payload.InferredAllocComptime, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, + .inferred_alloc => Payload.InferredAlloc, + .inferred_alloc_comptime => Payload.InferredAllocComptime, }; } @@ -172,7 +190,10 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .repeated => { + .eu_payload, + .opt_payload, + .repeated, + => { const payload = self.cast(Payload.SubValue).?; const new_payload = try arena.create(Payload.SubValue); new_payload.* = .{ @@ -184,6 +205,21 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, + .slice => { + const payload = self.castTag(.slice).?; + const new_payload = try arena.create(Payload.Slice); + new_payload.* = .{ + .base = payload.base, + .data = .{ + .ptr = try payload.data.ptr.copy(arena), + .len = try payload.data.len.copy(arena), + }, + }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; + }, .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -263,6 +299,15 @@ pub const Value = struct { try out_stream.writeAll("(repeated) "); val = val.castTag(.repeated).?.data; }, + .eu_payload => { + try out_stream.writeAll("(eu_payload) "); + val = val.castTag(.eu_payload).?.data; + }, + .opt_payload => { + try out_stream.writeAll("(opt_payload) "); + val = val.castTag(.opt_payload).?.data; + }, + .slice => return out_stream.writeAll("(slice)"), .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), }; @@ -1653,13 +1698,18 @@ pub const Value = struct { .Null, .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), + .Pointer => { + assert(ty.isSlice(mod)); + const slice = val.castTag(.slice).?.data; + const ptr_ty = ty.slicePtrFieldType(mod); + slice.ptr.hashUncoerced(ptr_ty, hasher, mod); + }, .Type, .Float, .ComptimeFloat, .Bool, .Int, .ComptimeInt, - .Pointer, .Fn, .Optional, .ErrorSet, @@ -1799,9 +1849,15 @@ pub const Value = struct { /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - switch (val.toIntern()) { - .undef => return Value.undef, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (val.ip_index) { + .undef => Value.undef, + .none => switch (val.tag()) { + .repeated => val.castTag(.repeated).?.data, + .aggregate => val.castTag(.aggregate).?.data[index], + .slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index), + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -1829,7 +1885,7 @@ pub const Value = struct { }, else => unreachable, }, - } + }; } pub fn isLazyAlign(val: Value, mod: *Module) bool { @@ -1875,25 +1931,28 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable.is_threadlocal, - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl_index| { - const decl = mod.declPtr(decl_index); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); - }, - .mut_decl => |mut_decl| { - const decl = mod.declPtr(mut_decl.decl); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); + return switch (val.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, - .int => false, - .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), - .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), - .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), + else => false, }, - else => false, }; } @@ -1926,9 +1985,21 @@ pub const Value = struct { } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - switch (val.toIntern()) { - .undef => return Value.undef, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (val.ip_index) { + .undef => Value.undef, + .none => switch (val.tag()) { + .aggregate => { + const field_values = val.castTag(.aggregate).?.data; + return field_values[index]; + }, + .@"union" => { + const payload = val.castTag(.@"union").?.data; + // TODO assert the tag is correct + return payload.val; + }, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -1941,7 +2012,7 @@ pub const Value = struct { .un => |un| un.val.toValue(), else => unreachable, }, - } + }; } pub fn unionTag(val: Value, mod: *Module) Value { @@ -1956,36 +2027,17 @@ pub const Value = struct { /// Returns a pointer to the element value at the index. 
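The rewritten `elemValue`, `isPtrToThreadLocal`, and `fieldValue` all follow the same transitional shape: switch on `val.ip_index`, let `.none` route to the legacy tagged-payload machinery, and send everything else through the InternPool. A minimal sketch of that hybrid representation, where `pool` and `legacy_int` are toy stand-ins for the real InternPool and `ptr_otherwise` payloads:

const std = @import("std");

// Sketch of the transitional Value: either an InternPool index, or
// (when ip_index == .none) a legacy payload.
const Index = enum(u32) { none = std.math.maxInt(u32), _ };

const Value = struct {
    ip_index: Index,
    legacy_int: u64,

    fn toInt(v: Value, pool: []const u64) u64 {
        return switch (v.ip_index) {
            // Not yet migrated: read the legacy payload directly.
            .none => v.legacy_int,
            // Migrated: the Value is just an index into the pool.
            else => pool[@enumToInt(v.ip_index)],
        };
    }
};

pub fn main() void {
    const pool = [_]u64{ 11, 22, 33 };
    const interned = Value{ .ip_index = @intToEnum(Index, 1), .legacy_int = 0 };
    const legacy = Value{ .ip_index = .none, .legacy_int = 99 };
    std.debug.print("{} {}\n", .{ interned.toInt(&pool), legacy.toInt(&pool) });
}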
pub fn elemPtr( val: Value, - ty: Type, + elem_ptr_ty: Type, index: usize, mod: *Module, ) Allocator.Error!Value { - const elem_ty = ty.elemType2(mod); - const ptr_ty_key = mod.intern_pool.indexToKey(ty.toIntern()).ptr_type; - assert(ptr_ty_key.host_size == 0); - assert(ptr_ty_key.bit_offset == 0); - assert(ptr_ty_key.vector_index == .none); - const elem_alignment = InternPool.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); - const alignment = switch (ptr_ty_key.alignment) { - .none => .none, - else => ptr_ty_key.alignment.min( - @intToEnum(InternPool.Alignment, @ctz(index * elem_ty.abiSize(mod))), - ), - }; - const ptr_ty = try mod.ptrType(.{ - .elem_type = elem_ty.toIntern(), - .alignment = if (alignment == elem_alignment) .none else alignment, - .is_const = ptr_ty_key.is_const, - .is_volatile = ptr_ty_key.is_volatile, - .is_allowzero = ptr_ty_key.is_allowzero, - .address_space = ptr_ty_key.address_space, - }); + const elem_ty = elem_ptr_ty.childType(mod); const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), + .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -2001,7 +2053,7 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), + .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = ptr_val.toIntern(), .index = index, @@ -4058,9 +4110,12 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const SubValue = struct { + pub const Slice = struct { base: Payload, - data: Value, + data: struct { + ptr: Value, + len: Value, + }, }; pub const Bytes = struct { @@ -4069,6 +4124,11 @@ pub const Value = struct { data: []const u8, }; + pub const SubValue = struct { + base: Payload, + data: Value, + }; + pub const Aggregate = struct { base: Payload, /// Field values. The types are according to the struct or array type. 
@@ -4076,6 +4136,18 @@ pub const Value = struct { data: []Value, }; + pub const Union = struct { + pub const base_tag = Tag.@"union"; + + base: Payload = .{ .tag = base_tag }, + data: Data, + + pub const Data = struct { + tag: Value, + val: Value, + }; + }; + pub const InferredAlloc = struct { pub const base_tag = Tag.inferred_alloc; @@ -4110,18 +4182,6 @@ pub const Value = struct { alignment: u32, }, }; - - pub const Union = struct { - pub const base_tag = Tag.@"union"; - - base: Payload = .{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - tag: Value, - val: Value, - }; - }; }; pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 555cda135d..36e497afb9 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -682,4 +682,10 @@ def __lldb_init_module(debugger, _=None): add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) add(debugger, category='zig.stage2', type='InternPool.Index', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.ErrorUnion.Value', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Float.Storage', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Ptr.Addr', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Aggregate.Storage', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='arch.x86_64.CodeGen.MCValue', identifier='zig_TaggedUnion', synth=True, inline_children=True, summary=True) -- cgit v1.2.3 From 66c43968546e38879a2d4c3f2264e10676deef73 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 May 2023 23:04:15 -0700 Subject: AIR: eliminate the `values` array --- src/Air.zig | 16 +++++++++------- src/Module.zig | 1 - src/Sema.zig | 27 +++++++++------------------ src/arch/aarch64/CodeGen.zig | 4 ++-- src/arch/arm/CodeGen.zig | 4 ++-- src/arch/riscv64/CodeGen.zig | 4 ++-- src/arch/sparc64/CodeGen.zig | 4 ++-- src/arch/wasm/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 4 ++-- src/codegen/c.zig | 6 +++--- src/codegen/llvm.zig | 10 +++++----- src/codegen/spirv.zig | 2 +- src/print_air.zig | 22 ++++++---------------- 13 files changed, 44 insertions(+), 62 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index e6cfc8c116..56f7d4cf01 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -17,7 +17,6 @@ instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. extra: []const u32, -values: []const Value, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. @@ -421,10 +420,10 @@ pub const Inst = struct { /// Marks the end of a semantic scope for debug info variables. dbg_block_end, /// Marks the start of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. 
dbg_inline_begin, /// Marks the end of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_end, /// Marks the beginning of a local variable. The operand is a pointer pointing /// to the storage for the variable. The local may be a const or a var. @@ -967,6 +966,10 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + ty_fn: struct { + ty: Ref, + func: Module.Fn.Index, + }, br: struct { block_inst: Index, operand: Ref, @@ -1090,8 +1093,7 @@ pub const FieldParentPtr = struct { pub const Shuffle = struct { a: Inst.Ref, b: Inst.Ref, - // index to air_values - mask: u32, + mask: InternPool.Index, mask_len: u32, }; @@ -1469,7 +1471,8 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end u32 => air.extra[i], Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]), i32 => @bitCast(i32, air.extra[i]), - else => @compileError("bad field type"), + InternPool.Index => @intToEnum(InternPool.Index, air.extra[i]), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; } @@ -1482,7 +1485,6 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.instructions.deinit(gpa); gpa.free(air.extra); - gpa.free(air.values); air.* = undefined; } diff --git a/src/Module.zig b/src/Module.zig index 76e2142ae6..3dd89f1269 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5720,7 +5720,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), - .values = try sema.air_values.toOwnedSlice(gpa), }; } diff --git a/src/Sema.zig b/src/Sema.zig index b0d36c4699..0034810846 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17,7 +17,6 @@ perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, -air_values: std.ArrayListUnmanaged(Value) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -772,7 +771,6 @@ pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; sema.air_instructions.deinit(gpa); sema.air_extra.deinit(gpa); - sema.air_values.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); sema.types_to_resolve.deinit(gpa); @@ -2018,10 +2016,8 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( } const air_datas = sema.air_instructions.items(.data); const val = switch (air_tags[i]) { - .inferred_alloc, .inferred_alloc_comptime => val: { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - break :val sema.air_values.items[ty_pl.payload]; - }, + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, .interned => air_datas[i].interned.toValue(), else => return null, }; @@ -7930,20 +7926,17 @@ fn emitDbgInline( new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { - if (sema.mod.comp.bin_file.options.strip) return; + const mod = sema.mod; + if (mod.comp.bin_file.options.strip) return; // Recursive inline call; no dbg_inline needed. 
if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ - .ty = new_func_ty.toIntern(), - .index = new_func, - } })).toValue()); _ = try block.addInst(.{ .tag = tag, - .data = .{ .ty_pl = .{ + .data = .{ .ty_fn = .{ .ty = try sema.addType(new_func_ty), - .payload = @intCast(u32, sema.air_values.items.len - 1), + .func = new_func, } }, }); } @@ -21724,8 +21717,6 @@ fn analyzeShuffle( } } - const mask_index = @intCast(u32, sema.air_values.items.len); - try sema.air_values.append(sema.gpa, mask); return block.addInst(.{ .tag = .shuffle, .data = .{ .ty_pl = .{ @@ -21733,7 +21724,7 @@ fn analyzeShuffle( .payload = try block.sema.addExtra(Air.Shuffle{ .a = a, .b = b, - .mask = mask_index, + .mask = mask.toIntern(), .mask_len = mask_len, }), } }, @@ -33311,7 +33302,6 @@ pub fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, - .values = sema.air_values.items, }; } @@ -33371,7 +33361,8 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { u32 => @field(extra, field.name), Air.Inst.Ref => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + InternPool.Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 54a34e8f09..3afb510d43 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4621,9 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 8f1a8fdb67..5f476a2e80 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4568,9 +4568,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 660630503d..5417650dd5 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1875,9 +1875,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, 
.none, .none }); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index c7376a6eb7..354af50b61 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1660,9 +1660,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d9cb56404a..0c77197417 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4947,7 +4947,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const a = try func.resolveInst(extra.a); const b = try func.resolveInst(extra.b); - const mask = func.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const child_ty = inst_ty.childType(mod); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 48504dee8f..00f5b3f3da 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8541,9 +8541,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2dcc332713..59d00f5849 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4302,10 +4302,10 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { - const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = f.air.instructions.items(.data)[inst].ty_fn; const mod = f.object.dg.module; const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return .none; } @@ -6612,7 +6612,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; - const mask = f.air.values[extra.mask]; + const mask = extra.mask.toValue(); const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index bbedc1160c..dd07b5edbd 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5927,10 +5927,10 @@ pub const FuncGen = struct { fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const dib = self.dg.object.di_builder orelse return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.dg.module; - const func = 
self.air.values[ty_pl.payload].getFunction(mod).?; + const func = mod.funcPtr(ty_fn.func); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -5986,10 +5986,10 @@ pub const FuncGen = struct { fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { if (self.dg.object.di_builder == null) return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.dg.module; - const func = self.air.values[ty_pl.payload].getFunction(mod).?; + const func = mod.funcPtr(ty_fn.func); const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; @@ -8875,7 +8875,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); const b = try self.resolveInst(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const a_len = self.typeOf(extra.a).vectorLen(mod); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 43b6741493..80e98dbcd3 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2074,7 +2074,7 @@ pub const DeclGen = struct { const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); const b = try self.resolve(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const a_len = self.typeOf(extra.a).vectorLen(mod); diff --git a/src/print_air.zig b/src/print_air.zig index 800fbc43c2..be7bc9610d 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -15,12 +15,11 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo // the debug safety tag but we want to measure release size. 
(@sizeOf(Air.Inst.Tag) + 8); const extra_bytes = air.extra.len * @sizeOf(u32); - const values_bytes = air.values.len * @sizeOf(Value); const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0; const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0; const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0; const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes + - values_bytes + @sizeOf(Liveness) + liveness_extra_bytes + + @sizeOf(Liveness) + liveness_extra_bytes + liveness_special_bytes + tomb_bytes; // zig fmt: off @@ -28,7 +27,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo \\# Total AIR+Liveness bytes: {} \\# AIR Instructions: {d} ({}) \\# AIR Extra Data: {d} ({}) - \\# AIR Values Bytes: {d} ({}) \\# Liveness tomb_bits: {} \\# Liveness Extra Data: {d} ({}) \\# Liveness special table: {d} ({}) @@ -37,7 +35,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo fmtIntSizeBin(total_bytes), air.instructions.len, fmtIntSizeBin(instruction_bytes), air.extra.len, fmtIntSizeBin(extra_bytes), - air.values.len, fmtIntSizeBin(values_bytes), fmtIntSizeBin(tomb_bytes), if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes), if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes), @@ -300,7 +297,8 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), - .inferred_alloc, .inferred_alloc_comptime => try w.writeConstant(s, inst), + .inferred_alloc => @panic("TODO"), + .inferred_alloc_comptime => @panic("TODO"), .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), @@ -598,14 +596,6 @@ const Writer = struct { try s.print(", {d}", .{extra.field_index}); } - fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const val = w.air.values[ty_pl.payload]; - const ty = w.air.getRefType(ty_pl.ty); - try w.writeType(s, ty); - try s.print(", {}", .{val.fmtValue(ty, w.module)}); - } - fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const mod = w.module; const ip_index = w.air.instructions.items(.data)[inst].interned; @@ -693,9 +683,9 @@ const Writer = struct { } fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index); - const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl); + const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; + const func_index = ty_fn.func; + const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); try s.print("{s}", .{owner_decl.name}); } -- cgit v1.2.3 From 90a877f462fce8bee69ad366aac66805a7c00571 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 13:54:22 -0700 Subject: InternPool: pass by const pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Zig language allows the compiler to make this optimization automatically. We should definitely make the compiler do that, and revert this commit. However, that will not happen in this branch, and I want to continue to explore achieving performance parity with merge-base. 
So, this commit changes all InternPool parameters to be passed by const pointer rather than by value. I measured a 1.03x ± 0.03 speedup vs the previous commit compiling the (set of passing) behavior tests. Against merge-base, this commit is 1.17x ± 0.04 slower, which is an improvement from the previous measurement of 1.22x ± 0.02. Related issue: #13510 Related issue: #14129 Related issue: #15688 --- src/Air.zig | 8 ++--- src/InternPool.zig | 84 ++++++++++++++++++++++---------------------- src/Liveness.zig | 8 ++--- src/Liveness/Verify.zig | 4 +-- src/Module.zig | 2 +- src/Sema.zig | 2 +- src/arch/aarch64/CodeGen.zig | 6 ++-- src/arch/arm/CodeGen.zig | 6 ++-- src/arch/riscv64/CodeGen.zig | 6 ++-- src/arch/sparc64/CodeGen.zig | 6 ++-- src/arch/wasm/CodeGen.zig | 6 ++-- src/arch/x86_64/CodeGen.zig | 6 ++-- src/codegen/c.zig | 6 ++-- src/codegen/llvm.zig | 10 +++--- src/codegen/spirv.zig | 6 ++-- src/print_air.zig | 2 +- src/type.zig | 20 +++++------ 17 files changed, 94 insertions(+), 94 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/src/Air.zig b/src/Air.zig index 56f7d4cf01..b179a3c024 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1182,7 +1182,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { const ref_int = @enumToInt(inst); if (ref_int < InternPool.static_keys.len) { return InternPool.static_keys[ref_int].typeOf().toType(); @@ -1190,7 +1190,7 @@ pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1520,7 +1520,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .interned => return air_datas[inst_index].interned.toValue(), - else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), + else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod), } } @@ -1537,7 +1537,7 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { /// because it can cause side effects. If an instruction does not need to be /// lowered, and Liveness determines its result is unused, backends should /// avoid lowering it. 
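As the commit message concedes, Zig already permits the compiler to pass a large by-value parameter by reference behind the scenes; this change only spells the reference out. A standalone sketch of the two signatures, with `Pool` standing in for `InternPool` and 4096 items as an arbitrary size:

const std = @import("std");

// `Pool` stands in for InternPool: a struct large enough that an
// implicit per-call copy would be costly.
const Pool = struct {
    items: [4096]u32 = [_]u32{0} ** 4096,

    // By value: the callee semantically owns a 16 KiB copy (which the
    // compiler may or may not elide).
    fn lenByValue(ip: Pool) usize {
        return ip.items.len;
    }

    // By const pointer: only a pointer crosses the call; the body is
    // unchanged because field access auto-dereferences.
    fn lenByPtr(ip: *const Pool) usize {
        return ip.items.len;
    }
};

pub fn main() void {
    const pool = Pool{};
    std.debug.print("{} {}\n", .{ Pool.lenByValue(pool), pool.lenByPtr() });
}

The call sites barely change, which is why the diff below is almost entirely signature rewrites plus the occasional `&mod.intern_pool` at the caller.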
-pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { +pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, diff --git a/src/InternPool.zig b/src/InternPool.zig index 7debd2c2a3..ffd72245d5 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2992,7 +2992,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; } -fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { +fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { const type_function = ip.extraDataTrail(TypeFunction, data); const param_types = @ptrCast( []Index, @@ -3015,7 +3015,7 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { }; } -fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { +fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); const names = @ptrCast( []const NullTerminatedString, @@ -3038,7 +3038,7 @@ fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key } }; } -fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key { +fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { const int_info = ip.limbData(Int, limb_index); return .{ .int = .{ .ty = int_info.ty, @@ -4351,7 +4351,7 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { } } -fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, 0..) |field, i| { @@ -4384,12 +4384,12 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: }; } -fn extraData(ip: InternPool, comptime T: type, index: usize) T { +fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { return extraDataTrail(ip, T, index).data; } /// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. -fn limbData(ip: InternPool, comptime T: type, index: usize) T { +fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { switch (@sizeOf(Limb)) { @sizeOf(u32) => return extraData(ip, T, index), @sizeOf(u64) => {}, @@ -4413,7 +4413,7 @@ fn limbData(ip: InternPool, comptime T: type, index: usize) T { } /// This function returns the Limb slice that is trailing data after a payload. -fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { +fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { const field_count = @typeInfo(S).Struct.fields.len; switch (@sizeOf(Limb)) { @sizeOf(u32) => { @@ -4433,7 +4433,7 @@ const LimbsAsIndexes = struct { len: u32, }; -fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { +fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { const host_slice = switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.items, @sizeOf(u64) => ip.limbs.items, @@ -4447,7 +4447,7 @@ fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { } /// This function converts Limb array indexes to a primitive slice type. 
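The many `extraDataTrail` and `extraData` signature changes above all orbit one serialization scheme: typed records are flattened into a `u32` array and decoded field by field with comptime reflection. A reduced sketch of that decoder follows; `Shuffle` here is a toy record rather than the real `Air.Shuffle`, and the builtin spellings match the Zig version used in this patch:

const std = @import("std");

// Decode a typed record from a flat u32 array, one field per element,
// in declaration order - the same idea as InternPool.extraData.
fn extraData(comptime T: type, extra: []const u32, index: usize) T {
    var result: T = undefined;
    inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
        @field(result, field.name) = switch (field.type) {
            u32 => extra[index + i],
            i32 => @bitCast(i32, extra[index + i]),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
    }
    return result;
}

const Shuffle = struct { a: u32, b: u32, mask_len: u32 };

pub fn main() void {
    const extra = [_]u32{ 10, 20, 4 };
    const s = extraData(Shuffle, &extra, 0);
    std.debug.print("{} {} {}\n", .{ s.a, s.b, s.mask_len });
}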
-fn limbsIndexToSlice(ip: InternPool, limbs: LimbsAsIndexes) []const Limb { +fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { return switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], @@ -4485,7 +4485,7 @@ test "basic usage" { try std.testing.expect(another_array_i32 == array_i32); } -pub fn childType(ip: InternPool, i: Index) Index { +pub fn childType(ip: *const InternPool, i: Index) Index { return switch (ip.indexToKey(i)) { .ptr_type => |ptr_type| ptr_type.elem_type, .vector_type => |vector_type| vector_type.child, @@ -4496,7 +4496,7 @@ pub fn childType(ip: InternPool, i: Index) Index { } /// Given a slice type, returns the type of the ptr field. -pub fn slicePtrType(ip: InternPool, i: Index) Index { +pub fn slicePtrType(ip: *const InternPool, i: Index) Index { switch (i) { .slice_const_u8_type => return .manyptr_const_u8_type, .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, @@ -4510,7 +4510,7 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { } /// Given a slice value, returns the value of the ptr field. -pub fn slicePtr(ip: InternPool, i: Index) Index { +pub fn slicePtr(ip: *const InternPool, i: Index) Index { const item = ip.items.get(@enumToInt(i)); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, @@ -4519,7 +4519,7 @@ pub fn slicePtr(ip: InternPool, i: Index) Index { } /// Given a slice value, returns the value of the len field. -pub fn sliceLen(ip: InternPool, i: Index) Index { +pub fn sliceLen(ip: *const InternPool, i: Index) Index { const item = ip.items.get(@enumToInt(i)); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).len, @@ -4702,7 +4702,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } }); } -pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { +pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .type_struct) return .none; @@ -4710,7 +4710,7 @@ pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { +pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); switch (tags[@enumToInt(val)]) { @@ -4721,7 +4721,7 @@ pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { +pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { assert(val != .none); const tags = ip.items.items(.tag); const datas = ip.items.items(.data); @@ -4731,7 +4731,7 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } -pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { +pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .func) return .none; @@ -4739,7 +4739,7 @@ pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { return ip.extraData(Key.Func, 
datas[@enumToInt(val)]).index.toOptional(); } -pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { +pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; @@ -4748,7 +4748,7 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre } /// includes .comptime_int_type -pub fn isIntegerType(ip: InternPool, ty: Index) bool { +pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { return switch (ty) { .usize_type, .isize_type, @@ -4769,7 +4769,7 @@ pub fn isIntegerType(ip: InternPool, ty: Index) bool { } /// does not include .enum_literal_type -pub fn isEnumType(ip: InternPool, ty: Index) bool { +pub fn isEnumType(ip: *const InternPool, ty: Index) bool { return switch (ty) { .atomic_order_type, .atomic_rmw_op_type, @@ -4783,35 +4783,35 @@ pub fn isEnumType(ip: InternPool, ty: Index) bool { }; } -pub fn isFunctionType(ip: InternPool, ty: Index) bool { +pub fn isFunctionType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .func_type; } -pub fn isPointerType(ip: InternPool, ty: Index) bool { +pub fn isPointerType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .ptr_type; } -pub fn isOptionalType(ip: InternPool, ty: Index) bool { +pub fn isOptionalType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .opt_type; } /// includes .inferred_error_set_type -pub fn isErrorSetType(ip: InternPool, ty: Index) bool { +pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool { return ty == .anyerror_type or switch (ip.indexToKey(ty)) { .error_set_type, .inferred_error_set_type => true, else => false, }; } -pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { +pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .inferred_error_set_type; } -pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { +pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .error_union_type; } -pub fn isAggregateType(ip: InternPool, ty: Index) bool { +pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { return switch (ip.indexToKey(ty)) { .array_type, .vector_type, .anon_struct_type, .struct_type => true, else => false, @@ -4827,11 +4827,11 @@ pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index); } -pub fn dump(ip: InternPool) void { +pub fn dump(ip: *const InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } -fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { +fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * ip.items.len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; @@ -5023,11 +5023,11 @@ pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } -pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct { +pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } -pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const 
Module.Struct { +pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { return structPtrConst(ip, index.unwrap() orelse return null); } @@ -5035,7 +5035,7 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } -pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Union { +pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } @@ -5043,7 +5043,7 @@ pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { return ip.allocated_funcs.at(@enumToInt(index)); } -pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn { +pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn { return ip.allocated_funcs.at(@enumToInt(index)); } @@ -5051,7 +5051,7 @@ pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.In return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } -pub fn inferredErrorSetPtrConst(ip: InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { +pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } @@ -5182,7 +5182,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { } } -pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { +pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 { const string_bytes = ip.string_bytes.items; const start = @enumToInt(s); var end: usize = start; @@ -5190,11 +5190,11 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { return string_bytes[start..end :0]; } -pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { +pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { return ip.stringToSlice(s.unwrap() orelse return null); } -pub fn typeOf(ip: InternPool, index: Index) Index { +pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization of static keys is required so that typeOf can be called // on static keys that haven't been added yet during static key initialization. // An alternative would be to topological sort the static keys, but this would @@ -5382,12 +5382,12 @@ pub fn typeOf(ip: InternPool, index: Index) Index { } /// Assumes that the enum's field indexes equal its value tags. 
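`stringToSlice` above is the read side of the pool's string interning: every name lives in one `string_bytes` buffer, a `NullTerminatedString` is just a start offset, and the length is recovered by scanning to the 0 sentinel. A self-contained sketch of that lookup (the two-string buffer is a contrived example):

const std = @import("std");

// Recover a [:0] slice from a start offset by scanning for the sentinel,
// as stringToSlice does over InternPool.string_bytes.
fn stringToSlice(string_bytes: [:0]const u8, start: usize) [:0]const u8 {
    var end: usize = start;
    while (string_bytes[end] != 0) end += 1;
    return string_bytes[start..end :0];
}

pub fn main() void {
    // Two interned strings, "foo" and "bar", packed into one buffer.
    const bytes: [:0]const u8 = "foo\x00bar";
    std.debug.print("{s} {s}\n", .{ stringToSlice(bytes, 0), stringToSlice(bytes, 4) });
}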
-pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { +pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; return @intToEnum(E, ip.indexToKey(int).int.storage.u64); } -pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { +pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -5397,7 +5397,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { }; } -pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { +pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -5407,7 +5407,7 @@ pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { }; } -pub fn isNoReturn(ip: InternPool, ty: Index) bool { +pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { @@ -5420,7 +5420,7 @@ pub fn isNoReturn(ip: InternPool, ty: Index) bool { /// This is a particularly hot function, so we operate directly on encodings /// rather than the more straightforward implementation of calling `indexToKey`. -pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { +pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { return switch (index) { .u1_type, .u8_type, diff --git a/src/Liveness.zig b/src/Liveness.zig index 4f3d87d3c2..b12b638208 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -225,7 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, - ip: InternPool, + ip: *const InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -1139,7 +1139,7 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -1291,7 +1291,7 @@ fn analyzeOperands( // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. - if (!immediate_death or a.air.mustLower(inst, ip.*)) { + if (!immediate_death or a.air.mustLower(inst, ip)) { // Note that it's important we iterate over the operands backwards, so that if a dying // operand is used multiple times we mark its last use as its death. var i = operands.len; @@ -1837,7 +1837,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. 
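All the `mustLower(inst, ip)` call sites guard the same gate in each backend's `genBody`: skip an instruction only when liveness marks its result unused and nothing about the instruction forces lowering anyway. A toy version of that loop; `Inst`, `unused`, and `has_side_effects` are illustrative fields, not the real AIR encoding:

const std = @import("std");

const Inst = struct { unused: bool, has_side_effects: bool };

// Stand-in for Air.mustLower: an instruction must be lowered even when
// unused if it has observable side effects.
fn mustLower(inst: Inst) bool {
    return inst.has_side_effects;
}

pub fn main() void {
    const body = [_]Inst{
        .{ .unused = true, .has_side_effects = false }, // skipped entirely
        .{ .unused = true, .has_side_effects = true }, // lowered anyway (e.g. a store)
        .{ .unused = false, .has_side_effects = false }, // lowered: result is used
    };
    for (body, 0..) |inst, i| {
        if (inst.unused and !mustLower(inst)) continue;
        std.debug.print("lowering %{d}\n", .{i});
    }
}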
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index e8b024eb6f..a5fc592894 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -32,7 +32,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) { // This instruction will not be lowered and should be ignored. continue; } @@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); diff --git a/src/Module.zig b/src/Module.zig index ffc6a95fe1..b1a74932d3 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6726,7 +6726,7 @@ pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { - const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern()); + const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern()); return mod.ptrType(.{ .elem_type = new_child.toIntern(), diff --git a/src/Sema.zig b/src/Sema.zig index 8836e89528..b4e07d749e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33624,7 +33624,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool); + return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 5874440e50..d01a93dd0d 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -660,7 +660,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -6412,10 +6412,10 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 360f52cb30..69a156999b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -644,7 +644,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -6317,10 +6317,10 @@ fn parseRegName(name: []const u8) ?Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5417650dd5..809c388532 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -478,7 +478,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -2737,10 +2737,10 @@ fn parseRegName(name: []const u8) ?Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 3bcdd5ad25..fde5424ddc 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -498,7 +498,7 @@ fn 
genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -4883,10 +4883,10 @@ fn wantSafety(self: *Self) bool { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index af2b37312d..e397cf29f8 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2076,7 +2076,7 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { const ip = &mod.intern_pool; for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) { + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) { continue; } const old_bookkeeping_value = func.air_bookkeeping; @@ -7436,10 +7436,10 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { const mod = func.bin_file.base.options.module.?; - return func.air.typeOf(inst, mod.intern_pool); + return func.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { const mod = func.bin_file.base.options.module.?; - return func.air.typeOfIndex(inst, mod.intern_pool); + return func.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dbb3d977b8..b9cc3f7052 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1738,7 +1738,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -11992,10 +11992,10 @@ fn hasAllFeatures(self: *Self, features: anytype) bool { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d705d6143e..0db223c6b6 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -489,12 +489,12 @@ pub const Function = struct { fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { const mod = f.object.dg.module; - return f.air.typeOf(inst, mod.intern_pool); + return f.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { const mod = f.object.dg.module; - return f.air.typeOfIndex(inst, mod.intern_pool); + return f.air.typeOfIndex(inst, &mod.intern_pool); } }; @@ -2808,7 +2808,7 @@ fn 
genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*)) + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip)) continue; const result_value = switch (air_tags[inst]) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 606c57b187..8cf6a51ba1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1574,7 +1574,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. - const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern()); + const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern()); if (ptr_info.sentinel != .none or ptr_info.address_space != .generic or @@ -4330,7 +4330,7 @@ pub const FuncGen = struct { const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const opt_value: ?*llvm.Value = switch (air_tags[inst]) { @@ -8055,7 +8055,7 @@ pub const FuncGen = struct { const mod = fg.dg.module; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { - switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) { + switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) { .none => continue, .write, .noret, .complex => return false, .tomb => return true, @@ -9920,12 +9920,12 @@ pub const FuncGen = struct { fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { const mod = fg.dg.module; - return fg.air.typeOf(inst, mod.intern_pool); + return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { const mod = fg.dg.module; - return fg.air.typeOfIndex(inst, mod.intern_pool); + return fg.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 0fbcb47f71..ddd7f36435 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1688,7 +1688,7 @@ pub const DeclGen = struct { const mod = self.module; const ip = &mod.intern_pool; // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; const air_tags = self.air.instructions.items(.tag); @@ -3339,11 +3339,11 @@ pub const DeclGen = struct { fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { const mod = self.module; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { const mod = self.module; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/print_air.zig b/src/print_air.zig index be7bc9610d..8da80e1360 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -978,6 +978,6 @@ const Writer = struct { fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { const mod = w.module; - return w.air.typeOfIndex(inst, mod.intern_pool); + return w.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/type.zig b/src/type.zig index f285caff95..fc7821b50b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -102,7 +102,7 @@ pub const Type = struct { }; } - pub fn ptrInfoIp(ip: InternPool, ty: InternPool.Index) 
InternPool.Key.PtrType {
+    pub fn ptrInfoIp(ip: *const InternPool, ty: InternPool.Index) InternPool.Key.PtrType {
         return switch (ip.indexToKey(ty)) {
             .ptr_type => |p| p,
             .opt_type => |child| switch (ip.indexToKey(child)) {
@@ -114,7 +114,7 @@ pub const Type = struct {
     }
 
     pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data {
-        return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.toIntern()));
+        return Payload.Pointer.Data.fromKey(ptrInfoIp(&mod.intern_pool, ty.toIntern()));
     }
 
     pub fn eql(a: Type, b: Type, mod: *const Module) bool {
@@ -1832,10 +1832,10 @@ pub const Type = struct {
     }
 
     pub fn isVolatilePtr(ty: Type, mod: *const Module) bool {
-        return isVolatilePtrIp(ty, mod.intern_pool);
+        return isVolatilePtrIp(ty, &mod.intern_pool);
     }
 
-    pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool {
+    pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
         return switch (ip.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| ptr_type.is_volatile,
             else => false,
@@ -1920,10 +1920,10 @@ pub const Type = struct {
     /// For *T, returns T.
     /// For [*]T, returns T.
     pub fn childType(ty: Type, mod: *const Module) Type {
-        return childTypeIp(ty, mod.intern_pool);
+        return childTypeIp(ty, &mod.intern_pool);
     }
 
-    pub fn childTypeIp(ty: Type, ip: InternPool) Type {
+    pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
         return ip.childType(ty.toIntern()).toType();
     }
 
@@ -2164,10 +2164,10 @@ pub const Type = struct {
     /// Asserts the type is an array or vector or struct.
     pub fn arrayLen(ty: Type, mod: *const Module) u64 {
-        return arrayLenIp(ty, mod.intern_pool);
+        return arrayLenIp(ty, &mod.intern_pool);
     }
 
-    pub fn arrayLenIp(ty: Type, ip: InternPool) u64 {
+    pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
         return switch (ip.indexToKey(ty.toIntern())) {
             .vector_type => |vector_type| vector_type.len,
             .array_type => |array_type| array_type.len,
@@ -2385,10 +2385,10 @@ pub const Type = struct {
     /// Asserts the type is a function or a function pointer.
     pub fn fnReturnType(ty: Type, mod: *Module) Type {
-        return fnReturnTypeIp(ty, mod.intern_pool);
+        return fnReturnTypeIp(ty, &mod.intern_pool);
     }
 
-    pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type {
+    pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type {
         return switch (ip.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
             .func_type => |func_type| func_type.return_type,
-- 
cgit v1.2.3


From d41111d7ef531f6f55a19c56205d6d2f1134c224 Mon Sep 17 00:00:00 2001
From: Motiejus Jakštys
Date: Fri, 9 Jun 2023 16:02:18 -0700
Subject: mem: rename align*Generic to mem.align*

Anecdote 1: The generic version is far more popular than the non-generic
one in the Zig codebase:

    git grep -w alignForward | wc -l
    56
    git grep -w alignForwardGeneric | wc -l
    149
    git grep -w alignBackward | wc -l
    6
    git grep -w alignBackwardGeneric | wc -l
    15

Anecdote 2: In my project (turbonss), which does a lot of arithmetic and
alignment, I exclusively use the Generic functions.

Anecdote 3: we used only the Generic versions in the Macho Man's linker
workshop.
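The rename also changes the shape of every call site: the old usize-only
alignForward(addr, alignment) and the explicit alignForwardGeneric(T, addr,
alignment) collapse into a single alignForward(comptime T, addr, alignment).
A minimal before/after sketch of a call site (illustrative only, not part of
the patch; the expected values come from the updated tests in lib/std/mem.zig):

    const std = @import("std");

    test "alignForward after the rename" {
        // Old spellings, which no longer compile after this commit:
        //   std.mem.alignForward(17, 8);                 // was usize-only
        //   std.mem.alignForwardGeneric(u64, 100, 512);  // was the generic spelling
        // New spelling: one generic function, integer type passed explicitly.
        try std.testing.expect(std.mem.alignForward(usize, 17, 8) == 24);
        try std.testing.expect(std.mem.alignForward(u64, 100, 512) == 512);
        try std.testing.expect(std.mem.alignBackward(usize, 17, 8) == 16);
    }

Because the old Generic names are retained as @compileError("renamed to ...")
stubs rather than deleted outright, any stale call site fails loudly at
compile time instead of silently resolving to old code.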
--- lib/std/Thread.zig | 8 ++--- lib/std/dynamic_library.zig | 4 +-- lib/std/hash_map.zig | 12 +++---- lib/std/heap.zig | 8 ++--- lib/std/heap/PageAllocator.zig | 12 +++---- lib/std/heap/WasmPageAllocator.zig | 6 ++-- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/general_purpose_allocator.zig | 1 + lib/std/mem.zig | 56 ++++++++++++------------------ lib/std/mem/Allocator.zig | 4 +-- lib/std/meta/trailer_flags.zig | 6 ++-- lib/std/os/linux/tls.zig | 8 ++--- lib/std/os/uefi/pool_allocator.zig | 4 +-- lib/std/tar.zig | 2 +- lib/std/target.zig | 2 +- lib/std/testing.zig | 2 +- src/Module.zig | 10 +++--- src/arch/aarch64/CodeGen.zig | 6 ++-- src/arch/arm/CodeGen.zig | 10 +++--- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +-- src/arch/wasm/CodeGen.zig | 8 ++--- src/arch/x86_64/CodeGen.zig | 10 +++--- src/codegen.zig | 8 ++--- src/codegen/llvm.zig | 44 +++++++++++------------ src/codegen/spirv.zig | 4 +-- src/link/Coff.zig | 30 ++++++++-------- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 10 +++--- src/link/MachO.zig | 50 +++++++++++++------------- src/link/MachO/CodeSignature.zig | 8 ++--- src/link/MachO/DebugSymbols.zig | 18 +++++----- src/link/MachO/load_commands.zig | 8 ++--- src/link/MachO/thunks.zig | 6 ++-- src/link/MachO/zld.zig | 34 +++++++++--------- src/link/Wasm.zig | 16 ++++----- src/objcopy.zig | 6 ++-- src/type.zig | 22 ++++++------ 39 files changed, 223 insertions(+), 232 deletions(-) (limited to 'src/arch/riscv64/CodeGen.zig') diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 76650a9072..d7bcbee66f 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -931,18 +931,18 @@ const LinuxThreadImpl = struct { guard_offset = bytes; bytes += @max(page_size, config.stack_size); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); stack_offset = bytes; - bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align); + bytes = std.mem.alignForward(usize, bytes, linux.tls.tls_image.alloc_align); tls_offset = bytes; bytes += linux.tls.tls_image.alloc_size; - bytes = std.mem.alignForward(bytes, @alignOf(Instance)); + bytes = std.mem.alignForward(usize, bytes, @alignOf(Instance)); instance_offset = bytes; bytes += @sizeOf(Instance); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); break :blk bytes; }; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 94da2f4d6d..928d0cc9c3 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -124,7 +124,7 @@ pub const ElfDynLib = struct { // corresponding to the actual LOAD sections. 
const file_bytes = try os.mmap( null, - mem.alignForward(size, mem.page_size), + mem.alignForward(usize, size, mem.page_size), os.PROT.READ, os.MAP.PRIVATE, fd, @@ -187,7 +187,7 @@ pub const ElfDynLib = struct { // extra nonsense mapped before/after the VirtAddr,MemSiz const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; - const extended_memsz = mem.alignForward(ph.p_memsz + extra_bytes, mem.page_size); + const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size); const ptr = @intToPtr([*]align(mem.page_size) u8, aligned_addr); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 5b539ddaad..8c05dfeca5 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1545,13 +1545,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + new_capacity * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + new_capacity * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = try allocator.alignedAlloc(u8, max_align, total_size); const ptr = @ptrToInt(slice.ptr); @@ -1581,13 +1581,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + cap * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + cap * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size]; allocator.free(slice); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 7d2a66df1e..7b4bf3af21 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -83,7 +83,7 @@ const CAllocator = struct { // the aligned address. 
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; @@ -249,7 +249,7 @@ pub const wasm_allocator = Allocator{ /// Verifies that the adjusted length will still map to the full length pub fn alignPageAllocLen(full_len: usize, len: usize) usize { const aligned_len = mem.alignAllocLen(full_len, len); - assert(mem.alignForward(aligned_len, mem.page_size) == full_len); + assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len); return aligned_len; } @@ -307,7 +307,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { }; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const root_addr = @ptrToInt(ptr); - const aligned_addr = mem.alignForward(root_addr, ptr_align); + const aligned_addr = mem.alignForward(usize, root_addr, ptr_align); const buf = @intToPtr([*]u8, aligned_addr)[0..n]; getRecordPtr(buf).* = root_addr; return buf.ptr; @@ -840,7 +840,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { // which is 16 pages, hence the 32. This test may require to increase // the size of the allocations feeding the `allocator` parameter if they // fail, because of this high over-alignment we want to have. - while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) { + while (@ptrToInt(slice.ptr) == mem.alignForward(usize, @ptrToInt(slice.ptr), mem.page_size * 32)) { try stuff_to_free.append(slice); slice = try allocator.alignedAlloc(u8, 16, alloc_size); } diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 2c8146caf3..5da570fa42 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -17,7 +17,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { _ = log2_align; assert(n > 0); if (n > maxInt(usize) - (mem.page_size - 1)) return null; - const aligned_len = mem.alignForward(n, mem.page_size); + const aligned_len = mem.alignForward(usize, n, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; @@ -54,14 +54,14 @@ fn resize( ) bool { _ = log2_buf_align; _ = return_address; - const new_size_aligned = mem.alignForward(new_size, mem.page_size); + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; if (new_size <= buf_unaligned.len) { const base_addr = @ptrToInt(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; - const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); + const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size); if (old_addr_end > new_addr_end) { // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. 
@@ -73,14 +73,14 @@ fn resize( } return true; } - const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size); + const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned <= old_size_aligned) { return true; } return false; } - const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned == buf_aligned_len) return true; @@ -103,7 +103,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v if (builtin.os.tag == .windows) { os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); } else { - const buf_aligned_len = mem.alignForward(slice.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); const ptr = @alignCast(mem.page_size, slice.ptr); os.munmap(ptr[0..buf_aligned_len]); } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig index 1370af022c..63ae226196 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/heap/WasmPageAllocator.zig @@ -100,7 +100,7 @@ fn extendedOffset() usize { } fn nPages(memsize: usize) usize { - return mem.alignForward(memsize, mem.page_size) / mem.page_size; + return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size; } fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { @@ -170,7 +170,7 @@ fn resize( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); if (new_len > aligned_len) return false; const current_n = nPages(aligned_len); const new_n = nPages(new_len); @@ -190,7 +190,7 @@ fn free( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); const current_n = nPages(aligned_len); const base = nPages(@ptrToInt(buf.ptr)); freePages(base, base + current_n); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c7e0569067..f858510bcf 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -186,7 +186,7 @@ pub const ArenaAllocator = struct { const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(addr, ptr_align); + const adjusted_addr = mem.alignForward(usize, addr, ptr_align); const adjusted_index = self.state.end_index + (adjusted_addr - addr); const new_end_index = adjusted_index + n; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index ef88787fc6..51b6c1744f 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -309,6 +309,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { fn bucketStackFramesStart(size_class: usize) usize { return mem.alignForward( + usize, @sizeOf(BucketHeader) + usedBitsCount(size_class), @alignOf(usize), ); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 87f436d156..23e24b0c09 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4213,23 +4213,17 @@ test "sliceAsBytes preserves pointer attributes" { /// Round an address up to the next (or current) aligned address. 
/// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForward(addr: usize, alignment: usize) usize { - return alignForwardGeneric(usize, addr, alignment); +pub fn alignForward(comptime T: type, addr: T, alignment: T) T { + assert(isValidAlignGeneric(T, alignment)); + return alignBackward(T, addr + (alignment - 1), alignment); } pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); - return alignForward(addr, alignment); + return alignForward(usize, addr, alignment); } -/// Round an address up to the next (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. -/// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(alignment > 0); - assert(std.math.isPowerOfTwo(alignment)); - return alignBackwardGeneric(T, addr + (alignment - 1), alignment); -} +pub const alignForwardGeneric = @compileError("renamed to alignForward"); /// Force an evaluation of the expression; this tries to prevent /// the compiler from optimizing the computation away even if the @@ -4322,38 +4316,32 @@ test "doNotOptimizeAway" { } test "alignForward" { - try testing.expect(alignForward(1, 1) == 1); - try testing.expect(alignForward(2, 1) == 2); - try testing.expect(alignForward(1, 2) == 2); - try testing.expect(alignForward(2, 2) == 2); - try testing.expect(alignForward(3, 2) == 4); - try testing.expect(alignForward(4, 2) == 4); - try testing.expect(alignForward(7, 8) == 8); - try testing.expect(alignForward(8, 8) == 8); - try testing.expect(alignForward(9, 8) == 16); - try testing.expect(alignForward(15, 8) == 16); - try testing.expect(alignForward(16, 8) == 16); - try testing.expect(alignForward(17, 8) == 24); + try testing.expect(alignForward(usize, 1, 1) == 1); + try testing.expect(alignForward(usize, 2, 1) == 2); + try testing.expect(alignForward(usize, 1, 2) == 2); + try testing.expect(alignForward(usize, 2, 2) == 2); + try testing.expect(alignForward(usize, 3, 2) == 4); + try testing.expect(alignForward(usize, 4, 2) == 4); + try testing.expect(alignForward(usize, 7, 8) == 8); + try testing.expect(alignForward(usize, 8, 8) == 8); + try testing.expect(alignForward(usize, 9, 8) == 16); + try testing.expect(alignForward(usize, 15, 8) == 16); + try testing.expect(alignForward(usize, 16, 8) == 16); + try testing.expect(alignForward(usize, 17, 8) == 24); } /// Round an address down to the previous (or current) aligned address. /// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { if (isValidAlign(alignment)) - return alignBackward(i, alignment); + return alignBackward(usize, i, alignment); assert(alignment != 0); return i - @mod(i, alignment); } /// Round an address down to the previous (or current) aligned address. /// The alignment must be a power of 2 and greater than 0. -pub fn alignBackward(addr: usize, alignment: usize) usize { - return alignBackwardGeneric(usize, addr, alignment); -} - -/// Round an address down to the previous (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. 
-pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { +pub fn alignBackward(comptime T: type, addr: T, alignment: T) T { assert(isValidAlignGeneric(T, alignment)); // 000010000 // example alignment // 000001111 // subtract 1 @@ -4361,6 +4349,8 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { return addr & ~(alignment - 1); } +pub const alignBackwardGeneric = @compileError("renamed to alignBackward"); + /// Returns whether `alignment` is a valid alignment, meaning it is /// a positive power of 2. pub fn isValidAlign(alignment: usize) bool { @@ -4391,7 +4381,7 @@ pub fn isAligned(addr: usize, alignment: usize) bool { } pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool { - return alignBackwardGeneric(T, addr, alignment) == addr; + return alignBackward(T, addr, alignment) == addr; } test "isAligned" { @@ -4439,7 +4429,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali const begin_address = @ptrToInt(bytes.ptr); const end_address = begin_address + bytes.len; - const begin_address_aligned = mem.alignForward(begin_address, new_alignment); + const begin_address_aligned = mem.alignForward(usize, begin_address, new_alignment); const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) { error.Overflow => return null, }; diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 5110534ed4..4a1ff86721 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -208,7 +208,7 @@ pub fn allocAdvancedWithRetAddr( comptime assert(a <= mem.page_size); if (n == 0) { - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), a); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a); return @intToPtr([*]align(a) T, ptr)[0..0]; } @@ -267,7 +267,7 @@ pub fn reallocAdvanced( } if (new_n == 0) { self.free(old_mem); - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), Slice.alignment); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment); return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0]; } diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig index 0c43a5ff28..a4d83dcbb3 100644 --- a/lib/std/meta/trailer_flags.zig +++ b/lib/std/meta/trailer_flags.zig @@ -105,9 +105,9 @@ pub fn TrailerFlags(comptime Fields: type) type { const active = (self.bits & (1 << i)) != 0; if (i == @enumToInt(field)) { assert(active); - return mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + return mem.alignForward(usize, off, @alignOf(field_info.type)); } else if (active) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + off = mem.alignForward(usize, off, @alignOf(field_info.type)); off += @sizeOf(field_info.type); } } @@ -123,7 +123,7 @@ pub fn TrailerFlags(comptime Fields: type) type { if (@sizeOf(field.type) == 0) continue; if ((self.bits & (1 << i)) != 0) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field.type)); + off = mem.alignForward(usize, off, @alignOf(field.type)); off += @sizeOf(field.type); } } diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index 311e5609e8..d765e403c8 100644 --- a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -233,7 +233,7 @@ fn initTLS(phdrs: []elf.Phdr) void { l += tls_align_factor - delta; l += @sizeOf(CustomData); tcb_offset = l; - l += mem.alignForward(tls_tcb_size, tls_align_factor); + l += mem.alignForward(usize, tls_tcb_size, tls_align_factor); 
data_offset = l; l += tls_data_alloc_size; break :blk l; @@ -241,14 +241,14 @@ fn initTLS(phdrs: []elf.Phdr) void { .VariantII => blk: { var l: usize = 0; data_offset = l; - l += mem.alignForward(tls_data_alloc_size, tls_align_factor); + l += mem.alignForward(usize, tls_data_alloc_size, tls_align_factor); // The thread pointer is aligned to p_align tcb_offset = l; l += tls_tcb_size; // The CustomData structure is right after the TCB with no padding // in between so it can be easily found l += @sizeOf(CustomData); - l = mem.alignForward(l, @alignOf(DTV)); + l = mem.alignForward(usize, l, @alignOf(DTV)); dtv_offset = l; l += @sizeOf(DTV); break :blk l; @@ -329,7 +329,7 @@ pub fn initStaticTLS(phdrs: []elf.Phdr) void { // Make sure the slice is correctly aligned. const begin_addr = @ptrToInt(alloc_tls_area.ptr); - const begin_aligned_addr = mem.alignForward(begin_addr, tls_image.alloc_align); + const begin_aligned_addr = mem.alignForward(usize, begin_addr, tls_image.alloc_align); const start = begin_aligned_addr - begin_addr; break :blk alloc_tls_area[start .. start + tls_image.alloc_size]; }; diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig index 8f26aac32c..00b8941974 100644 --- a/lib/std/os/uefi/pool_allocator.zig +++ b/lib/std/os/uefi/pool_allocator.zig @@ -24,7 +24,7 @@ const UefiPoolAllocator = struct { const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); - const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align); + const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align); const full_len = metadata_len + len; @@ -32,7 +32,7 @@ const UefiPoolAllocator = struct { if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .Success) return null; const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), ptr_align); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), ptr_align); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; diff --git a/lib/std/tar.zig b/lib/std/tar.zig index c570c8e09c..14a9ce5d3f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -116,7 +116,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi const header: Header = .{ .bytes = buffer[start..][0..512] }; start += 512; const file_size = try header.fileSize(); - const rounded_file_size = std.mem.alignForwardGeneric(u64, file_size, 512); + const rounded_file_size = std.mem.alignForward(u64, file_size, 512); const pad_len = @intCast(usize, rounded_file_size - file_size); const unstripped_file_name = try header.fullFileName(&file_name_buffer); switch (header.fileType()) { diff --git a/lib/std/target.zig b/lib/std/target.zig index 15bb65cd4b..4c7bcfc37a 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1944,7 +1944,7 @@ pub const Target = struct { 16 => 2, 32 => 4, 64 => 8, - 80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))), + 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))), 128 => 16, else => unreachable, }, diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 6b1e0bb640..bbb0905121 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -305,7 +305,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const var window_start: usize = 0; if (@max(actual.len, expected.len) > max_window_size) { const 
alignment = if (T == u8) 16 else 2; - window_start = std.mem.alignBackward(diff_index - @min(diff_index, alignment), alignment); + window_start = std.mem.alignBackward(usize, diff_index - @min(diff_index, alignment), alignment); } const expected_window = expected[window_start..@min(expected.len, window_start + max_window_size)]; const expected_truncated = window_start + expected_window.len < expected.len; diff --git a/src/Module.zig b/src/Module.zig index 8c5a86652d..8d9f9593dd 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1293,7 +1293,7 @@ pub const Union = struct { payload_align = @max(payload_align, 1); if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ - .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), + .abi_size = std.mem.alignForward(u64, payload_size, payload_align), .abi_align = payload_align, .most_aligned_field = most_aligned_field, .most_aligned_field_size = most_aligned_field_size, @@ -1314,18 +1314,18 @@ pub const Union = struct { if (tag_align >= payload_align) { // {Tag, Payload} size += tag_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); size += payload_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); padding = @intCast(u32, size - prev_size); } else { // {Payload, Tag} size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); size += tag_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); padding = @intCast(u32, size - prev_size); } return .{ diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index dd752555b7..1355f96231 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -566,7 +566,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; if (math.cast(u12, stack_size)) |size| { @@ -1011,7 +1011,7 @@ fn allocMem( std.math.ceilPowerOfTwoAssert(u32, abi_size); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, adjusted_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6328,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 69a156999b..a2a5a3d4d3 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -560,7 +560,7 @@ fn gen(self: *Self) !void { // 
Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; self.mir_instructions.set(sub_reloc, .{ @@ -991,7 +991,7 @@ fn allocMem( assert(abi_align > 0); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6214,7 +6214,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) |ty, i| { if (ty.toType().abiAlignment(mod) == 8) - ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); + ncrn = std.mem.alignForward(usize, ncrn, 2); const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { @@ -6229,7 +6229,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else { ncrn = 4; if (ty.toType().abiAlignment(mod) == 8) - nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); + nsaa = std.mem.alignForward(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; nsaa += param_size; @@ -6267,7 +6267,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index e4a07f22bf..a4a4fe472b 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -13,7 +13,7 @@ pub const Class = union(enum) { i64_array: u8, fn arrSize(total_size: u64, arr_size: u64) Class { - const count = @intCast(u8, std.mem.alignForwardGeneric(u64, total_size, arr_size) / arr_size); + const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size); if (arr_size == 32) { return .{ .i32_array = count }; } else { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 809c388532..c6ac3255c6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -792,7 +792,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align); self.next_stack_offset = offset + abi_size; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b660126604..e339794fd4 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -423,7 +423,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + 
abi.stack_reserved_area; - const stack_size = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align); if (math.cast(i13, stack_size)) |size| { self.mir_instructions.set(save_inst, .{ .tag = .save, @@ -2781,7 +2781,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index aa44dc2bc8..495ca7f6dd 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1286,7 +1286,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // store stack pointer so we can restore it when we return from the function try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); // get the total stack size - const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment); + const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment); try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); // substract it from the current stack pointer try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); @@ -1531,7 +1531,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { func.stack_alignment = abi_align; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align); + const offset = std.mem.alignForward(u32, func.stack_size, abi_align); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -1564,7 +1564,7 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { func.stack_alignment = abi_alignment; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment); + const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - break :blk @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align)); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6e13a55008..a33faecca3 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2150,7 +2150,7 @@ fn setFrameLoc( const frame_i = @enumToInt(frame_index); if (aligned) { const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i]; - offset.* = mem.alignForwardGeneric(i32, offset.*, alignment); + offset.* = mem.alignForward(i32, offset.*, alignment); } self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* }); offset.* += self.frame_allocs.items(.abi_size)[frame_i]; @@ -2207,7 +2207,7 @@ fn computeFrameLayout(self: *Self) 
!FrameLayout { self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true); for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true); rsp_offset += stack_frame_align_offset; - rsp_offset = mem.alignForwardGeneric(i32, rsp_offset, @as(i32, 1) << needed_align); + rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset -= stack_frame_align_offset; frame_size[@enumToInt(FrameIndex.call_frame)] = @intCast(u31, rsp_offset - frame_offset[@enumToInt(FrameIndex.stack_frame)]); @@ -11807,7 +11807,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11847,7 +11847,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11858,7 +11858,7 @@ fn resolveCallingConventionValues( else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}), } - result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, result.stack_align); + result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align); return result; } diff --git a/src/codegen.zig b/src/codegen.zig index 6145d8778b..430562fe9b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -290,7 +290,7 @@ pub fn generateSymbol( .fail => |em| return .{ .fail = em }, } const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -303,7 +303,7 @@ pub fn generateSymbol( const begin = code.items.len; try code.writer().writeInt(u16, err_val, endian); const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -1020,7 +1020,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); + return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align); } } @@ -1029,7 +1029,7 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); + return mem.alignForward(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 
47be4148d3..11cd752000 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1633,7 +1633,7 @@ pub const Object = struct { var offset: u64 = 0; offset += ptr_size; - offset = std.mem.alignForwardGeneric(u64, offset, len_align); + offset = std.mem.alignForward(u64, offset, len_align); const len_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1801,7 +1801,7 @@ pub const Object = struct { var offset: u64 = 0; offset += payload_size; - offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); + offset = std.mem.alignForward(u64, offset, non_null_align); const non_null_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1888,12 +1888,12 @@ pub const Object = struct { error_index = 0; payload_index = 1; error_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); + payload_offset = std.mem.alignForward(u64, error_size, payload_align); } else { payload_index = 0; error_index = 1; payload_offset = 0; - error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); + error_offset = std.mem.alignForward(u64, payload_size, error_align); } var fields: [2]*llvm.DIType = undefined; @@ -1995,7 +1995,7 @@ pub const Object = struct { const field_size = field_ty.toType().abiSize(mod); const field_align = field_ty.toType().abiAlignment(mod); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = if (tuple.names.len != 0) @@ -2086,7 +2086,7 @@ pub const Object = struct { const field = field_and_index.field; const field_size = field.ty.abiSize(mod); const field_align = field.alignment(mod, layout); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); @@ -2242,10 +2242,10 @@ pub const Object = struct { var payload_offset: u64 = undefined; if (layout.tag_align >= layout.payload_align) { tag_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align); } else { payload_offset = 0; - tag_offset = std.mem.alignForwardGeneric(u64, layout.payload_size, layout.tag_align); + tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align); } const tag_di = dib.createMemberType( @@ -2861,9 +2861,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_error_type; fields_buf[1] = llvm_payload_type; const payload_end = - std.mem.alignForwardGeneric(u64, error_size, payload_align) + + std.mem.alignForward(u64, error_size, payload_align) + payload_size; - const abi_size = std.mem.alignForwardGeneric(u64, payload_end, error_align); + const abi_size = std.mem.alignForward(u64, payload_end, error_align); const padding = @intCast(c_uint, abi_size - payload_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2874,9 +2874,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_payload_type; fields_buf[1] = llvm_error_type; const error_end = - std.mem.alignForwardGeneric(u64, payload_size, error_align) + + std.mem.alignForward(u64, payload_size, error_align) + error_size; - const abi_size = std.mem.alignForwardGeneric(u64, error_end, payload_align); + const abi_size = std.mem.alignForward(u64, error_end, payload_align); 
const padding = @intCast(c_uint, abi_size - error_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2910,7 +2910,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2924,7 +2924,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -2979,7 +2979,7 @@ pub const DeclGen = struct { field_align < field_ty_align; big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2993,7 +2993,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3552,7 +3552,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3575,7 +3575,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3650,7 +3650,7 @@ pub const DeclGen = struct { const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3673,7 +3673,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -10274,7 +10274,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -10308,7 +10308,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field.alignment(mod, layout); 
         big_align = @max(big_align, field_align);
         const prev_offset = offset;
-        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+        offset = std.mem.alignForward(u64, offset, field_align);
 
         const padding_len = offset - prev_offset;
         if (padding_len > 0) {
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 4fd91aded4..dc1f23dad4 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -472,12 +472,12 @@ pub const DeclGen = struct {
             try self.initializers.append(result_id);
 
             self.partial_word.len = 0;
-            self.size = std.mem.alignForwardGeneric(u32, self.size, @sizeOf(Word));
+            self.size = std.mem.alignForward(u32, self.size, @sizeOf(Word));
         }
 
         /// Fill the buffer with undefined values until the size is aligned to `align`.
         fn fillToAlign(self: *@This(), alignment: u32) !void {
-            const target_size = std.mem.alignForwardGeneric(u32, self.size, alignment);
+            const target_size = std.mem.alignForward(u32, self.size, alignment);
             try self.addUndef(target_size - self.size);
         }
 
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index f7785858dd..202bb71e9b 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -437,10 +437,10 @@ fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.Section
     const vaddr = blk: {
         if (index == 0) break :blk self.page_size;
         const prev_header = self.sections.items(.header)[index - 1];
-        break :blk mem.alignForwardGeneric(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size);
+        break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size);
     };
     // We commit more memory than needed upfront so that we don't have to reallocate too soon.
-    const memsz = mem.alignForwardGeneric(u32, size, self.page_size) * 100;
+    const memsz = mem.alignForward(u32, size, self.page_size) * 100;
     log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
         name,
         off,
@@ -505,8 +505,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void {
 fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void {
     const header = &self.sections.items(.header)[sect_id];
     const increased_size = padToIdeal(needed_size);
-    const old_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, header.virtual_size, self.page_size);
-    const new_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, increased_size, self.page_size);
+    const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, self.page_size);
+    const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, self.page_size);
     const diff = new_aligned_end - old_aligned_end;
     log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), diff });
 
@@ -567,7 +567,7 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme
             const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity;
             const capacity_end_vaddr = sym.value + capacity;
             const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
-            const new_start_vaddr = mem.alignBackwardGeneric(u32, new_start_vaddr_unaligned, alignment);
+            const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment);
             if (new_start_vaddr < ideal_capacity_end_vaddr) {
                 // Additional bookkeeping here to notice if this free list node
                 // should be deleted because the atom that it points to has grown to take up
@@ -596,11 +596,11 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme
             const last_symbol = last.getSymbol(self);
             const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
             const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
-            const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
+            const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment);
             atom_placement = last_index;
             break :blk new_start_vaddr;
         } else {
-            break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
+            break :blk mem.alignForward(u32, header.virtual_address, alignment);
         }
     };
 
@@ -722,7 +722,7 @@ pub fn createAtom(self: *Coff) !Atom.Index {
 fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
     const atom = self.getAtom(atom_index);
     const sym = atom.getSymbol(self);
-    const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
+    const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value;
     const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
     if (!need_realloc) return sym.value;
     return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -1798,7 +1798,7 @@ fn writeBaseRelocations(self: *Coff) !void {
 
             for (offsets.items) |offset| {
                 const rva = sym.value + offset;
-                const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
+                const page = mem.alignBackward(u32, rva, self.page_size);
                 const gop = try page_table.getOrPut(page);
                 if (!gop.found_existing) {
                     gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
@@ -1819,7 +1819,7 @@ fn writeBaseRelocations(self: *Coff) !void {
 
             if (sym.section_number == .UNDEFINED) continue;
             const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
-            const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
+            const page = mem.alignBackward(u32, rva, self.page_size);
             const gop = try page_table.getOrPut(page);
             if (!gop.found_existing) {
                 gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
@@ -1907,7 +1907,7 @@ fn writeImportTables(self: *Coff) !void {
         lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName);
         for (itable.entries.items) |entry| {
             const sym_name = self.getSymbolName(entry);
-            names_table_size += 2 + mem.alignForwardGeneric(u32, @intCast(u32, sym_name.len + 1), 2);
+            names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2);
         }
         dll_names_size += @intCast(u32, lib_name.len + ext.len + 1);
     }
@@ -2102,7 +2102,7 @@ fn writeHeader(self: *Coff) !void {
     };
     const subsystem: coff.Subsystem = .WINDOWS_CUI;
     const size_of_image: u32 = self.getSizeOfImage();
-    const size_of_headers: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), default_file_alignment);
+    const size_of_headers: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), default_file_alignment);
     const image_base = self.getImageBase();
 
     const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address;
@@ -2247,7 +2247,7 @@ fn allocatedSize(self: *Coff, start: u32) u32 {
 fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 {
     var start: u32 = 0;
     while (self.detectAllocCollision(start, object_size)) |item_end| {
-        start = mem.alignForwardGeneric(u32, item_end, min_alignment);
+        start = mem.alignForward(u32, item_end, min_alignment);
     }
     return start;
 }
@@ -2294,9 +2294,9 @@ inline fn getSectionHeadersOffset(self: Coff) u32 {
 }
 
 inline fn getSizeOfImage(self: Coff) u32 {
-    var image_size: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), self.page_size);
+    var image_size: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), self.page_size);
     for (self.sections.items(.header)) |header| {
-        image_size += mem.alignForwardGeneric(u32, header.virtual_size, self.page_size);
+        image_size += mem.alignForward(u32, header.virtual_size, self.page_size);
     }
     return image_size;
 }
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index b9b7772260..3cb1c213e9 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2152,7 +2152,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
     di_buf.appendAssumeCapacity(0); // segment_selector_size
 
     const end_header_offset = di_buf.items.len;
-    const begin_entries_offset = mem.alignForward(end_header_offset, ptr_width_bytes * 2);
+    const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2);
     di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset);
 
     // Currently only one compilation unit is supported, so the address range is simply
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 15ba9ebecc..e0d0dfc75f 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -439,7 +439,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
 pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
     var start: u64 = 0;
     while (self.detectAllocCollision(start, object_size)) |item_end| {
-        start = mem.alignForwardGeneric(u64, item_end, min_alignment);
+        start = mem.alignForward(u64, item_end, min_alignment);
     }
     return start;
 }
@@ -1173,7 +1173,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
         phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align));
     }
 
-    phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align);
+    phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
     const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset;
     phdr_table_load.p_filesz = load_align_offset + needed_size;
     phdr_table_load.p_memsz = load_align_offset + needed_size;
@@ -2215,7 +2215,7 @@ fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
 fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
     const atom = self.getAtom(atom_index);
     const sym = atom.getSymbol(self);
-    const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
+    const align_ok = mem.alignBackward(u64, sym.st_value, alignment) == sym.st_value;
     const need_realloc = !align_ok or new_block_size > atom.capacity(self);
     if (!need_realloc) return sym.st_value;
     return self.allocateAtom(atom_index, new_block_size, alignment);
@@ -2269,7 +2269,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
             const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
             const capacity_end_vaddr = big_atom_sym.st_value + capacity;
             const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
-            const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
+            const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
             if (new_start_vaddr < ideal_capacity_end_vaddr) {
                 // Additional bookkeeping here to notice if this free list node
                 // should be deleted because the block that it points to has grown to take up
@@ -2298,7 +2298,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
             const last_sym = last.getSymbol(self);
             const ideal_capacity = padToIdeal(last_sym.st_size);
             const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
-            const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
+            const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
             // Set up the metadata to be updated, after errors are no longer possible.
             atom_placement = last_index;
             break :blk new_start_vaddr;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index a3f67bc70a..024fe1f8d9 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1777,7 +1777,7 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
 fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
     const atom = self.getAtom(atom_index);
     const sym = atom.getSymbol(self);
-    const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
+    const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value;
     const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
     if (!need_realloc) return sym.n_value;
     return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -2598,7 +2598,7 @@ fn populateMissingMetadata(self: *MachO) !void {
         // The first __TEXT segment is immovable and covers MachO header and load commands.
         self.header_segment_cmd_index = @intCast(u8, self.segments.items.len);
         const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
-        const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size);
+        const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
 
         log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size });
 
@@ -2735,7 +2735,7 @@ fn populateMissingMetadata(self: *MachO) !void {
 
 fn calcPagezeroSize(self: *MachO) u64 {
     const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize;
-    const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size);
+    const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size);
     if (self.base.options.output_mode == .Lib) return 0;
     if (aligned_pagezero_vmsize == 0) return 0;
    if (aligned_pagezero_vmsize != pagezero_vmsize) {
@@ -2759,10 +2759,10 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
     const section_id = @intCast(u8, self.sections.slice().len);
     const vmaddr = blk: {
         const prev_segment = self.segments.items[segment_id - 1];
-        break :blk mem.alignForwardGeneric(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
+        break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
     };
     // We commit more memory than needed upfront so that we don't have to reallocate too soon.
-    const vmsize = mem.alignForwardGeneric(u64, opts.size, self.page_size);
+    const vmsize = mem.alignForward(u64, opts.size, self.page_size);
     const off = self.findFreeSpace(opts.size, self.page_size);
 
     log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
@@ -2790,8 +2790,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
     var section = macho.section_64{
         .sectname = makeStaticString(sectname),
         .segname = makeStaticString(segname),
-        .addr = mem.alignForwardGeneric(u64, vmaddr, opts.alignment),
-        .offset = mem.alignForwardGeneric(u32, @intCast(u32, off), opts.alignment),
+        .addr = mem.alignForward(u64, vmaddr, opts.alignment),
+        .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment),
         .size = opts.size,
         .@"align" = math.log2(opts.alignment),
         .flags = opts.flags,
@@ -2846,8 +2846,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
     }
 
     header.size = needed_size;
-    segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
-    segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
+    segment.filesize = mem.alignForward(u64, needed_size, self.page_size);
+    segment.vmsize = mem.alignForward(u64, needed_size, self.page_size);
 }
 
 fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
@@ -2855,7 +2855,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
     const segment = self.getSegmentPtr(sect_id);
     const increased_size = padToIdeal(needed_size);
     const old_aligned_end = segment.vmaddr + segment.vmsize;
-    const new_aligned_end = segment.vmaddr + mem.alignForwardGeneric(u64, increased_size, self.page_size);
+    const new_aligned_end = segment.vmaddr + mem.alignForward(u64, increased_size, self.page_size);
     const diff = new_aligned_end - old_aligned_end;
     log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{
         header.segName(),
@@ -2927,7 +2927,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
             const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity;
             const capacity_end_vaddr = sym.n_value + capacity;
             const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
-            const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
+            const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
             if (new_start_vaddr < ideal_capacity_end_vaddr) {
                 // Additional bookkeeping here to notice if this free list node
                 // should be deleted because the atom that it points to has grown to take up
@@ -2956,11 +2956,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
             const last_symbol = last.getSymbol(self);
             const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
             const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
-            const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
+            const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
             atom_placement = last_index;
             break :blk new_start_vaddr;
         } else {
-            break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
+            break :blk mem.alignForward(u64, segment.vmaddr, alignment);
         }
     };
 
@@ -3034,17 +3034,17 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
     for (self.segments.items, 0..) |segment, id| {
         if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
         if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
-            seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size);
+            seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size);
         }
         if (seg.fileoff < segment.fileoff + segment.filesize) {
-            seg.fileoff = mem.alignForwardGeneric(u64, segment.fileoff + segment.filesize, self.page_size);
+            seg.fileoff = mem.alignForward(u64, segment.fileoff + segment.filesize, self.page_size);
         }
     }
 
     try self.writeDyldInfoData();
     try self.writeSymtabs();
 
-    seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
+    seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
 }
 
 fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void {
@@ -3236,17 +3236,17 @@ fn writeDyldInfoData(self: *MachO) !void {
     assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
     const rebase_off = link_seg.fileoff;
     const rebase_size = rebase.size();
-    const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+    const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64));
     log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
 
     const bind_off = rebase_off + rebase_size_aligned;
     const bind_size = bind.size();
-    const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+    const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64));
     log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
 
     const lazy_bind_off = bind_off + bind_size_aligned;
     const lazy_bind_size = lazy_bind.size();
-    const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+    const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64));
     log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
         lazy_bind_off,
         lazy_bind_off + lazy_bind_size_aligned,
@@ -3254,7 +3254,7 @@ fn writeDyldInfoData(self: *MachO) !void {
 
     const export_off = lazy_bind_off + lazy_bind_size_aligned;
     const export_size = trie.size;
-    const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+    const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64));
     log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
 
     const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
@@ -3412,7 +3412,7 @@ fn writeStrtab(self: *MachO) !void {
     const offset = seg.fileoff + seg.filesize;
     assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
     const needed_size = self.strtab.buffer.items.len;
-    const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+    const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
     seg.filesize = offset + needed_size_aligned - seg.fileoff;
 
     log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -3447,7 +3447,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
     const offset = seg.fileoff + seg.filesize;
     assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
     const needed_size = nindirectsyms * @sizeOf(u32);
-    const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+    const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
     seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3514,10 +3514,10 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. @@ -3630,7 +3630,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 { fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 4709560ba7..02511dbe29 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -282,7 +282,7 @@ pub fn writeAdhocSignature( self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.codeLimit = opts.file_size; - const total_pages = @intCast(u32, mem.alignForward(opts.file_size, self.page_size) / self.page_size); + const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; @@ -357,7 +357,7 @@ fn parallelHash( ) !void { var wg: WaitGroup = .{}; - const total_num_chunks = mem.alignForward(file_size, self.page_size) / self.page_size; + const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size; assert(self.code_directory.code_slots.items.len >= total_num_chunks); const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks); @@ -421,7 +421,7 @@ pub fn size(self: CodeSignature) u32 { pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size(); // Approx code slots - const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size; + const total_pages = mem.alignForward(u64, file_size, self.page_size) / self.page_size; ssize += total_pages * hash_size; var n_special_slots: u32 = 0; if (self.requirements) |req| { @@ -436,7 +436,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { ssize += @sizeOf(macho.BlobIndex) + sig.size(); } ssize += n_special_slots * hash_size; - return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64))); + return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64))); } pub fn clear(self: *CodeSignature, allocator: Allocator) void { diff --git a/src/link/MachO/DebugSymbols.zig 
b/src/link/MachO/DebugSymbols.zig index 24a0c9ea34..fdb8c9c816 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -68,7 +68,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { const off = @intCast(u64, self.page_size); const ideal_size: u16 = 200 + 128 + 160 + 250; - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size }); @@ -213,7 +213,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 const segment = self.getDwarfSegmentPtr(); var offset: u64 = segment.fileoff; while (self.detectAllocCollision(offset, object_size)) |item_end| { - offset = mem.alignForwardGeneric(u64, item_end, min_alignment); + offset = mem.alignForward(u64, item_end, min_alignment); } return offset; } @@ -355,18 +355,18 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void { file_size = @max(file_size, header.offset + header.size); } - const aligned_size = mem.alignForwardGeneric(u64, file_size, self.page_size); + const aligned_size = mem.alignForward(u64, file_size, self.page_size); dwarf_segment.vmaddr = base_vmaddr; dwarf_segment.filesize = aligned_size; dwarf_segment.vmsize = aligned_size; const linkedit = self.getLinkeditSegmentPtr(); - linkedit.vmaddr = mem.alignForwardGeneric( + linkedit.vmaddr = mem.alignForward( u64, dwarf_segment.vmaddr + aligned_size, self.page_size, ); - linkedit.fileoff = mem.alignForwardGeneric( + linkedit.fileoff = mem.alignForward( u64, dwarf_segment.fileoff + aligned_size, self.page_size, @@ -458,7 +458,7 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void { try self.writeStrtab(); const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + const aligned_size = mem.alignForward(u64, seg.filesize, self.page_size); seg.vmsize = aligned_size; } @@ -497,7 +497,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { const nsyms = nlocals + nexports; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const offset = mem.alignForwardGeneric(u64, seg.fileoff, @alignOf(macho.nlist_64)); + const offset = mem.alignForward(u64, seg.fileoff, @alignOf(macho.nlist_64)); const needed_size = nsyms * @sizeOf(macho.nlist_64); seg.filesize = offset + needed_size - seg.fileoff; @@ -522,8 +522,8 @@ fn writeStrtab(self: *DebugSymbols) !void { const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)); - const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); - const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64)); + const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); + const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64)); seg.filesize = offset + needed_size - seg.fileoff; self.symtab_cmd.stroff = @intCast(u32, offset); diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 228a1ccfaf..5111f53f2a 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -17,7 +17,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld"; fn 
calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 { const darwin_path_max = 1024; const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1; - return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64)); + return mem.alignForward(u64, cmd_size + name_len, @alignOf(u64)); } const CalcLCsSizeCtx = struct { @@ -149,7 +149,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { pub fn writeDylinkerLC(lc_writer: anytype) !void { const name_len = mem.sliceTo(default_dyld_path, 0).len; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylinker_command) + name_len, @sizeOf(u64), @@ -176,7 +176,7 @@ const WriteDylibLCCtx = struct { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { const name_len = ctx.name.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylib_command) + name_len, @sizeOf(u64), @@ -253,7 +253,7 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an while (try it.next()) |rpath| { const rpath_len = rpath.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.rpath_command) + rpath_len, @sizeOf(u64), diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index 48d1faac6b..7895190005 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -109,7 +109,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { while (true) { const atom = zld.getAtom(group_end); - offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment)); + offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment)); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; @@ -153,7 +153,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { } else break; } - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); allocateThunk(zld, thunk_index, offset, header); offset += zld.thunks.items[thunk_index].getSize(); @@ -193,7 +193,7 @@ fn allocateThunk( var offset = base_offset; while (true) { const atom = zld.getAtom(atom_index); - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 4f7e615c79..7902d67d87 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -1207,7 +1207,7 @@ pub const Zld = struct { fn createSegments(self: *Zld) !void { const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) { if (aligned_pagezero_vmsize != pagezero_vmsize) { log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize}); @@ -1466,7 +1466,7 @@ pub const Zld = struct { while (true) { const atom = self.getAtom(atom_index); const atom_alignment = try math.powi(u32, 2, atom.alignment); - const atom_offset = mem.alignForwardGeneric(u64, header.size, atom_alignment); + const atom_offset = 
mem.alignForward(u64, header.size, atom_alignment); const padding = atom_offset - header.size; const sym = self.getSymbolPtr(atom.getSymbolWithLoc()); @@ -1534,7 +1534,7 @@ pub const Zld = struct { const slice = self.sections.slice(); for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| { const alignment = try math.powi(u32, 2, header.@"align"); - const start_aligned = mem.alignForwardGeneric(u64, start, alignment); + const start_aligned = mem.alignForward(u64, start, alignment); const n_sect = @intCast(u8, indexes.start + sect_id + 1); header.offset = if (header.isZerofill()) @@ -1598,8 +1598,8 @@ pub const Zld = struct { segment.vmsize = start; } - segment.filesize = mem.alignForwardGeneric(u64, segment.filesize, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, segment.vmsize, self.page_size); + segment.filesize = mem.alignForward(u64, segment.filesize, self.page_size); + segment.vmsize = mem.alignForward(u64, segment.vmsize, self.page_size); } const InitSectionOpts = struct { @@ -1709,7 +1709,7 @@ pub const Zld = struct { try self.writeSymtabs(); const seg = self.getLinkeditSegmentPtr(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromContainer( @@ -2112,17 +2112,17 @@ pub const Zld = struct { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size_aligned, @@ -2130,7 +2130,7 @@ pub const Zld = struct { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -2268,7 +2268,7 @@ pub const Zld = struct { const offset = link_seg.fileoff + link_seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow; if (padding > 0) { try buffer.ensureUnusedCapacity(padding); @@ 
-2347,7 +2347,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow); @@ -2480,7 +2480,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2515,7 +2515,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2690,7 +2690,7 @@ pub const Zld = struct { for (subsections[0..count]) |cut| { const size = cut.end - cut.start; - const num_chunks = mem.alignForward(size, chunk_size) / chunk_size; + const num_chunks = mem.alignForward(usize, size, chunk_size) / chunk_size; var i: usize = 0; while (i < num_chunks) : (i += 1) { @@ -2725,10 +2725,10 @@ pub const Zld = struct { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. 
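Both code-signature paths above (MachO.zig and zld.zig, via CodeSignature.zig) size the signature by rounding the file size up to a whole number of pages before dividing; only the width of the computation differs between call sites (usize in writeAdhocSignature and parallelHash, u64 in estimateSize). A minimal, runnable sketch of that arithmetic with the renamed helper; the sizes here are invented for illustration:

    const std = @import("std");

    test "page count by rounding up, as in the code-signature sizing" {
        const page_size: u64 = 0x1000;
        const file_size: u64 = 0x2a31; // hypothetical file size
        // Round up to a whole number of pages, then divide.
        const total_pages = std.mem.alignForward(u64, file_size, page_size) / page_size;
        try std.testing.expectEqual(@as(u64, 3), total_pages);
    }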
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index f911074473..2d2930be8c 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2118,7 +2118,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
                     }
                 }
             }
-            offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment);
+            offset = std.mem.alignForward(u32, offset, atom.alignment);
             atom.offset = offset;
             log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
                 symbol_loc.getName(wasm),
@@ -2129,7 +2129,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
             offset += atom.size;
             atom_index = atom.prev orelse break;
         }
-        segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
+        segment.size = std.mem.alignForward(u32, offset, segment.alignment);
     }
 }
 
@@ -2731,7 +2731,7 @@ fn setupMemory(wasm: *Wasm) !void {
     const is_obj = wasm.base.options.output_mode == .Obj;
 
     if (place_stack_first and !is_obj) {
-        memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
+        memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
         memory_ptr += stack_size;
         // We always put the stack pointer global at index 0
         wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
@@ -2741,7 +2741,7 @@ fn setupMemory(wasm: *Wasm) !void {
     var data_seg_it = wasm.data_segments.iterator();
     while (data_seg_it.next()) |entry| {
         const segment = &wasm.segments.items[entry.value_ptr.*];
-        memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment);
+        memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
 
         // set TLS-related symbols
         if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
@@ -2779,7 +2779,7 @@ fn setupMemory(wasm: *Wasm) !void {
     // create the memory init flag which is used by the init memory function
     if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) {
         // align to pointer size
-        memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4);
+        memory_ptr = mem.alignForward(u64, memory_ptr, 4);
         const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
         const sym = loc.getSymbol(wasm);
         sym.virtual_address = @intCast(u32, memory_ptr);
@@ -2787,7 +2787,7 @@ fn setupMemory(wasm: *Wasm) !void {
     }
 
     if (!place_stack_first and !is_obj) {
-        memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
+        memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
         memory_ptr += stack_size;
         wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
     }
@@ -2796,7 +2796,7 @@ fn setupMemory(wasm: *Wasm) !void {
     // We must set its virtual address so it can be used in relocations.
     if (wasm.findGlobalSymbol("__heap_base")) |loc| {
         const symbol = loc.getSymbol(wasm);
-        symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
+        symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment));
     }
 
     // Setup the max amount of pages
@@ -2818,7 +2818,7 @@ fn setupMemory(wasm: *Wasm) !void {
         }
         memory_ptr = initial_memory;
     }
-    memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, std.wasm.page_size);
+    memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
 
     // In case we do not import memory, but define it ourselves,
     // set the minimum amount of pages on the memory section.
     wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size);
diff --git a/src/objcopy.zig b/src/objcopy.zig
index c5d0e8dcb3..014208cc0d 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -1024,7 +1024,7 @@ fn ElfFile(comptime is_64: bool) type {
                     dest.sh_size = @intCast(Elf_OffSize, data.len);
 
                     const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign;
-                    dest.sh_offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, addralign);
+                    dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign);
                     if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE) {
                         if (src.sh_offset > dest.sh_offset) {
                             dest.sh_offset = src.sh_offset; // add padding to avoid modifing the program segments
@@ -1085,7 +1085,7 @@ fn ElfFile(comptime is_64: bool) type {
             // add a ".gnu_debuglink" section
             if (options.debuglink) |link| {
                 const payload = payload: {
-                    const crc_offset = std.mem.alignForward(link.name.len + 1, 4);
+                    const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4);
                     const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4);
                     @memcpy(buf[0..link.name.len], link.name);
                     @memset(buf[link.name.len..crc_offset], 0);
@@ -1117,7 +1117,7 @@ fn ElfFile(comptime is_64: bool) type {
 
             // write the section header at the tail
             {
-                const offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
+                const offset = std.mem.alignForward(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
                 const data = std.mem.sliceAsBytes(updated_section_header);
                 assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum);
diff --git a/src/type.zig b/src/type.zig
index bb82a50682..1c3435dafd 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1339,7 +1339,7 @@ pub const Type = struct {
                             .storage = .{ .lazy_size = ty.toIntern() },
                         } })).toValue() },
                     };
-                    const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment);
+                    const result = std.mem.alignForward(u32, total_bytes, alignment);
                     return AbiSizeAdvanced{ .scalar = result };
                 },
 
@@ -1380,14 +1380,14 @@ pub const Type = struct {
                 var size: u64 = 0;
                 if (code_align > payload_align) {
                     size += code_size;
-                    size = std.mem.alignForwardGeneric(u64, size, payload_align);
+                    size = std.mem.alignForward(u64, size, payload_align);
                     size += payload_size;
-                    size = std.mem.alignForwardGeneric(u64, size, code_align);
+                    size = std.mem.alignForward(u64, size, code_align);
                 } else {
                     size += payload_size;
-                    size = std.mem.alignForwardGeneric(u64, size, code_align);
+                    size = std.mem.alignForward(u64, size, code_align);
                     size += code_size;
-                    size = std.mem.alignForwardGeneric(u64, size, payload_align);
+                    size = std.mem.alignForward(u64, size, payload_align);
                 }
                 return AbiSizeAdvanced{ .scalar = size };
             },
@@ -1595,7 +1595,7 @@ pub const Type = struct {
 
     fn intAbiSize(bits: u16, target: Target) u64 {
         const alignment = intAbiAlignment(bits, target);
-        return std.mem.alignForwardGeneric(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment);
+        return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment);
     }
 
     fn intAbiAlignment(bits: u16, target: Target) u32 {
@@ -3194,7 +3194,7 @@ pub const Type = struct {
             const field_align = field.alignment(mod, it.struct_obj.layout);
             it.big_align = @max(it.big_align, field_align);
 
-            const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
+            const field_offset = std.mem.alignForward(u64, it.offset, field_align);
             it.offset = field_offset + field.ty.abiSize(mod);
             return FieldOffset{ .field = i, .offset = field_offset };
         }
@@ -3223,7 +3223,7 @@ pub const Type = struct {
                     return field_offset.offset;
                 }
 
-                return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
+                return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1));
             },
 
             .anon_struct_type => |tuple| {
@@ -3239,11 +3239,11 @@ pub const Type = struct {
                     const field_align = field_ty.toType().abiAlignment(mod);
                     big_align = @max(big_align, field_align);
-                    offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                    offset = std.mem.alignForward(u64, offset, field_align);
                     if (i == index) return offset;
                     offset += field_ty.toType().abiSize(mod);
                 }
-                offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
+                offset = std.mem.alignForward(u64, offset, @max(big_align, 1));
 
                 return offset;
             },
@@ -3254,7 +3254,7 @@ pub const Type = struct {
                 const layout = union_obj.getLayout(mod, true);
                 if (layout.tag_align >= layout.payload_align) {
                     // {Tag, Payload}
-                    return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+                    return std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
                 } else {
                     // {Payload, Tag}
                     return 0;
-- 
cgit v1.2.3
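
Taken together, the mechanical change in this patch is a single rename: the type-parameterized alignForwardGeneric/alignBackwardGeneric become std.mem.alignForward/std.mem.alignBackward, and the old usize-only alignForward(x, a) call sites gain an explicit usize type argument. A minimal sketch of the new call shapes (values invented; all call sites in this patch pass power-of-two alignments):

    const std = @import("std");

    test "call shapes after the rename" {
        // Previously: std.mem.alignForwardGeneric(u64, 13, 8)
        try std.testing.expectEqual(@as(u64, 16), std.mem.alignForward(u64, 13, 8));
        // Previously: std.mem.alignForward(13, 8), which only handled usize
        try std.testing.expectEqual(@as(usize, 16), std.mem.alignForward(usize, 13, 8));
        // Previously: std.mem.alignBackwardGeneric(u32, 13, 8)
        try std.testing.expectEqual(@as(u32, 8), std.mem.alignBackward(u32, 13, 8));
    }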