//! Semantic analysis of ZIR instructions.
//! Shared to every Block. Stored on the stack.
//! State used for compiling a ZIR into AIR.
//! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.

pt: Zcu.PerThread,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// The "owner" of a `Sema` represents the root "thing" that is being analyzed.
/// This does not change throughout the entire lifetime of a `Sema`. For instance,
/// when analyzing a runtime function body, this is always `func` of that function,
/// even if an inline/comptime function call is being analyzed.
owner: AnalUnit,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `sema.owner.func` if applicable, and then diverges
/// in the case of an inline or comptime function call.
/// This could be `none`, a `func_decl`, or a `func_instance`.
func_index: InternPool.Index,
/// Whether the type of func_index has a calling convention of `.Naked`.
func_is_naked: bool,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
comptime_err_ret_trace: *std.ArrayList(LazySrcLoc),
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
/// In case of the return type being an error union with an inferred error
/// set, this is the inferred error set. `null` otherwise. Allocated with
/// `Sema.arena`.
fn_ret_ty_ies: ?*InferredErrorSet,
branch_quota: u32 = default_branch_quota,
branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
/// break instruction up the stack to find the corresponding Block.
comptime_break_inst: Zir.Inst.Index = undefined,
/// When doing a generic function instantiation, this array collects a value
/// for each parameter of the generic owner. `none` for non-comptime parameters.
/// This is a separate array from `block.params` so that it can be passed
/// directly to `comptime_args` when calling `InternPool.getFuncInstance`.
/// This memory is allocated by a parent `Sema` in the temporary arena, and is
/// used only to add a `func_instance` into the `InternPool`.
comptime_args: []InternPool.Index = &.{},
/// Used to communicate from a generic function instantiation to the logic that
/// creates a generic function instantiation value in `funcCommon`.
generic_owner: InternPool.Index = .none,
/// When `generic_owner` is not none, this contains the generic function
/// instantiation callsite so that compile errors on the parameter types of the
/// instantiation can point back to the instantiation site in addition to the
/// declaration site.
generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
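// Illustrative sketch (not part of the original file; the user-level code below
// is an assumption): for a generic function
//
//     fn id(comptime T: type, x: T) T { return x; }
//
// the instantiation `id(u32, runtime_x)` collects one `comptime_args` entry per
// generic-owner parameter: `{ u32, .none }`, where `.none` marks the
// runtime-known parameter `x`.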
/// These are lazily created runtime blocks from `block_inline` instructions.
/// They are created when a `break_inline` passes through a runtime condition, because
/// Sema must convert comptime control flow to runtime control flow, which means
/// breaking from a block.
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
/// Populated with the last compile error created.
err: ?*Module.ErrorMsg = null,
/// Set to true when analyzing a func type instruction so that nested generic
/// function types will emit generic poison instead of a partial type.
no_partial_func_ty: bool = false,
/// The temporary arena is used for the memory of the `InferredAlloc` values
/// here so the values can be dropped without any cleanup.
unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},
/// Links every pointer derived from a base `alloc` back to that `alloc`. Used
/// to detect comptime-known `const`s.
/// TODO: ZIR liveness analysis would allow us to remove elements from this map.
base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{},
/// Runtime `alloc`s are placed in this map to track all comptime-known writes
/// before the corresponding `make_ptr_const` instruction.
/// If any store to the alloc depends on a runtime condition or stores a runtime
/// value, the corresponding element in this map is erased, to indicate that the
/// alloc is not comptime-known.
/// If the alloc remains in this map when `make_ptr_const` is reached, its value
/// is comptime-known, and all stores to the pointer must be applied at comptime
/// to determine the comptime value.
/// Backed by gpa.
maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .{},
/// Comptime-mutable allocs, and any comptime allocs which reference them, are
/// stored as elements of this array.
/// Pointers to such memory are represented via an index into this array.
/// Backed by gpa.
comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{},
/// A list of exports performed by this analysis. After this `Sema` terminates,
/// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`.
exports: std.ArrayListUnmanaged(Zcu.Export) = .{},
/// All references registered so far by this `Sema`. This is a temporary duplicate
/// of data stored in `Zcu.all_references`. It exists to avoid adding references to
/// a given `AnalUnit` multiple times.
references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
/// All dependencies registered so far by this `Sema`. This is a temporary duplicate
/// of the main dependency data. It exists to avoid adding dependencies to a given
/// `AnalUnit` multiple times.
dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{},

const MaybeComptimeAlloc = struct {
    /// The runtime index of the `alloc` instruction.
    runtime_index: Value.RuntimeIndex,
    /// Backed by sema.arena. Tracks all comptime-known stores to this `alloc`. Due to
    /// RLS, a single comptime-known allocation may have arbitrarily many stores.
    /// This list also contains `set_union_tag`, `optional_payload_ptr_set`, and
    /// `errunion_payload_ptr_set` instructions.
    /// If the instruction is one of these three tags, `src` may be `.unneeded`.
    stores: std.MultiArrayList(struct {
        inst: Air.Inst.Index,
        src: LazySrcLoc,
    }) = .{},
};
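// Illustrative test sketch (not part of the original file): user-level code that
// exercises the `post_hoc_blocks` mechanism documented above. The `break` out of
// the unrolled loop is a `break_inline` that crosses a runtime `if`, so Sema must
// rewrite the comptime break into runtime control flow.
test "break_inline crossing a runtime condition" {
    var runtime_cond = true;
    _ = &runtime_cond; // keep the condition runtime-known
    var found: u32 = 0;
    inline for ([_]u32{ 1, 2, 3 }) |v| {
        if (runtime_cond and v == 2) {
            found = v;
            break;
        }
    }
    try std.testing.expect(found == 2);
}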
const ComptimeAlloc = struct {
    val: MutableValue,
    is_const: bool,
    /// `.none` indicates that the alignment is the natural alignment of `val`.
    alignment: Alignment,
    /// This is the `runtime_index` at the point of this allocation. If a store
    /// to this alloc ever occurs with a runtime index greater than this one, it
    /// is behind a runtime condition, so a compile error will be emitted.
    runtime_index: Value.RuntimeIndex,
};

fn newComptimeAlloc(sema: *Sema, block: *Block, ty: Type, alignment: Alignment) !ComptimeAllocIndex {
    const idx = sema.comptime_allocs.items.len;
    try sema.comptime_allocs.append(sema.gpa, .{
        .val = .{ .interned = try sema.pt.intern(.{ .undef = ty.toIntern() }) },
        .is_const = false,
        .alignment = alignment,
        .runtime_index = block.runtime_index,
    });
    return @enumFromInt(idx);
}

pub fn getComptimeAlloc(sema: *Sema, idx: ComptimeAllocIndex) *ComptimeAlloc {
    return &sema.comptime_allocs.items[@intFromEnum(idx)];
}

const std = @import("std");
const math = std.math;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);

const Sema = @This();
const Value = @import("Value.zig");
const MutableValue = @import("mutable_value.zig").MutableValue;
const Type = @import("Type.zig");
const Air = @import("Air.zig");
const Zir = std.zig.Zir;
const Zcu = @import("Zcu.zig");
const Module = Zcu;
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const LazySrcLoc = Zcu.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");
const build_options = @import("build_options");
const Compilation = @import("Compilation.zig");
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
const AnalUnit = InternPool.AnalUnit;
const ComptimeAllocIndex = InternPool.ComptimeAllocIndex;

pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;
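// Illustrative test sketch (not part of the original file): user-level code showing
// how `branch_quota` is observable. Every comptime backward branch increments
// `branch_count`, and exceeding the quota (default 1000) is a compile error unless
// the quota is raised with `@setEvalBranchQuota`.
test "comptime loops are bounded by the branch quota" {
    const n = comptime blk: {
        @setEvalBranchQuota(2000);
        var i: u32 = 0;
        while (i < 1500) : (i += 1) {}
        break :blk i;
    };
    try std.testing.expectEqual(@as(u32, 1500), n);
}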
pub const InferredErrorSet = struct {
    /// The function body from which this error set originates.
    /// This is `none` in the case of a comptime/inline function call, corresponding to
    /// `InternPool.Index.adhoc_inferred_error_set_type`.
    /// The function's resolved error set is not set until analysis of the
    /// function body completes.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
    /// The regular error set created by resolving this inferred error set.
    resolved: InternPool.Index = .none,

    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);

    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        arena: Allocator,
    ) !void {
        switch (err_set_ty.toIntern()) {
            .anyerror_type => self.resolved = .anyerror_type,
            .adhoc_inferred_error_set_type => {}, // Adding an inferred error set to itself.
            else => switch (ip.indexToKey(err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| {
                    for (error_set_type.names.get(ip)) |name| {
                        try self.errors.put(arena, name, {});
                    }
                },
                .inferred_error_set_type => {
                    try self.inferred_error_sets.put(arena, err_set_ty.toIntern(), {});
                },
                else => unreachable,
            },
        }
    }
};

/// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve
/// instructions during analysis.
/// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the
/// zir instruction index and a start offset. An index is not present in the map if the value
/// at the index is `Air.Inst.Ref.none`.
/// `ensureSpaceForInstructions` can be called to force InstMap to have a mapped range that
/// includes all instructions in a slice. After calling this function, `putAssumeCapacity*` can
/// be called safely for any of the instructions passed in.
pub const InstMap = struct {
    items: []Air.Inst.Ref = &[_]Air.Inst.Ref{},
    start: Zir.Inst.Index = @enumFromInt(0),

    pub fn deinit(map: InstMap, allocator: mem.Allocator) void {
        allocator.free(map.items);
    }

    pub fn get(map: InstMap, key: Zir.Inst.Index) ?Air.Inst.Ref {
        if (!map.contains(key)) return null;
        return map.items[@intFromEnum(key) - @intFromEnum(map.start)];
    }

    pub fn putAssumeCapacity(
        map: *InstMap,
        key: Zir.Inst.Index,
        ref: Air.Inst.Ref,
    ) void {
        map.items[@intFromEnum(key) - @intFromEnum(map.start)] = ref;
    }

    pub fn putAssumeCapacityNoClobber(
        map: *InstMap,
        key: Zir.Inst.Index,
        ref: Air.Inst.Ref,
    ) void {
        assert(!map.contains(key));
        map.putAssumeCapacity(key, ref);
    }

    pub const GetOrPutResult = struct {
        value_ptr: *Air.Inst.Ref,
        found_existing: bool,
    };

    pub fn getOrPutAssumeCapacity(
        map: *InstMap,
        key: Zir.Inst.Index,
    ) GetOrPutResult {
        const index = @intFromEnum(key) - @intFromEnum(map.start);
        return GetOrPutResult{
            .value_ptr = &map.items[index],
            .found_existing = map.items[index] != .none,
        };
    }

    pub fn remove(map: InstMap, key: Zir.Inst.Index) bool {
        if (!map.contains(key)) return false;
        map.items[@intFromEnum(key) - @intFromEnum(map.start)] = .none;
        return true;
    }

    pub fn contains(map: InstMap, key: Zir.Inst.Index) bool {
        return map.items[@intFromEnum(key) - @intFromEnum(map.start)] != .none;
    }

    pub fn ensureSpaceForInstructions(
        map: *InstMap,
        allocator: mem.Allocator,
        insts: []const Zir.Inst.Index,
    ) !void {
        const start, const end = mem.minMax(u32, @ptrCast(insts));
        const map_start = @intFromEnum(map.start);
        if (map_start <= start and end < map.items.len + map_start)
            return;

        const old_start = if (map.items.len == 0) start else map_start;
        var better_capacity = map.items.len;
        var better_start = old_start;
        while (true) {
            const extra_capacity = better_capacity / 2 + 16;
            better_capacity += extra_capacity;
            better_start -|= @intCast(extra_capacity / 2);
            if (better_start <= start and end < better_capacity + better_start)
                break;
        }

        const start_diff = old_start - better_start;
        const new_items = try allocator.alloc(Air.Inst.Ref, better_capacity);
        @memset(new_items[0..start_diff], .none);
        @memcpy(new_items[start_diff..][0..map.items.len], map.items);
        @memset(new_items[start_diff + map.items.len ..], .none);

        allocator.free(map.items);
        map.items = new_items;
        map.start = @enumFromInt(better_start);
    }
};
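// Illustrative test sketch (not part of the original file): demonstrates the
// slice-plus-offset scheme documented above. Once `ensureSpaceForInstructions`
// covers a ZIR index range, lookups are plain slice indexing relative to
// `map.start`, with `.none` meaning "absent".
test "InstMap maps a ZIR index range onto a slice" {
    const gpa = std.testing.allocator;
    var map: InstMap = .{};
    defer map.deinit(gpa);

    const insts = [_]Zir.Inst.Index{ @enumFromInt(10), @enumFromInt(12) };
    try map.ensureSpaceForInstructions(gpa, &insts);

    map.putAssumeCapacity(@enumFromInt(12), .void_value);
    try std.testing.expect(map.contains(@enumFromInt(12)));
    try std.testing.expect(!map.contains(@enumFromInt(11)));
    try std.testing.expect(map.get(@enumFromInt(12)).? == .void_value);
}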
/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// The namespace to use for lookups from this source block.
    namespace: InternPool.NamespaceIndex,
    /// The AIR instructions generated for this block.
    instructions: std.ArrayListUnmanaged(Air.Inst.Index),
    // `param` instructions are collected here to be used by the `func` instruction.
    /// When doing a generic function instantiation, this array collects a type
    /// for each *runtime-known* parameter. This array corresponds to the instance
    /// function type, while `Sema.comptime_args` corresponds to the generic owner
    /// function type.
    /// This memory is allocated by a parent `Sema` in the temporary arena, and is
    /// used to add a `func_instance` into the `InternPool`.
    params: std.MultiArrayList(Param) = .{},

    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If runtime_index is not 0 then one of these is guaranteed to be non-null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
    /// Non-zero if a non-inline loop or a runtime conditional has been encountered.
    /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: Value.RuntimeIndex = .zero,
    inline_block: Zir.Inst.OptionalIndex = .none,

    comptime_reason: ?*const ComptimeReason = null,
    // TODO: is_comptime and comptime_reason should probably be merged together.
    is_comptime: bool,
    is_typeof: bool = false,

    /// Keep track of the active error return trace index around blocks so that we can correctly
    /// pop the error trace upon block exit.
    error_return_trace_index: Air.Inst.Ref = .none,

    /// When null, it is determined by build mode; changed by @setRuntimeSafety.
    want_safety: ?bool = null,

    /// What mode to generate float operations in, set by @setFloatMode.
    float_mode: std.builtin.FloatMode = .strict,

    c_import_buf: ?*std.ArrayList(u8) = null,

    /// If not `null`, this boolean is set when a `dbg_var_ptr` or `dbg_var_val`
    /// instruction is emitted. It signals that the innermost lexically
    /// enclosing `block`/`block_inline` should be translated into a real AIR
    /// `block` in order for codegen to match lexical scoping for debug vars.
    need_debug_scope: ?*bool = null,

    /// Relative source locations encountered while traversing this block should be
    /// treated as relative to the AST node of this ZIR instruction.
    src_base_inst: InternPool.TrackedInst.Index,

    /// The name of the current "context" for naming namespace types.
    /// The interpretation of this depends on the name strategy in ZIR, but the name
    /// is always incorporated into the type name somehow.
    /// See `Sema.createTypeName`.
    type_name_ctx: InternPool.NullTerminatedString,

    /// Create a `LazySrcLoc` based on an `Offset` from the code being analyzed in this block.
    /// Specifically, the given `Offset` is treated as relative to `block.src_base_inst`.
    pub fn src(block: Block, offset: LazySrcLoc.Offset) LazySrcLoc {
        return .{
            .base_node_inst = block.src_base_inst,
            .offset = offset,
        };
    }

    fn builtinCallArgSrc(block: *Block, builtin_call_node: i32, arg_index: u32) LazySrcLoc {
        return block.src(.{ .node_offset_builtin_call_arg = .{
            .builtin_call_node = builtin_call_node,
            .arg_index = arg_index,
        } });
    }

    fn nodeOffset(block: Block, node_offset: i32) LazySrcLoc {
        return block.src(LazySrcLoc.Offset.nodeOffset(node_offset));
    }

    fn tokenOffset(block: Block, tok_offset: u32) LazySrcLoc {
        return block.src(.{ .token_offset = tok_offset });
    }

    const ComptimeReason = union(enum) {
        c_import: struct {
            src: LazySrcLoc,
        },
        comptime_ret_ty: struct {
            func: Air.Inst.Ref,
            func_src: LazySrcLoc,
            return_ty: Type,
        },

        fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void {
            const parent = msg orelse return;
            const pt = sema.pt;
            const prefix = "expression is evaluated at comptime because ";
            switch (cr) {
                .c_import => |ci| {
                    try sema.errNote(ci.src, parent, prefix ++ "it is inside a @cImport", .{});
                },
                .comptime_ret_ty => |rt| {
                    const ret_ty_src: LazySrcLoc = if (try sema.funcDeclSrcInst(rt.func)) |fn_decl_inst| .{
                        .base_node_inst = fn_decl_inst,
                        .offset = .{ .node_offset_fn_type_ret_ty = 0 },
                    } else rt.func_src;
                    if (rt.return_ty.isGenericPoison()) {
                        return sema.errNote(ret_ty_src, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{});
                    }
                    try sema.errNote(
                        ret_ty_src,
                        parent,
                        prefix ++ "the function returns a comptime-only type '{}'",
                        .{rt.return_ty.fmt(pt)},
                    );
                    try sema.explainWhyTypeIsComptime(parent, ret_ty_src, rt.return_ty);
                },
            }
        }
    };

    const Param = struct {
        /// `none` means `anytype`.
        ty: InternPool.Index,
        is_comptime: bool,
        name: Zir.NullTerminatedString,
    };

    /// This `Block` maps a block ZIR instruction to the corresponding
    /// AIR instruction for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// This `Block` indicates that an inline function call is happening
    /// and return instructions should be analyzed as a break instruction
    /// to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime called
    /// function.
    pub const Inlining = struct {
        call_block: *Block,
        call_src: LazySrcLoc,
        has_comptime_args: bool,
        func: InternPool.Index,
        comptime_result: Air.Inst.Ref,
        merges: Merges,
    };

    pub const Merges = struct {
        block_inst: Air.Inst.Index,
        /// Separate array list from break_inst_list so that it can be passed directly
        /// to resolvePeerTypes.
        results: std.ArrayListUnmanaged(Air.Inst.Ref),
        /// Keeps track of the break instructions so that the operand can be replaced
        /// if we need to add type coercion at the end of block analysis.
        /// Same indexes, capacity, length as `results`.
        br_list: std.ArrayListUnmanaged(Air.Inst.Index),
        /// Keeps the source location of the rhs operand of the break instruction,
        /// to enable more precise compile errors.
        /// Same indexes, capacity, length as `results`.
        src_locs: std.ArrayListUnmanaged(?LazySrcLoc),

        pub fn deinit(merges: *@This(), allocator: mem.Allocator) void {
            merges.results.deinit(allocator);
            merges.br_list.deinit(allocator);
            merges.src_locs.deinit(allocator);
        }
    };

    pub fn makeSubBlock(parent: *Block) Block {
        return .{
            .parent = parent,
            .sema = parent.sema,
            .namespace = parent.namespace,
            .instructions = .{},
            .label = null,
            .inlining = parent.inlining,
            .is_comptime = parent.is_comptime,
            .comptime_reason = parent.comptime_reason,
            .is_typeof = parent.is_typeof,
            .runtime_cond = parent.runtime_cond,
            .runtime_loop = parent.runtime_loop,
            .runtime_index = parent.runtime_index,
            .want_safety = parent.want_safety,
            .float_mode = parent.float_mode,
            .c_import_buf = parent.c_import_buf,
            .error_return_trace_index = parent.error_return_trace_index,
            .need_debug_scope = parent.need_debug_scope,
            .src_base_inst = parent.src_base_inst,
            .type_name_ctx = parent.type_name_ctx,
        };
    }

    pub fn wantSafety(block: *const Block) bool {
        return block.want_safety orelse switch (block.sema.pt.zcu.optimizeMode()) {
            .Debug => true,
            .ReleaseSafe => true,
            .ReleaseFast => false,
            .ReleaseSmall => false,
        };
    }

    pub fn getFileScope(block: *Block, zcu: *Zcu) *Zcu.File {
        return zcu.fileByIndex(getFileScopeIndex(block, zcu));
    }

    pub fn getFileScopeIndex(block: *Block, zcu: *Zcu) Zcu.File.Index {
        return zcu.namespacePtr(block.namespace).file_scope;
    }

    fn addTy(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty = ty },
        });
    }

    fn addTyOp(
        block: *Block,
        tag: Air.Inst.Tag,
        ty: Type,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = Air.internedToRef(ty.toIntern()),
                .operand = operand,
            } },
        });
    }

    fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .bitcast,
            .data = .{ .ty_op = .{
                .ty = Air.internedToRef(ty.toIntern()),
                .operand = operand,
            } },
        });
    }

    fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .no_op = {} },
        });
    }

    fn addUnOp(
        block: *Block,
        tag: Air.Inst.Tag,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .un_op = operand },
        });
    }

    fn addBr(
        block: *Block,
        target_block: Air.Inst.Index,
        operand: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = .br,
            .data = .{ .br = .{
                .block_inst = target_block,
                .operand = operand,
            } },
        });
    }

    fn addBinOp(
        block: *Block,
        tag: Air.Inst.Tag,
        lhs: Air.Inst.Ref,
        rhs: Air.Inst.Ref,
    ) error{OutOfMemory}!Air.Inst.Ref {
        return block.addInst(.{
            .tag = tag,
            .data = .{ .bin_op = .{
                .lhs = lhs,
                .rhs = rhs,
            } },
        });
    }

    fn addStructFieldPtr(
        block: *Block,
        struct_ptr: Air.Inst.Ref,
        field_index: u32,
        ptr_field_ty: Type,
    ) !Air.Inst.Ref {
        const ty = Air.internedToRef(ptr_field_ty.toIntern());
        const tag: Air.Inst.Tag = switch (field_index) {
            0 => .struct_field_ptr_index_0,
            1 => .struct_field_ptr_index_1,
            2 => .struct_field_ptr_index_2,
            3 => .struct_field_ptr_index_3,
            else => {
                return block.addInst(.{
                    .tag = .struct_field_ptr,
                    .data = .{ .ty_pl = .{
                        .ty = ty,
                        .payload = try block.sema.addExtra(Air.StructField{
                            .struct_operand = struct_ptr,
                            .field_index = field_index,
                        }),
                    } },
                });
            },
        };
        return block.addInst(.{
            .tag = tag,
            .data = .{ .ty_op = .{
                .ty = ty,
                .operand = struct_ptr,
            } },
        });
    }

    fn addStructFieldVal(
        block: *Block,
        struct_val: Air.Inst.Ref,
        field_index: u32,
        field_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .struct_field_val,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(field_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.StructField{
                    .struct_operand = struct_val,
                    .field_index = field_index,
                }),
            } },
        });
    }

    fn addSliceElemPtr(
        block: *Block,
        slice: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .slice_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(elem_ptr_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = slice,
                    .rhs = elem_index,
                }),
            } },
        });
    }

    fn addPtrElemPtr(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Type,
    ) !Air.Inst.Ref {
        const ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
        return block.addPtrElemPtrTypeRef(array_ptr, elem_index, ty_ref);
    }

    fn addPtrElemPtrTypeRef(
        block: *Block,
        array_ptr: Air.Inst.Ref,
        elem_index: Air.Inst.Ref,
        elem_ptr_ty: Air.Inst.Ref,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .ptr_elem_ptr,
            .data = .{ .ty_pl = .{
                .ty = elem_ptr_ty,
                .payload = try block.sema.addExtra(Air.Bin{
                    .lhs = array_ptr,
                    .rhs = elem_index,
                }),
            } },
        });
    }

    fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
        const sema = block.sema;
        const pt = sema.pt;
        const mod = pt.zcu;
        return block.addInst(.{
            .tag = if (block.float_mode == .optimized) .cmp_vector_optimized else .cmp_vector,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef((try pt.vectorType(.{
                    .len = sema.typeOf(lhs).vectorLen(mod),
                    .child = .bool_type,
                })).toIntern()),
                .payload = try sema.addExtra(Air.VectorCmp{
                    .lhs = lhs,
                    .rhs = rhs,
                    .op = Air.VectorCmp.encodeOp(cmp_op),
                }),
            } },
        });
    }

    fn addAggregateInit(
        block: *Block,
        aggregate_ty: Type,
        elements: []const Air.Inst.Ref,
    ) !Air.Inst.Ref {
        const sema = block.sema;
        const ty_ref = Air.internedToRef(aggregate_ty.toIntern());
        try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len);
        const extra_index: u32 = @intCast(sema.air_extra.items.len);
        sema.appendRefsAssumeCapacity(elements);
        return block.addInst(.{
            .tag = .aggregate_init,
            .data = .{ .ty_pl = .{
                .ty = ty_ref,
                .payload = extra_index,
            } },
        });
    }

    fn addUnionInit(
        block: *Block,
        union_ty: Type,
        field_index: u32,
        init: Air.Inst.Ref,
    ) !Air.Inst.Ref {
        return block.addInst(.{
            .tag = .union_init,
            .data = .{ .ty_pl = .{
                .ty = Air.internedToRef(union_ty.toIntern()),
                .payload = try block.sema.addExtra(Air.UnionInit{
                    .field_index = field_index,
                    .init = init,
                }),
            } },
        });
    }

    pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return (try block.addInstAsIndex(inst)).toRef();
    }

    pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
        try block.instructions.ensureUnusedCapacity(gpa, 1);

        const result_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
        sema.air_instructions.appendAssumeCapacity(inst);
        block.instructions.appendAssumeCapacity(result_index);
        return result_index;
    }

    /// Insert an instruction into the block at `index`. Moves all following
    /// instructions forward in the block to make room. Operation is O(N).
    pub fn insertInst(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
        return (try block.insertInstAsIndex(index, inst)).toRef();
    }

    pub fn insertInstAsIndex(block: *Block, index: Air.Inst.Index, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
        const sema = block.sema;
        const gpa = sema.gpa;

        try sema.air_instructions.ensureUnusedCapacity(gpa, 1);

        const result_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
        sema.air_instructions.appendAssumeCapacity(inst);

        try block.instructions.insert(gpa, @intFromEnum(index), result_index);
        return result_index;
    }

    pub fn ownerModule(block: Block) *Package.Module {
        const zcu = block.sema.pt.zcu;
        return zcu.namespacePtr(block.namespace).fileScope(zcu).mod;
    }

    fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index {
        const pt = block.sema.pt;
        return pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{
            .file = block.getFileScopeIndex(pt.zcu),
            .inst = inst,
        });
    }
};

const LabeledBlock = struct {
    block: Block,
    label: Block.Label,

    fn destroy(lb: *LabeledBlock, gpa: Allocator) void {
        lb.block.instructions.deinit(gpa);
        lb.label.merges.deinit(gpa);
        gpa.destroy(lb);
    }
};

/// The value stored in the inferred allocation. This will go into
/// peer type resolution. This is stored in a separate list so that
/// the items are contiguous in memory and thus can be passed to
/// `Module.resolvePeerTypes`.
const InferredAlloc = struct {
    /// The placeholder `store` instructions used before the result pointer type
    /// is known. These should be rewritten to perform any required coercions
    /// when the type is resolved.
    /// Allocated from `sema.arena`.
    prongs: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
};

const NeededComptimeReason = struct {
    needed_comptime_reason: []const u8,
    block_comptime_reason: ?*const Block.ComptimeReason = null,
};

pub fn deinit(sema: *Sema) void {
    const gpa = sema.gpa;
    sema.air_instructions.deinit(gpa);
    sema.air_extra.deinit(gpa);
    sema.inst_map.deinit(gpa);
    {
        var it = sema.post_hoc_blocks.iterator();
        while (it.next()) |entry| {
            const labeled_block = entry.value_ptr.*;
            labeled_block.destroy(gpa);
        }
        sema.post_hoc_blocks.deinit(gpa);
    }
    sema.unresolved_inferred_allocs.deinit(gpa);
    sema.base_allocs.deinit(gpa);
    sema.maybe_comptime_allocs.deinit(gpa);
    sema.comptime_allocs.deinit(gpa);
    sema.exports.deinit(gpa);
    sema.references.deinit(gpa);
    sema.type_references.deinit(gpa);
    sema.dependencies.deinit(gpa);
    sema.* = undefined;
}

/// Performs semantic analysis of a ZIR body which is behind a runtime condition. If comptime
/// control flow happens here, Sema will convert it to runtime control flow by introducing post-hoc
/// blocks where necessary.
fn analyzeBodyRuntimeBreak(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) !void {
    sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        error.ComptimeBreak => {
            const zir_datas = sema.code.instructions.items(.data);
            const break_data = zir_datas[@intFromEnum(sema.comptime_break_inst)].@"break";
            const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
            try sema.addRuntimeBreak(block, extra.block_inst, break_data.operand);
        },
        else => |e| return e,
    };
}

/// Semantically analyze a ZIR function body. It is guaranteed by AstGen that such a body cannot
/// trigger comptime control flow to move above the function body.
pub fn analyzeFnBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) !void {
    sema.analyzeBodyInner(block, body) catch |err| switch (err) {
        error.ComptimeBreak => unreachable, // unexpected comptime control flow
        else => |e| return e,
    };
}

/// Given a ZIR body which can be exited via a `break_inline` instruction, or a non-inline body which
/// we are evaluating at comptime, semantically analyze the body and return the result from it.
/// Returns `null` if control flow did not break from this block, but instead terminated with some
/// other runtime noreturn instruction. Compile-time breaks to blocks further up the stack still
/// return `error.ComptimeBreak`. If `block.is_comptime`, this function will never return `null`.
fn analyzeInlineBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
    /// The index which a break instruction can target to break from this body.
    break_target: Zir.Inst.Index,
) CompileError!?Air.Inst.Ref {
    if (sema.analyzeBodyInner(block, body)) |_| {
        return null;
    } else |err| switch (err) {
        error.ComptimeBreak => {},
        else => |e| return e,
    }
    const break_inst = sema.comptime_break_inst;
    const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break";
    const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
    if (extra.block_inst != break_target) {
        // This control flow goes further up the stack.
        return error.ComptimeBreak;
    }
    return try sema.resolveInst(break_data.operand);
}

/// Like `analyzeInlineBody`, but if the body does not break with a value, returns
/// `.unreachable_value` instead of `null`. Notably, use this to evaluate an arbitrary
/// body at comptime to a single result value.
pub fn resolveInlineBody(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
    /// The index which a break instruction can target to break from this body.
    break_target: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    return (try sema.analyzeInlineBody(block, body, break_target)) orelse .unreachable_value;
}
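// Illustrative test sketch (not part of the original file; the helper name is
// hypothetical): user-level code whose callee body is analyzed through the
// inline-body machinery above. With `.always_inline`, the callee's `ret`
// instructions are analyzed as breaks to a block in the caller (see
// `Block.Inlining`).
fn addOneForTest(x: u32) u32 {
    return x + 1;
}

test "an inline call analyzes the callee body in the caller" {
    try std.testing.expectEqual(@as(u32, 42), @call(.always_inline, addOneForTest, .{41}));
}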
/// This function is the main loop of `Sema`. It analyzes a single body of ZIR instructions.
///
/// If this function returns normally, the merges of `block` were populated with all possible
/// (runtime) results of this block. Peer type resolution should be performed on the result,
/// and relevant runtime instructions written to perform necessary coercions and breaks. See
/// `resolveAnalyzedBlock`. This form of return is impossible if `block.is_comptime == true`.
///
/// Alternatively, this function may return `error.ComptimeBreak`. This indicates that comptime
/// control flow is happening, and we are breaking at comptime from a block indicated by the
/// break instruction in `sema.comptime_break_inst`. This occurs for any `break_inline`, or for a
/// standard `break` at comptime. This error is pushed up the stack until the target block is
/// reached, at which point the break operand will be fetched.
///
/// It is rare to call this function directly. Usually, you want one of the following wrappers:
/// * If the body is exited via a `break_inline`, or is being evaluated at comptime,
///   use `Sema.analyzeInlineBody` or `Sema.resolveInlineBody`.
/// * If the body is behind a fresh runtime condition, use `Sema.analyzeBodyRuntimeBreak`.
/// * If the body is an entire function body, use `Sema.analyzeFnBody`.
/// * If the body is to be generated into an AIR `block`, use `Sema.resolveBlockBody`.
/// * Otherwise, direct usage of `Sema.analyzeBodyInner` may be necessary.
fn analyzeBodyInner(
    sema: *Sema,
    block: *Block,
    body: []const Zir.Inst.Index,
) CompileError!void {
    // No tracy calls here, to avoid interfering with the tail call mechanism.

    try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);

    const pt = sema.pt;
    const zcu = pt.zcu;
    const map = &sema.inst_map;
    const tags = sema.code.instructions.items(.tag);
    const datas = sema.code.instructions.items(.data);

    var crash_info = crash_report.prepAnalyzeBody(sema, block, body);
    crash_info.push();
    defer crash_info.pop();

    // We use a while (true) loop here to avoid a redundant way of breaking out of
    // the loop. The only way to break out of the loop is with a `noreturn`
    // instruction.
    var i: u32 = 0;
    while (true) {
        crash_info.setBodyIndex(i);
        const inst = body[i];
        // The hashmap lookup in here is a little expensive, and LLVM fails to optimize it away.
        if (build_options.enable_logging) {
            std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ sub_file_path: {
                const file_index = block.src_base_inst.resolveFile(&zcu.intern_pool);
                const file = zcu.fileByIndex(file_index);
                break :sub_file_path file.sub_file_path;
            }, inst });
        }
        const air_inst: Air.Inst.Ref = switch (tags[@intFromEnum(inst)]) {
            // zig fmt: off
            .alloc => try sema.zirAlloc(block, inst),
            .alloc_inferred => try sema.zirAllocInferred(block, true),
            .alloc_inferred_mut => try sema.zirAllocInferred(block, false),
            .alloc_inferred_comptime => try sema.zirAllocInferredComptime(true),
            .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(false),
            .alloc_mut => try sema.zirAllocMut(block, inst),
            .alloc_comptime_mut => try sema.zirAllocComptime(block, inst),
            .make_ptr_const => try sema.zirMakePtrConst(block, inst),
            .anyframe_type => try sema.zirAnyframeType(block, inst),
            .array_cat => try sema.zirArrayCat(block, inst),
            .array_mul => try sema.zirArrayMul(block, inst),
            .array_type => try sema.zirArrayType(block, inst),
            .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst),
            .vector_type => try sema.zirVectorType(block, inst),
            .as_node => try sema.zirAsNode(block, inst),
            .as_shift_operand => try sema.zirAsShiftOperand(block, inst),
            .bit_and => try sema.zirBitwise(block, inst, .bit_and),
            .bit_not => try sema.zirBitNot(block, inst),
            .bit_or => try sema.zirBitwise(block, inst, .bit_or),
            .bitcast => try sema.zirBitcast(block, inst),
            .suspend_block => try sema.zirSuspendBlock(block, inst),
            .bool_not => try sema.zirBoolNot(block, inst),
            .bool_br_and => try sema.zirBoolBr(block, inst, false),
            .bool_br_or => try sema.zirBoolBr(block, inst, true),
            .c_import => try sema.zirCImport(block, inst),
            .call => try sema.zirCall(block, inst, .direct),
            .field_call => try sema.zirCall(block, inst, .field),
            .cmp_lt => try sema.zirCmp(block, inst, .lt),
            .cmp_lte => try sema.zirCmp(block, inst, .lte),
            .cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .optimized)),
            .cmp_gte => try sema.zirCmp(block, inst, .gte),
            .cmp_gt => try sema.zirCmp(block, inst, .gt),
            .cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .optimized)),
            .decl_ref => try sema.zirDeclRef(block, inst),
            .decl_val => try sema.zirDeclVal(block, inst),
            .load => try sema.zirLoad(block, inst),
            .elem_ptr => try sema.zirElemPtr(block, inst),
            .elem_ptr_node => try sema.zirElemPtrNode(block, inst),
            .elem_val => try sema.zirElemVal(block, inst),
            .elem_val_node => try sema.zirElemValNode(block, inst),
            .elem_val_imm => try sema.zirElemValImm(block, inst),
            .elem_type => try sema.zirElemType(block, inst),
            .indexable_ptr_elem_type => try sema.zirIndexablePtrElemType(block, inst),
            .vector_elem_type => try sema.zirVectorElemType(block, inst),
            .enum_literal => try sema.zirEnumLiteral(block, inst),
            .int_from_enum => try sema.zirIntFromEnum(block, inst),
            .enum_from_int => try sema.zirEnumFromInt(block, inst),
            .err_union_code => try sema.zirErrUnionCode(block, inst),
            .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst),
            .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst),
            .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst),
            .error_union_type => try sema.zirErrorUnionType(block, inst),
            .error_value => try sema.zirErrorValue(block, inst),
            .field_ptr => try sema.zirFieldPtr(block, inst),
            .field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
            .field_val => try sema.zirFieldVal(block, inst),
            .field_val_named => try sema.zirFieldValNamed(block, inst),
            .func => try sema.zirFunc(block, inst, false),
            .func_inferred => try sema.zirFunc(block, inst, true),
            .func_fancy => try sema.zirFuncFancy(block, inst),
            .import => try sema.zirImport(block, inst),
            .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst),
            .int => try sema.zirInt(block, inst),
            .int_big => try sema.zirIntBig(block, inst),
            .float => try sema.zirFloat(block, inst),
            .float128 => try sema.zirFloat128(block, inst),
            .int_type => try sema.zirIntType(inst),
            .is_non_err => try sema.zirIsNonErr(block, inst),
            .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst),
            .ret_is_non_err => try sema.zirRetIsNonErr(block, inst),
            .is_non_null => try sema.zirIsNonNull(block, inst),
            .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst),
            .merge_error_sets => try sema.zirMergeErrorSets(block, inst),
            .negate => try sema.zirNegate(block, inst),
            .negate_wrap => try sema.zirNegateWrap(block, inst),
            .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true),
            .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true),
            .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false),
            .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false),
            .optional_type => try sema.zirOptionalType(block, inst),
            .ptr_type => try sema.zirPtrType(block, inst),
            .ref => try sema.zirRef(block, inst),
            .ret_err_value_code => try sema.zirRetErrValueCode(inst),
            .shr => try sema.zirShr(block, inst, .shr),
            .shr_exact => try sema.zirShr(block, inst, .shr_exact),
            .slice_end => try sema.zirSliceEnd(block, inst),
            .slice_sentinel => try sema.zirSliceSentinel(block, inst),
            .slice_start => try sema.zirSliceStart(block, inst),
            .slice_length => try sema.zirSliceLength(block, inst),
            .str => try sema.zirStr(inst),
            .switch_block => try sema.zirSwitchBlock(block, inst, false),
            .switch_block_ref => try sema.zirSwitchBlock(block, inst, true),
            .switch_block_err_union => try sema.zirSwitchBlockErrUnion(block, inst),
            .type_info => try sema.zirTypeInfo(block, inst),
            .size_of => try sema.zirSizeOf(block, inst),
            .bit_size_of => try sema.zirBitSizeOf(block, inst),
            .typeof => try sema.zirTypeof(block, inst),
            .typeof_builtin => try sema.zirTypeofBuiltin(block, inst),
            .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
            .xor => try sema.zirBitwise(block, inst, .xor),
            .struct_init_empty => try sema.zirStructInitEmpty(block, inst),
            .struct_init_empty_result => try sema.zirStructInitEmptyResult(block, inst, false),
            .struct_init_empty_ref_result => try sema.zirStructInitEmptyResult(block, inst, true),
            .struct_init_anon => try sema.zirStructInitAnon(block, inst),
            .struct_init => try sema.zirStructInit(block, inst, false),
            .struct_init_ref => try sema.zirStructInit(block, inst, true),
            .struct_init_field_type => try sema.zirStructInitFieldType(block, inst),
            .struct_init_field_ptr => try sema.zirStructInitFieldPtr(block, inst),
            .array_init_anon => try sema.zirArrayInitAnon(block, inst),
            .array_init => try sema.zirArrayInit(block, inst, false),
            .array_init_ref => try sema.zirArrayInit(block, inst, true),
            .array_init_elem_type => try sema.zirArrayInitElemType(block, inst),
            .array_init_elem_ptr => try sema.zirArrayInitElemPtr(block, inst),
            .union_init => try sema.zirUnionInit(block, inst),
            .field_type_ref => try sema.zirFieldTypeRef(block, inst),
            .int_from_ptr => try sema.zirIntFromPtr(block, inst),
            .align_of => try sema.zirAlignOf(block, inst),
            .int_from_bool => try sema.zirIntFromBool(block, inst),
            .embed_file => try sema.zirEmbedFile(block, inst),
            .error_name => try sema.zirErrorName(block, inst),
            .tag_name => try sema.zirTagName(block, inst),
            .type_name => try sema.zirTypeName(block, inst),
            .frame_type => try sema.zirFrameType(block, inst),
            .frame_size => try sema.zirFrameSize(block, inst),
            .int_from_float => try sema.zirIntFromFloat(block, inst),
            .float_from_int => try sema.zirFloatFromInt(block, inst),
            .ptr_from_int => try sema.zirPtrFromInt(block, inst),
            .float_cast => try sema.zirFloatCast(block, inst),
            .int_cast => try sema.zirIntCast(block, inst),
            .ptr_cast => try sema.zirPtrCast(block, inst),
            .truncate => try sema.zirTruncate(block, inst),
            .has_decl => try sema.zirHasDecl(block, inst),
            .has_field => try sema.zirHasField(block, inst),
            .byte_swap => try sema.zirByteSwap(block, inst),
            .bit_reverse => try sema.zirBitReverse(block, inst),
            .bit_offset_of => try sema.zirBitOffsetOf(block, inst),
            .offset_of => try sema.zirOffsetOf(block, inst),
            .splat => try sema.zirSplat(block, inst),
            .reduce => try sema.zirReduce(block, inst),
            .shuffle => try sema.zirShuffle(block, inst),
            .atomic_load => try sema.zirAtomicLoad(block, inst),
            .atomic_rmw => try sema.zirAtomicRmw(block, inst),
            .mul_add => try sema.zirMulAdd(block, inst),
            .builtin_call => try sema.zirBuiltinCall(block, inst),
            .@"resume" => try sema.zirResume(block, inst),
            .@"await" => try sema.zirAwait(block, inst),
            .for_len => try sema.zirForLen(block, inst),
            .validate_array_init_ref_ty => try sema.zirValidateArrayInitRefTy(block, inst),
            .opt_eu_base_ptr_init => try sema.zirOptEuBasePtrInit(block, inst),
            .coerce_ptr_elem_ty => try sema.zirCoercePtrElemTy(block, inst),

            .clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
            .ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
            .pop_count => try sema.zirBitCount(block, inst, .popcount, Value.popCount),

            .abs => try sema.zirAbs(block, inst),
            .sqrt => try sema.zirUnaryMath(block, inst, .sqrt, Value.sqrt),
            .sin => try sema.zirUnaryMath(block, inst, .sin, Value.sin),
            .cos => try sema.zirUnaryMath(block, inst, .cos, Value.cos),
            .tan => try sema.zirUnaryMath(block, inst, .tan, Value.tan),
            .exp => try sema.zirUnaryMath(block, inst, .exp, Value.exp),
            .exp2 => try sema.zirUnaryMath(block, inst, .exp2, Value.exp2),
            .log => try sema.zirUnaryMath(block, inst, .log, Value.log),
            .log2 => try sema.zirUnaryMath(block, inst, .log2, Value.log2),
            .log10 => try sema.zirUnaryMath(block, inst, .log10, Value.log10),
            .floor => try sema.zirUnaryMath(block, inst, .floor, Value.floor),
            .ceil => try sema.zirUnaryMath(block, inst, .ceil, Value.ceil),
            .round => try sema.zirUnaryMath(block, inst, .round, Value.round),
            .trunc => try sema.zirUnaryMath(block, inst, .trunc_float, Value.trunc),
            .error_set_decl => try sema.zirErrorSetDecl(inst),

            .add => try sema.zirArithmetic(block, inst, .add, true),
            .addwrap => try sema.zirArithmetic(block, inst, .addwrap, true),
            .add_sat => try sema.zirArithmetic(block, inst, .add_sat, true),
            .add_unsafe => try sema.zirArithmetic(block, inst, .add_unsafe, false),
            .mul => try sema.zirArithmetic(block, inst, .mul, true),
            .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap, true),
            .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat, true),
            .sub => try sema.zirArithmetic(block, inst, .sub, true),
            .subwrap => try sema.zirArithmetic(block, inst, .subwrap, true),
            .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat, true),

            .div => try sema.zirDiv(block, inst),
            .div_exact => try sema.zirDivExact(block, inst),
            .div_floor => try sema.zirDivFloor(block, inst),
            .div_trunc => try sema.zirDivTrunc(block, inst),

            .mod_rem => try sema.zirModRem(block, inst),
            .mod => try sema.zirMod(block, inst),
            .rem => try sema.zirRem(block, inst),

            .max => try sema.zirMinMax(block, inst, .max),
            .min => try sema.zirMinMax(block, inst, .min),

            .shl => try sema.zirShl(block, inst, .shl),
            .shl_exact => try sema.zirShl(block, inst, .shl_exact),
            .shl_sat => try sema.zirShl(block, inst, .shl_sat),

            .ret_ptr => try sema.zirRetPtr(block),
            .ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()),

            // Instructions that we know to *always* be noreturn based solely on their tag.
            // These functions match the return type of analyzeBody so that we can
            // tail call them here.
            .compile_error => break try sema.zirCompileError(block, inst),
            .ret_implicit => break try sema.zirRetImplicit(block, inst),
            .ret_node => break try sema.zirRetNode(block, inst),
            .ret_load => break try sema.zirRetLoad(block, inst),
            .ret_err_value => break try sema.zirRetErrValue(block, inst),
            .@"unreachable" => break try sema.zirUnreachable(block, inst),
            .panic => break try sema.zirPanic(block, inst),
            .trap => break try sema.zirTrap(block, inst),
            // zig fmt: on

            // This instruction never exists in an analyzed body. It exists only in the declaration
            // list for a container type.
            .declaration => unreachable,

            .extended => ext: {
                const extended = datas[@intFromEnum(inst)].extended;
                break :ext switch (extended.opcode) {
                    // zig fmt: off
                    .variable => try sema.zirVarExtended(block, extended),
                    .struct_decl => try sema.zirStructDecl(block, extended, inst),
                    .enum_decl => try sema.zirEnumDecl(block, extended, inst),
                    .union_decl => try sema.zirUnionDecl(block, extended, inst),
                    .opaque_decl => try sema.zirOpaqueDecl(block, extended, inst),
                    .this => try sema.zirThis(block, extended),
                    .ret_addr => try sema.zirRetAddr(block, extended),
                    .builtin_src => try sema.zirBuiltinSrc(block, extended),
                    .error_return_trace => try sema.zirErrorReturnTrace(block),
                    .frame => try sema.zirFrame(block, extended),
                    .frame_address => try sema.zirFrameAddress(block, extended),
                    .alloc => try sema.zirAllocExtended(block, extended),
                    .builtin_extern => try sema.zirBuiltinExtern(block, extended),
                    .@"asm" => try sema.zirAsm(block, extended, false),
                    .asm_expr => try sema.zirAsm(block, extended, true),
                    .typeof_peer => try sema.zirTypeofPeer(block, extended, inst),
                    .compile_log => try sema.zirCompileLog(block, extended),
                    .min_multi => try sema.zirMinMaxMulti(block, extended, .min),
                    .max_multi => try sema.zirMinMaxMulti(block, extended, .max),
                    .add_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .sub_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .mul_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .shl_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
                    .c_undef => try sema.zirCUndef(block, extended),
                    .c_include => try sema.zirCInclude(block, extended),
                    .c_define => try sema.zirCDefine(block, extended),
                    .wasm_memory_size => try sema.zirWasmMemorySize(block, extended),
                    .wasm_memory_grow => try sema.zirWasmMemoryGrow(block, extended),
                    .prefetch => try sema.zirPrefetch(block, extended),
                    .error_cast => try sema.zirErrorCast(block, extended),
                    .await_nosuspend => try sema.zirAwaitNosuspend(block, extended),
                    .select => try sema.zirSelect(block, extended),
                    .int_from_error => try sema.zirIntFromError(block, extended),
                    .error_from_int => try sema.zirErrorFromInt(block, extended),
                    .reify => try sema.zirReify(block, extended, inst),
                    .builtin_async_call => try sema.zirBuiltinAsyncCall(block, extended),
                    .cmpxchg => try sema.zirCmpxchg(block, extended),
                    .c_va_arg => try sema.zirCVaArg(block, extended),
                    .c_va_copy => try sema.zirCVaCopy(block, extended),
                    .c_va_end => try sema.zirCVaEnd(block, extended),
                    .c_va_start => try sema.zirCVaStart(block, extended),
                    .ptr_cast_full => try sema.zirPtrCastFull(block, extended),
                    .ptr_cast_no_dest => try sema.zirPtrCastNoDest(block, extended),
                    .work_item_id => try sema.zirWorkItem(block, extended, extended.opcode),
                    .work_group_size => try sema.zirWorkItem(block, extended, extended.opcode),
                    .work_group_id => try sema.zirWorkItem(block, extended, extended.opcode),
                    .in_comptime => try sema.zirInComptime(block),
                    .closure_get => try sema.zirClosureGet(block, extended),
                    // zig fmt: on

                    .fence => {
                        try sema.zirFence(block, extended);
                        i += 1;
                        continue;
                    },
                    .set_float_mode => {
                        try sema.zirSetFloatMode(block, extended);
                        i += 1;
                        continue;
                    },
                    .set_align_stack => {
                        try sema.zirSetAlignStack(block, extended);
                        i += 1;
                        continue;
                    },
                    .set_cold => {
                        try sema.zirSetCold(block, extended);
                        i += 1;
                        continue;
                    },
                    .breakpoint => {
                        if (!block.is_comptime) {
                            _ = try block.addNoOp(.breakpoint);
                        }
                        i += 1;
                        continue;
                    },
                    .disable_instrumentation => {
                        try sema.zirDisableInstrumentation();
                        i += 1;
                        continue;
                    },
                    .restore_err_ret_index => {
                        try sema.zirRestoreErrRetIndex(block, extended);
                        i += 1;
                        continue;
                    },
                    .value_placeholder => unreachable, // never appears in a body
                    .field_parent_ptr => try sema.zirFieldParentPtr(block, extended),
                    .builtin_value => try sema.zirBuiltinValue(extended),
                };
            },

            // Instructions that we know can *never* be noreturn based solely on
            // their tag. We avoid needlessly checking if they are noreturn and
            // continue the loop.
            // We also know that they cannot be referenced later, so we avoid
            // putting them into the map.
            .dbg_stmt => {
                try sema.zirDbgStmt(block, inst);
                i += 1;
                continue;
            },
            .dbg_var_ptr => {
                try sema.zirDbgVar(block, inst, .dbg_var_ptr);
                i += 1;
                continue;
            },
            .dbg_var_val => {
                try sema.zirDbgVar(block, inst, .dbg_var_val);
                i += 1;
                continue;
            },
            .ensure_err_union_payload_void => {
                try sema.zirEnsureErrUnionPayloadVoid(block, inst);
                i += 1;
                continue;
            },
            .ensure_result_non_error => {
                try sema.zirEnsureResultNonError(block, inst);
                i += 1;
                continue;
            },
            .ensure_result_used => {
                try sema.zirEnsureResultUsed(block, inst);
                i += 1;
                continue;
            },
            .set_eval_branch_quota => {
                try sema.zirSetEvalBranchQuota(block, inst);
                i += 1;
                continue;
            },
            .atomic_store => {
                try sema.zirAtomicStore(block, inst);
                i += 1;
                continue;
            },
            .store_node => {
                try sema.zirStoreNode(block, inst);
                i += 1;
                continue;
            },
            .store_to_inferred_ptr => {
                try sema.zirStoreToInferredPtr(block, inst);
                i += 1;
                continue;
            },
            .resolve_inferred_alloc => {
                try sema.zirResolveInferredAlloc(block, inst);
                i += 1;
                continue;
            },
            .validate_struct_init_ty => {
                try sema.zirValidateStructInitTy(block, inst, false);
                i += 1;
                continue;
            },
            .validate_struct_init_result_ty => {
                try sema.zirValidateStructInitTy(block, inst, true);
                i += 1;
                continue;
            },
            .validate_array_init_ty => {
                try sema.zirValidateArrayInitTy(block, inst, false);
                i += 1;
                continue;
            },
            .validate_array_init_result_ty => {
                try sema.zirValidateArrayInitTy(block, inst, true);
                i += 1;
                continue;
            },
            .validate_ptr_struct_init => {
                try sema.zirValidatePtrStructInit(block, inst);
                i += 1;
                continue;
            },
            .validate_ptr_array_init => {
                try sema.zirValidatePtrArrayInit(block, inst);
                i += 1;
                continue;
            },
            .validate_deref => {
                try sema.zirValidateDeref(block, inst);
                i += 1;
                continue;
            },
            .validate_destructure => {
                try sema.zirValidateDestructure(block, inst);
                i += 1;
                continue;
            },
            .validate_ref_ty => {
                try sema.zirValidateRefTy(block, inst);
                i += 1;
                continue;
            },
            .@"export" => {
                try sema.zirExport(block, inst);
                i += 1;
                continue;
            },
            .export_value => {
                try sema.zirExportValue(block, inst);
                i += 1;
                continue;
            },
            .set_runtime_safety => {
                try sema.zirSetRuntimeSafety(block, inst);
                i += 1;
                continue;
            },
            .param => {
                try sema.zirParam(block, inst, false);
                i += 1;
                continue;
            },
            .param_comptime => {
                try sema.zirParam(block, inst, true);
                i += 1;
                continue;
            },
            .param_anytype => {
                try sema.zirParamAnytype(block, inst, false);
                i += 1;
                continue;
            },
            .param_anytype_comptime => {
                try sema.zirParamAnytype(block, inst, true);
                i += 1;
                continue;
            },
            .memcpy => {
                try sema.zirMemcpy(block, inst);
                i += 1;
                continue;
            },
            .memset => {
                try sema.zirMemset(block, inst);
                i += 1;
                continue;
            },
            .check_comptime_control_flow => {
                if (!block.is_comptime) {
                    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                    const src = block.nodeOffset(inst_data.src_node);
                    const inline_block = inst_data.operand.toIndex().?;

                    var check_block = block;
                    const target_runtime_index = while (true) {
                        if (check_block.inline_block == inline_block.toOptional()) {
                            break check_block.runtime_index;
                        }
                        check_block = check_block.parent.?;
                    };
                    if (@intFromEnum(target_runtime_index) < @intFromEnum(block.runtime_index)) {
                        const runtime_src = block.runtime_cond orelse block.runtime_loop.?;
                        const msg = msg: {
                            const msg = try sema.errMsg(src, "comptime control flow inside runtime block", .{});
                            errdefer msg.destroy(sema.gpa);
                            try sema.errNote(runtime_src, msg, "runtime control flow here", .{});
                            break :msg msg;
                        };
                        return sema.failWithOwnedErrorMsg(block, msg);
                    }
                }
                i += 1;
                continue;
            },
            .save_err_ret_index => {
                try sema.zirSaveErrRetIndex(block, inst);
                i += 1;
                continue;
            },
            .restore_err_ret_index_unconditional => {
                const un_node = datas[@intFromEnum(inst)].un_node;
                try sema.restoreErrRetIndex(block, block.nodeOffset(un_node.src_node), un_node.operand, .none);
                i += 1;
                continue;
            },
            .restore_err_ret_index_fn_entry => {
                const un_node = datas[@intFromEnum(inst)].un_node;
                try sema.restoreErrRetIndex(block, block.nodeOffset(un_node.src_node), .none, un_node.operand);
                i += 1;
                continue;
            },

            // Special case instructions to handle comptime control flow.
            .@"break" => {
                if (block.is_comptime) {
                    sema.comptime_break_inst = inst;
                    return error.ComptimeBreak;
                } else {
                    try sema.zirBreak(block, inst);
                    break;
                }
            },
            .break_inline => {
                sema.comptime_break_inst = inst;
                return error.ComptimeBreak;
            },
            .repeat => {
                if (block.is_comptime) {
                    // Send comptime control flow back to the beginning of this block.
                    const src = block.nodeOffset(datas[@intFromEnum(inst)].node);
                    try sema.emitBackwardBranch(block, src);
                    i = 0;
                    continue;
                } else {
                    // We are definitely called by `zirLoop`, which will treat the
                    // fact that this body does not terminate `noreturn` as an
                    // implicit repeat.
                    break;
                }
            },
            .repeat_inline => {
                // Send comptime control flow back to the beginning of this block.
                const src = block.nodeOffset(datas[@intFromEnum(inst)].node);
                try sema.emitBackwardBranch(block, src);
                i = 0;
                continue;
            },
            .loop => blk: {
                if (!block.is_comptime) break :blk try sema.zirLoop(block, inst);
                // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);

                // Create a temporary child block so that this loop is properly
                // labeled for any .restore_err_ret_index instructions
                var child_block = block.makeSubBlock();

                var label: Block.Label = .{
                    .zir_block = inst,
                    .merges = undefined,
                };
                child_block.label = &label;

                // Write these instructions directly into the parent block
                child_block.instructions = block.instructions;
                defer block.instructions = child_block.instructions;

                const result = try sema.analyzeInlineBody(&child_block, inline_body, inst) orelse break;
                break :blk result;
            },
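            // Illustrative sketch (not part of the original file; the user-level
            // code below is an assumption): for a body like
            //
            //     comptime {
            //         var i: u32 = 0;
            //         while (i < 3) : (i += 1) {}
            //     }
            //
            // each `repeat` sends analysis back to the start of the loop body
            // (the `i = 0` in the handlers above resets the body index, not the
            // variable), and `emitBackwardBranch` charges every iteration
            // against the branch quota, so an unbounded comptime loop becomes a
            // compile error.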
            .block, .block_comptime => blk: {
                if (!block.is_comptime) {
                    break :blk try sema.zirBlock(block, inst, tags[@intFromEnum(inst)] == .block_comptime);
                }
                // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220
                const inst_data = datas[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);

                // Create a temporary child block so that this block is properly
                // labeled for any .restore_err_ret_index instructions
                var child_block = block.makeSubBlock();

                var label: Block.Label = .{
                    .zir_block = inst,
                    .merges = undefined,
                };
                child_block.label = &label;

                // Write these instructions directly into the parent block
                child_block.instructions = block.instructions;
                defer block.instructions = child_block.instructions;

                const result = try sema.analyzeInlineBody(&child_block, inline_body, inst) orelse break;
                break :blk result;
            },
            .block_inline => blk: {
                // Directly analyze the block body without introducing a new block.
                // However, in the case of a corresponding break_inline which reaches
                // through a runtime conditional branch, we must retroactively emit
                // a block, so we remember the block index here just in case.
                const block_index = block.instructions.items.len;
                const inst_data = datas[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
                const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
                const gpa = sema.gpa;

                const BreakResult = struct {
                    block_inst: Zir.Inst.Index,
                    operand: Zir.Inst.Ref,
                };

                const opt_break_data: ?BreakResult, const need_debug_scope = b: {
                    // Create a temporary child block so that this inline block is properly
                    // labeled for any .restore_err_ret_index instructions
                    var child_block = block.makeSubBlock();
                    var need_debug_scope = false;
                    child_block.need_debug_scope = &need_debug_scope;

                    // If this block contains a function prototype, we need to reset the
                    // current list of parameters and restore it later.
                    // Note: this probably needs to be resolved in a more general manner.
                    const tag_index = @intFromEnum(inline_body[inline_body.len - 1]);
                    child_block.inline_block =
                        (if (tags[tag_index] == .repeat_inline) inline_body[0] else inst).toOptional();

                    var label: Block.Label = .{
                        .zir_block = inst,
                        .merges = undefined,
                    };
                    child_block.label = &label;

                    // Write these instructions directly into the parent block
                    child_block.instructions = block.instructions;
                    defer block.instructions = child_block.instructions;

                    const break_result: ?BreakResult = if (sema.analyzeBodyInner(&child_block, inline_body)) |_| r: {
                        break :r null;
                    } else |err| switch (err) {
                        error.ComptimeBreak => brk_res: {
                            const break_inst = sema.comptime_break_inst;
                            const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break";
                            const break_extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
                            break :brk_res .{
                                .block_inst = break_extra.block_inst,
                                .operand = break_data.operand,
                            };
                        },
                        else => |e| return e,
                    };

                    if (need_debug_scope) {
                        _ = try sema.ensurePostHoc(block, inst);
                    }

                    break :b .{ break_result, need_debug_scope };
                };

                // A runtime conditional branch that needs a post-hoc block to be
                // emitted communicates this by mapping the block index into the inst map.
                if (map.get(inst)) |new_block_ref| ph: {
                    // Comptime control flow populates the map, so we don't actually know
                    // if this is a post-hoc runtime block until we check the
                    // post_hoc_blocks map.
                    // A runtime conditional branch that needs a post-hoc block to be
                    // emitted communicates this by mapping the block index into the inst map.
                    if (map.get(inst)) |new_block_ref| ph: {
                        // Comptime control flow populates the map, so we don't actually know
                        // if this is a post-hoc runtime block until we check the
                        // post_hoc_blocks map.
                        const new_block_inst = new_block_ref.toIndex() orelse break :ph;
                        const labeled_block = sema.post_hoc_blocks.get(new_block_inst) orelse break :ph;

                        // In this case we need to move all the instructions starting at
                        // block_index from the current block into this new one.

                        if (opt_break_data) |break_data| {
                            // This is a comptime break which we now change to a runtime break
                            // since it crosses a runtime branch.
                            // It may pass through the block_inline currently being analyzed, or
                            // it may point directly to it. In the latter case, this modifies the
                            // block that we looked up in the post_hoc_blocks map above.
                            try sema.addRuntimeBreak(block, break_data.block_inst, break_data.operand);
                        }

                        try labeled_block.block.instructions.appendSlice(gpa, block.instructions.items[block_index..]);
                        block.instructions.items.len = block_index;

                        const block_result = try sema.resolveAnalyzedBlock(block, block.nodeOffset(inst_data.src_node), &labeled_block.block, &labeled_block.label.merges, need_debug_scope);
                        {
                            // Destroy the ad-hoc block entry so that it does not interfere with
                            // the next iteration of comptime control flow, if any.
                            labeled_block.destroy(gpa);
                            assert(sema.post_hoc_blocks.remove(new_block_inst));
                        }

                        break :blk block_result;
                    }

                    const break_data = opt_break_data orelse break;
                    if (inst == break_data.block_inst) {
                        break :blk try sema.resolveInst(break_data.operand);
                    } else {
                        // `comptime_break_inst` preserved from `analyzeBodyInner` above.
                        return error.ComptimeBreak;
                    }
                },
                .condbr => blk: {
                    if (!block.is_comptime) {
                        try sema.zirCondbr(block, inst);
                        break;
                    }
                    // Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220
                    const inst_data = datas[@intFromEnum(inst)].pl_node;
                    const cond_src = block.src(.{ .node_offset_if_cond = inst_data.src_node });
                    const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
                    const then_body = sema.code.bodySlice(extra.end, extra.data.then_body_len);
                    const else_body = sema.code.bodySlice(
                        extra.end + then_body.len,
                        extra.data.else_body_len,
                    );
                    const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, .{
                        .needed_comptime_reason = "condition in comptime branch must be comptime-known",
                        .block_comptime_reason = block.comptime_reason,
                    });
                    const inline_body = if (cond.toBool()) then_body else else_body;

                    try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
                    const result = try sema.analyzeInlineBody(block, inline_body, inst) orelse break;
                    break :blk result;
                },
                .condbr_inline => blk: {
                    const inst_data = datas[@intFromEnum(inst)].pl_node;
                    const cond_src = block.src(.{ .node_offset_if_cond = inst_data.src_node });
                    const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
                    const then_body = sema.code.bodySlice(extra.end, extra.data.then_body_len);
                    const else_body = sema.code.bodySlice(
                        extra.end + then_body.len,
                        extra.data.else_body_len,
                    );
                    const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition, .{
                        .needed_comptime_reason = "condition in comptime branch must be comptime-known",
                        .block_comptime_reason = block.comptime_reason,
                    });
                    const inline_body = if (cond.toBool()) then_body else else_body;

                    try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
                    const old_runtime_index = block.runtime_index;
                    defer block.runtime_index = old_runtime_index;
                    const result = try sema.analyzeInlineBody(block, inline_body, inst) orelse break;
                    break :blk result;
                },
                .@"try" => blk: {
                    if (!block.is_comptime) break :blk try sema.zirTry(block, inst);
                    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                    const src = block.nodeOffset(inst_data.src_node);
                    const operand_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
                    const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
                    const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
                    const err_union = try sema.resolveInst(extra.data.operand);
                    const err_union_ty = sema.typeOf(err_union);
                    if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
                        return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
                            err_union_ty.fmt(pt),
                        });
                    }
                    const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
                    assert(is_non_err != .none);
                    const is_non_err_val = try sema.resolveConstDefinedValue(block, operand_src, is_non_err, .{
                        .needed_comptime_reason = "try operand inside comptime block must be comptime-known",
                        .block_comptime_reason = block.comptime_reason,
                    });
                    if (is_non_err_val.toBool()) {
                        break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
                    }
                    const result = try sema.analyzeInlineBody(block, inline_body, inst) orelse break;
                    break :blk result;
                },
                .try_ptr => blk: {
                    if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst);
                    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                    const src = block.nodeOffset(inst_data.src_node);
                    const operand_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
                    const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index);
                    const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
                    const operand = try sema.resolveInst(extra.data.operand);
                    const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
                    const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
                    assert(is_non_err != .none);
                    const is_non_err_val = try sema.resolveConstDefinedValue(block, operand_src, is_non_err, .{
                        .needed_comptime_reason = "try operand inside comptime block must be comptime-known",
                        .block_comptime_reason = block.comptime_reason,
                    });
                    if (is_non_err_val.toBool()) {
                        break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
                    }
                    const result = try sema.analyzeInlineBody(block, inline_body, inst) orelse break;
                    break :blk result;
                },
                .@"defer" => blk: {
                    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"defer";
                    const defer_body = sema.code.bodySlice(inst_data.index, inst_data.len);
                    if (sema.analyzeBodyInner(block, defer_body)) |_| {
                        // The defer terminated noreturn - no more analysis needed.
                        break;
                    } else |err| switch (err) {
                        error.ComptimeBreak => {},
                        else => |e| return e,
                    }
                    if (sema.comptime_break_inst != defer_body[defer_body.len - 1]) {
                        return error.ComptimeBreak;
                    }
                    break :blk .void_value;
                },
                .defer_err_code => blk: {
                    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].defer_err_code;
                    const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data;
                    const defer_body = sema.code.bodySlice(extra.index, extra.len);
                    const err_code = try sema.resolveInst(inst_data.err_code);
                    map.putAssumeCapacity(extra.remapped_err_code, err_code);
                    if (sema.analyzeBodyInner(block, defer_body)) |_| {
                        // The defer terminated noreturn - no more analysis needed.
                        break;
                    } else |err| switch (err) {
                        error.ComptimeBreak => {},
                        else => |e| return e,
                    }
                    if (sema.comptime_break_inst != defer_body[defer_body.len - 1]) {
                        return error.ComptimeBreak;
                    }
                    break :blk .void_value;
                },
            };
            if (sema.isNoReturn(air_inst)) {
                // We're going to assume that the body itself is noreturn, so let's ensure that now.
                assert(block.instructions.items.len > 0);
                assert(sema.isNoReturn(block.instructions.items[block.instructions.items.len - 1].toRef()));
                break;
            }
            map.putAssumeCapacity(inst, air_inst);
            i += 1;
        }
    }

pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
    if (zir_ref == .none) {
        return .none;
    } else {
        return resolveInst(sema, zir_ref);
    }
}

pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
    assert(zir_ref != .none);
    if (zir_ref.toIndex()) |i| {
        const inst = sema.inst_map.get(i).?;
        if (inst == .generic_poison) return error.GenericPoison;
        return inst;
    }
    // The first section of indexes corresponds to a set number of constant values.
    // We intentionally map the same indexes to the same values between ZIR and AIR.
    return @enumFromInt(@intFromEnum(zir_ref));
}

fn resolveConstBool(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) !bool {
    const air_inst = try sema.resolveInst(zir_ref);
    const wanted_type = Type.bool;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
    return val.toBool();
}

fn resolveConstString(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) ![]u8 {
    const air_inst = try sema.resolveInst(zir_ref);
    return sema.toConstString(block, src, air_inst, reason);
}

pub fn toConstString(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_inst: Air.Inst.Ref,
    reason: NeededComptimeReason,
) ![]u8 {
    const pt = sema.pt;
    const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src);
    const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
    const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
    return arr_val.toAllocatedBytes(arr_val.typeOf(pt.zcu), sema.arena, pt);
}

pub fn resolveConstStringIntern(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) !InternPool.NullTerminatedString {
    const air_inst = try sema.resolveInst(zir_ref);
    const wanted_type = Type.slice_const_u8;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
    return sema.sliceToIpString(block, src, val, reason);
}

pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
    const air_inst = try sema.resolveInst(zir_ref);
    const ty = try sema.analyzeAsType(block, src, air_inst);
    if (ty.isGenericPoison()) return error.GenericPoison;
    return ty;
}

fn resolveDestType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    strat: enum { remove_eu_opt, remove_eu, remove_opt },
    builtin_name: []const u8,
) !Type {
    const pt = sema.pt;
    const mod = pt.zcu;
    const remove_eu = switch (strat) {
        .remove_eu_opt, .remove_eu => true,
        .remove_opt => false,
    };
    const remove_opt = switch (strat) {
        .remove_eu_opt, .remove_opt => true,
        .remove_eu => false,
    };

    const raw_ty = sema.resolveType(block, src, zir_ref) catch |err| switch (err) {
        error.GenericPoison => {
            // Cast builtins use their result type as the destination type, but
            // it could be an anytype argument, which we can't catch in AstGen.
            const msg = msg: {
                const msg = try sema.errMsg(src, "{s} must have a known result type", .{builtin_name});
                errdefer msg.destroy(sema.gpa);
                switch (sema.genericPoisonReason(block, zir_ref)) {
                    .anytype_param => |call_src| try sema.errNote(call_src, msg, "result type is unknown due to anytype parameter", .{}),
                    .anyopaque_ptr => |ptr_src| try sema.errNote(ptr_src, msg, "result type is unknown due to opaque pointer type", .{}),
                    .unknown => {},
                }
                try sema.errNote(src, msg, "use @as to provide explicit result type", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        else => |e| return e,
    };
    if (remove_eu and raw_ty.zigTypeTag(mod) == .ErrorUnion) {
        const eu_child = raw_ty.errorUnionPayload(mod);
        if (remove_opt and eu_child.zigTypeTag(mod) == .Optional) {
            return eu_child.childType(mod);
        }
        return eu_child;
    }
    if (remove_opt and raw_ty.zigTypeTag(mod) == .Optional) {
        return raw_ty.childType(mod);
    }
    return raw_ty;
}

const GenericPoisonReason = union(enum) {
    anytype_param: LazySrcLoc,
    anyopaque_ptr: LazySrcLoc,
    unknown,
};

/// Backtracks through ZIR instructions to determine the reason a generic poison
/// type was created. Used for error reporting.
fn genericPoisonReason(sema: *Sema, block: *Block, ref: Zir.Inst.Ref) GenericPoisonReason {
    var cur = ref;
    while (true) {
        const inst = cur.toIndex() orelse return .unknown;
        switch (sema.code.instructions.items(.tag)[@intFromEnum(inst)]) {
            .validate_array_init_ref_ty => {
                const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, pl_node.payload_index).data;
                cur = extra.ptr_ty;
            },
            .array_init_elem_type => {
                const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin;
                cur = bin.lhs;
            },
            .indexable_ptr_elem_type, .vector_elem_type => {
                const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                cur = un_node.operand;
            },
            .struct_init_field_type => {
                const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                const extra = sema.code.extraData(Zir.Inst.FieldType, pl_node.payload_index).data;
                cur = extra.container_type;
            },
            .elem_type => {
                // There are two cases here: the pointer type may already have been
                // generic poison, or it may have been an anyopaque pointer.
                const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                const operand_ref = sema.resolveInst(un_node.operand) catch |err| switch (err) {
                    error.GenericPoison => unreachable, // this is a type, not a value
                };
                const operand_val = operand_ref.toInterned() orelse return .unknown;
                if (operand_val == .generic_poison_type) {
                    // The pointer was generic poison - keep looking.
                    cur = un_node.operand;
                } else {
                    // This must be an anyopaque pointer!
                    return .{ .anyopaque_ptr = block.nodeOffset(un_node.src_node) };
                }
            },
            .call, .field_call => {
                // A function call can never return generic poison, so we must be
                // evaluating an `anytype` function parameter.
                // TODO: better source location - function decl rather than call
                const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
                return .{ .anytype_param = block.nodeOffset(pl_node.src_node) };
            },
            else => return .unknown,
        }
    }
}

fn analyzeAsType(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_inst: Air.Inst.Ref,
) !Type {
    const wanted_type = Type.type;
    const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
    const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, .{
        .needed_comptime_reason = "types must be comptime-known",
    });
    return val.toType();
}

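/// Sets up the error return trace in a function prologue. Synthesizes, roughly
/// (see the step-by-step comments in the body):
///   var addrs: [32]usize = undefined;
///   var st: StackTrace = undefined;
///   st.instruction_addresses = &addrs;
///   st.index = 0;
///   @errorReturnTrace() = &st;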
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
    const pt = sema.pt;
    const mod = pt.zcu;
    const comp = mod.comp;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    if (!comp.config.any_error_tracing) return;
    assert(!block.is_comptime);

    var err_trace_block = block.makeSubBlock();
    defer err_trace_block.instructions.deinit(gpa);

    const src: LazySrcLoc = LazySrcLoc.unneeded;

    // var addrs: [err_return_trace_addr_count]usize = undefined;
    const err_return_trace_addr_count = 32;
    const addr_arr_ty = try pt.arrayType(.{
        .len = err_return_trace_addr_count,
        .child = .usize_type,
    });
    const addrs_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(addr_arr_ty));

    // var st: StackTrace = undefined;
    const stack_trace_ty = try pt.getBuiltinType("StackTrace");
    try stack_trace_ty.resolveFields(pt);
    const st_ptr = try err_trace_block.addTy(.alloc, try pt.singleMutPtrType(stack_trace_ty));

    // st.instruction_addresses = &addrs;
    const instruction_addresses_field_name = try ip.getOrPutString(gpa, pt.tid, "instruction_addresses", .no_embedded_nulls);
    const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true);
    try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store);

    // st.index = 0;
    const index_field_name = try ip.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls);
    const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true);
    try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store);

    // @errorReturnTrace() = &st;
    _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr);

    try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items);
}

/// Return the Value corresponding to a given AIR ref, or `null` if it refers to a runtime value.
/// InternPool key `variable` is considered a runtime value.
/// Generic poison causes `error.GenericPoison` to be returned.
fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveValueAllowVariables(inst)) orelse return null;
    if (val.isGenericPoison()) return error.GenericPoison;
    if (sema.pt.zcu.intern_pool.isVariable(val.toIntern())) return null;
    return val;
}

/// Like `resolveValue`, but emits an error if the value is not comptime-known.
fn resolveConstValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    inst: Air.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!Value {
    return try sema.resolveValue(inst) orelse {
        return sema.failWithNeededComptime(block, src, reason);
    };
}

/// Like `resolveValue`, but emits an error if the value is comptime-known to be undefined.
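/// Returns `null` (rather than failing) when the value is runtime-known.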
fn resolveDefinedValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!?Value {
    const pt = sema.pt;
    const mod = pt.zcu;
    const val = try sema.resolveValue(air_ref) orelse return null;
    if (val.isUndef(mod)) {
        return sema.failWithUseOfUndef(block, src);
    }
    return val;
}

/// Like `resolveValue`, but emits an error if the value is not comptime-known or is undefined.
fn resolveConstDefinedValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!Value {
    const val = try sema.resolveConstValue(block, src, air_ref, reason);
    if (val.isUndef(sema.pt.zcu)) return sema.failWithUseOfUndef(block, src);
    return val;
}

/// Like `resolveValue`, but recursively resolves lazy values before returning.
fn resolveValueResolveLazy(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    return try sema.resolveLazyValue((try sema.resolveValue(inst)) orelse return null);
}

/// Like `resolveValue`, but any pointer value which does not correspond
/// to a comptime-known integer (e.g. a decl pointer) returns `null`.
/// Lazy values are recursively resolved.
fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const val = (try sema.resolveValue(inst)) orelse return null;
    if (sema.pt.zcu.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) {
        .nav, .uav, .comptime_alloc, .comptime_field => return null,
        .int => {},
        .eu_payload, .opt_payload, .arr_elem, .field => unreachable,
    };
    return try sema.resolveLazyValue(val);
}

/// Returns all InternPool keys representing values, including `variable`, `undef`, and `generic_poison`.
fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
    const pt = sema.pt;
    assert(inst != .none);
    // The first section of indexes corresponds to a set number of constant values.
    if (@intFromEnum(inst) < InternPool.static_len) {
        return Value.fromInterned(@as(InternPool.Index, @enumFromInt(@intFromEnum(inst))));
    }
    const air_tags = sema.air_instructions.items(.tag);
    if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
        if (inst.toInterned()) |ip_index| {
            const val = Value.fromInterned(ip_index);
            if (val.getVariable(pt.zcu) != null) return val;
        }
        return opv;
    }
    const ip_index = inst.toInterned() orelse {
        switch (air_tags[@intFromEnum(inst.toIndex().?)]) {
            .inferred_alloc => unreachable,
            .inferred_alloc_comptime => unreachable,
            else => return null,
        }
    };
    const val = Value.fromInterned(ip_index);
    if (val.isPtrToThreadLocal(pt.zcu)) return null;
    return val;
}

/// Returns a compile error if the value has tag `variable`.
fn resolveInstConst(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    reason: NeededComptimeReason,
) CompileError!Value {
    const air_ref = try sema.resolveInst(zir_ref);
    return sema.resolveConstDefinedValue(block, src, air_ref, reason);
}

/// Value Tag may be `undef` or `variable`.
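/// Used when resolving the final value of a global declaration, where a runtime
/// `var` is a legal result but a reference to a comptime var is not.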
pub fn resolveFinalDeclValue(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) CompileError!Value {
    const val = try sema.resolveValueAllowVariables(air_ref) orelse {
        return sema.failWithNeededComptime(block, src, .{
            .needed_comptime_reason = "global variable initializer must be comptime-known",
        });
    };
    if (val.isGenericPoison()) return error.GenericPoison;
    if (val.canMutateComptimeVarState(sema.pt.zcu)) {
        return sema.fail(block, src, "global variable contains reference to comptime var", .{});
    }
    return val;
}

fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: NeededComptimeReason) CompileError {
    const msg = msg: {
        const msg = try sema.errMsg(src, "unable to resolve comptime value", .{});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(src, msg, "{s}", .{reason.needed_comptime_reason});
        if (reason.block_comptime_reason) |block_comptime_reason| {
            try block_comptime_reason.explain(sema, msg);
        }
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{});
}

fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    return sema.fail(block, src, "division by zero here causes undefined behavior", .{});
}

fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError {
    const pt = sema.pt;
    return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{
        lhs_ty.fmt(pt), rhs_ty.fmt(pt),
    });
}

fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, non_optional_ty: Type) CompileError {
    const pt = sema.pt;
    const msg = msg: {
        const msg = try sema.errMsg(src, "expected optional type, found '{}'", .{
            non_optional_ty.fmt(pt),
        });
        errdefer msg.destroy(sema.gpa);
        if (non_optional_ty.zigTypeTag(pt.zcu) == .ErrorUnion) {
            try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
        }
        try addDeclaredHereNote(sema, msg, non_optional_ty);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
    const pt = sema.pt;
    const msg = msg: {
        const msg = try sema.errMsg(src, "type '{}' does not support array initialization syntax", .{
            ty.fmt(pt),
        });
        errdefer msg.destroy(sema.gpa);
        if (ty.isSlice(pt.zcu)) {
            try sema.errNote(src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(pt.zcu).fmt(pt)});
        }
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

fn failWithStructInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
    const pt = sema.pt;
    return sema.fail(block, src, "type '{}' does not support struct initialization syntax", .{
        ty.fmt(pt),
    });
}

fn failWithErrorSetCodeMissing(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    dest_err_set_ty: Type,
    src_err_set_ty: Type,
) CompileError {
    const pt = sema.pt;
    return sema.fail(block, src, "expected type '{}', found type '{}'", .{
        dest_err_set_ty.fmt(pt), src_err_set_ty.fmt(pt),
    });
}

fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
    const pt = sema.pt;
    const zcu = pt.zcu;
    if (int_ty.zigTypeTag(zcu) == .Vector) {
        const msg = msg: {
            const msg = try sema.errMsg(src, "overflow of vector type '{}' with value '{}'", .{
                int_ty.fmt(pt), val.fmtValueSema(pt, sema),
            });
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(src, msg, "when computing vector element at index '{d}'", .{vector_index});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    return sema.fail(block, src, "overflow of integer type '{}' with value '{}'", .{
        int_ty.fmt(pt), val.fmtValueSema(pt, sema),
    });
}

fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError {
    const pt = sema.pt;
    const mod = pt.zcu;
    const msg = msg: {
        const msg = try sema.errMsg(init_src, "value stored in comptime field does not match the default value of the field", .{});
        errdefer msg.destroy(sema.gpa);
        const struct_type = mod.typeToStruct(container_ty) orelse break :msg msg;
        try sema.errNote(.{
            .base_node_inst = struct_type.zir_index.unwrap().?,
            .offset = .{ .container_field_value = @intCast(field_index) },
        }, msg, "default value set here", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError {
    const msg = msg: {
        const msg = try sema.errMsg(src, "async has not been implemented in the self-hosted compiler yet", .{});
        errdefer msg.destroy(sema.gpa);
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

fn failWithInvalidFieldAccess(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    object_ty: Type,
    field_name: InternPool.NullTerminatedString,
) CompileError {
    const pt = sema.pt;
    const mod = pt.zcu;
    const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;

    if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
        const child_ty = inner_ty.optionalChild(mod);
        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
        const msg = msg: {
            const msg = try sema.errMsg(src, "optional type '{}' does not support field access", .{object_ty.fmt(pt)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(src, msg, "consider using '.?', 'orelse', or 'if'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: {
        const child_ty = inner_ty.errorUnionPayload(mod);
        if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
        const msg = msg: {
            const msg = try sema.errMsg(src, "error union type '{}' does not support field access", .{object_ty.fmt(pt)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
    return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(pt)});
}

fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool {
    const ip = &mod.intern_pool;
    switch (ty.zigTypeTag(mod)) {
        .Array => return field_name.eqlSlice("len", ip),
        .Pointer => {
            const ptr_info = ty.ptrInfo(mod);
            if (ptr_info.flags.size == .Slice) {
                return field_name.eqlSlice("ptr", ip) or field_name.eqlSlice("len", ip);
            } else if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
                return field_name.eqlSlice("len", ip);
            } else return false;
        },
        .Type, .Struct, .Union => return true,
        else => return false,
    }
}

fn failWithComptimeErrorRetTrace(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    name: InternPool.NullTerminatedString,
) CompileError {
    const pt = sema.pt;
    const mod = pt.zcu;
    const msg = msg: {
        const msg = try sema.errMsg(src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)});
        errdefer msg.destroy(sema.gpa);
        for (sema.comptime_err_ret_trace.items) |src_loc| {
            try sema.errNote(src_loc, msg, "error returned here", .{});
        }
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

fn failWithInvalidPtrArithmetic(sema: *Sema, block: *Block, src: LazySrcLoc, arithmetic: []const u8, supports: []const u8) CompileError {
    const msg = msg: {
        const msg = try sema.errMsg(src, "invalid {s} arithmetic operator", .{arithmetic});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(src, msg, "{s} arithmetic only supports {s}", .{ arithmetic, supports });
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}

/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
pub fn errNote(
    sema: *Sema,
    src: LazySrcLoc,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    return sema.pt.zcu.errNote(src, parent, format, args);
}

fn addFieldErrNote(
    sema: *Sema,
    container_ty: Type,
    field_index: usize,
    parent: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) !void {
    @setCold(true);
    const type_src = container_ty.srcLocOrNull(sema.pt.zcu) orelse return;
    const field_src: LazySrcLoc = .{
        .base_node_inst = type_src.base_node_inst,
        .offset = .{ .container_field_name = @intCast(field_index) },
    };
    try sema.errNote(field_src, parent, format, args);
}

pub fn errMsg(
    sema: *Sema,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) Allocator.Error!*Module.ErrorMsg {
    assert(src.offset != .unneeded);
    return Module.ErrorMsg.create(sema.gpa, src, format, args);
}

pub fn fail(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    comptime format: []const u8,
    args: anytype,
) CompileError {
    const err_msg = try sema.errMsg(src, format, args);
    inline for (args) |arg| {
        if (@TypeOf(arg) == Type.Formatter) {
            try addDeclaredHereNote(sema, err_msg, arg.data.ty);
        }
    }
    return sema.failWithOwnedErrorMsg(block, err_msg);
}

pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.ErrorMsg) error{ AnalysisFail, OutOfMemory } {
    @setCold(true);
    const gpa = sema.gpa;
    const mod = sema.pt.zcu;

    if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
        var all_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?Zcu.ResolvedReference) = null;
        var wip_errors: std.zig.ErrorBundle.Wip = undefined;
        wip_errors.init(gpa) catch @panic("out of memory");
        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory");
        std.debug.print("compile error during Sema:\n", .{});
        var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
        error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
        crash_report.compilerPanic("unexpected compile error occurred", null, null);
    }

    if (block) |start_block| {
        var block_it = start_block;
        while (block_it.inlining) |inlining| {
            try sema.errNote(
                inlining.call_src,
                err_msg,
                "called from here",
                .{},
            );
            block_it = inlining.call_block;
        }
    }

    const use_ref_trace = if (mod.comp.reference_trace) |n| n > 0 else mod.failed_analysis.count() == 0;
    if (use_ref_trace) {
        err_msg.reference_trace_root = sema.owner.toOptional();
    }

    const gop = try mod.failed_analysis.getOrPut(gpa, sema.owner);
    if (gop.found_existing) {
        // If there are multiple errors for the same Decl, prefer the first one added.
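        // The newer message is destroyed rather than leaked.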
        sema.err = null;
        err_msg.destroy(gpa);
    } else {
        sema.err = err_msg;
        gop.value_ptr.* = err_msg;
    }
    return error.AnalysisFail;
}

/// Given an ErrorMsg, modify its message and source location to the given values, turning the
/// original message into a note. Notes on the original message are preserved as further notes.
/// Reference trace is preserved.
fn reparentOwnedErrorMsg(
    sema: *Sema,
    src: LazySrcLoc,
    msg: *Module.ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) !void {
    const msg_str = try std.fmt.allocPrint(sema.gpa, format, args);

    const orig_notes = msg.notes.len;
    msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
    std.mem.copyBackwards(Module.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
    msg.notes[0] = .{
        .src_loc = msg.src_loc,
        .msg = msg.msg,
    };

    msg.src_loc = src;
    msg.msg = msg_str;
}

const align_ty = Type.u29;

pub fn analyzeAsAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
) !Alignment {
    const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, .{
        .needed_comptime_reason = "alignment must be comptime-known",
    });
    return sema.validateAlign(block, src, alignment_big);
}

fn validateAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    alignment: u64,
) !Alignment {
    const result = try validateAlignAllowZero(sema, block, src, alignment);
    if (result == .none) return sema.fail(block, src, "alignment must be >= 1", .{});
    return result;
}

fn validateAlignAllowZero(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    alignment: u64,
) !Alignment {
    if (alignment == 0) return .none;
    if (!std.math.isPowerOfTwo(alignment)) {
        return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{
            alignment,
        });
    }
    return Alignment.fromNonzeroByteUnits(alignment);
}

fn resolveAlign(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
) !Alignment {
    const air_ref = try sema.resolveInst(zir_ref);
    return sema.analyzeAsAlign(block, src, air_ref);
}

fn resolveInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    zir_ref: Zir.Inst.Ref,
    dest_ty: Type,
    reason: NeededComptimeReason,
) !u64 {
    const air_ref = try sema.resolveInst(zir_ref);
    return sema.analyzeAsInt(block, src, air_ref, dest_ty, reason);
}

fn analyzeAsInt(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    air_ref: Air.Inst.Ref,
    dest_ty: Type,
    reason: NeededComptimeReason,
) !u64 {
    const coerced = try sema.coerce(block, dest_ty, air_ref, src);
    const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
    return (try val.getUnsignedIntAdvanced(sema.pt, .sema)).?;
}

/// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`,
/// resolves this into a list of `InternPool.CaptureValue` allocated by `arena`.
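/// Comptime-known captures are interned; runtime captures are represented
/// by the type of the captured value instead.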
fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const parent_ty = Type.fromInterned(zcu.namespacePtr(block.namespace).owner_type);
    const parent_captures: InternPool.CaptureValue.Slice = parent_ty.getCaptures(zcu);

    const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len);

    for (sema.code.extra[extra_index..][0..captures_len], captures) |raw, *capture| {
        const zir_capture: Zir.Inst.Capture = @bitCast(raw);
        capture.* = switch (zir_capture.unwrap()) {
            .nested => |parent_idx| parent_captures.get(ip)[parent_idx],
            .instruction_load => |ptr_inst| InternPool.CaptureValue.wrap(capture: {
                const ptr_ref = try sema.resolveInst(ptr_inst.toRef());
                const ptr_val = try sema.resolveValue(ptr_ref) orelse {
                    break :capture .{ .runtime = sema.typeOf(ptr_ref).childType(zcu).toIntern() };
                };
                // TODO: better source location
                const unresolved_loaded_val = try sema.pointerDeref(block, type_src, ptr_val, sema.typeOf(ptr_ref)) orelse {
                    break :capture .{ .runtime = sema.typeOf(ptr_ref).childType(zcu).toIntern() };
                };
                const loaded_val = try sema.resolveLazyValue(unresolved_loaded_val);
                if (loaded_val.canMutateComptimeVarState(zcu)) {
                    // TODO: source location of captured value
                    return sema.fail(block, type_src, "type capture contains reference to comptime var", .{});
                }
                break :capture .{ .@"comptime" = loaded_val.toIntern() };
            }),
            .instruction => |inst| InternPool.CaptureValue.wrap(capture: {
                const air_ref = try sema.resolveInst(inst.toRef());
                if (try sema.resolveValueResolveLazy(air_ref)) |val| {
                    if (val.canMutateComptimeVarState(zcu)) {
                        // TODO: source location of captured value
                        return sema.fail(block, type_src, "type capture contains reference to comptime var", .{});
                    }
                    break :capture .{ .@"comptime" = val.toIntern() };
                }
                break :capture .{ .runtime = sema.typeOf(air_ref).toIntern() };
            }),
            .decl_val => |str| capture: {
                const decl_name = try ip.getOrPutString(
                    sema.gpa,
                    pt.tid,
                    sema.code.nullTerminatedString(str),
                    .no_embedded_nulls,
                );
                const nav = try sema.lookupIdentifier(block, LazySrcLoc.unneeded, decl_name); // TODO: could we need this src loc?
                break :capture InternPool.CaptureValue.wrap(.{ .nav_val = nav });
            },
            .decl_ref => |str| capture: {
                const decl_name = try ip.getOrPutString(
                    sema.gpa,
                    pt.tid,
                    sema.code.nullTerminatedString(str),
                    .no_embedded_nulls,
                );
                const nav = try sema.lookupIdentifier(block, LazySrcLoc.unneeded, decl_name); // TODO: could we need this src loc?
                break :capture InternPool.CaptureValue.wrap(.{ .nav_ref = nav });
            },
        };
    }

    return captures;
}

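/// Analyzes a `struct_decl` ZIR instruction: reuses an existing interned type
/// when one matches, or else creates a WIP type plus its namespace and `Cau`
/// and queues full resolution.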
fn zirStructDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.StructDecl, extended.operand);
    const tracked_inst = try block.trackZir(inst);
    const src: LazySrcLoc = .{
        .base_node_inst = tracked_inst,
        .offset = LazySrcLoc.Offset.nodeOffset(0),
    };

    var extra_index = extra.end;

    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;
    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;
    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    const captures = try sema.getCaptures(block, src, extra_index, captures_len);
    extra_index += captures_len;

    if (small.has_backing_int) {
        const backing_int_body_len = sema.code.extra[extra_index];
        extra_index += 1; // backing_int_body_len
        if (backing_int_body_len == 0) {
            extra_index += 1; // backing_int_ref
        } else {
            extra_index += backing_int_body_len; // backing_int_body_inst
        }
    }

    const struct_init: InternPool.StructTypeInit = .{
        .layout = small.layout,
        .fields_len = fields_len,
        .known_non_opv = small.known_non_opv,
        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
        .is_tuple = small.is_tuple,
        .any_comptime_fields = small.any_comptime_fields,
        .any_default_inits = small.any_default_inits,
        .inits_resolved = false,
        .any_aligned_fields = small.any_aligned_fields,
        .key = .{ .declared = .{
            .zir_index = tracked_inst,
            .captures = captures,
        } },
    };
    const wip_ty = switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) {
        .existing => |ty| {
            const new_ty = try pt.ensureTypeUpToDate(ty, false);

            // Make sure we update the namespace if the declaration is re-analyzed, to pick
            // up on e.g. changed comptime decls.
            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));

            try sema.declareDependency(.{ .interned = new_ty });
            try sema.addTypeReferenceEntry(src, new_ty);
            return Air.internedToRef(new_ty);
        },
        .wip => |wip| wip,
    };
    errdefer wip_ty.cancel(ip, pt.tid);

    wip_ty.setName(ip, try sema.createTypeName(
        block,
        small.name_strategy,
        "struct",
        inst,
        wip_ty.index,
    ));

    const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .owner_type = wip_ty.index,
        .file_scope = block.getFileScopeIndex(mod),
        .generation = mod.generation,
    });
    errdefer pt.destroyNamespace(new_namespace_index);

    const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);

    if (pt.zcu.comp.incremental) {
        try ip.addDependency(
            sema.gpa,
            AnalUnit.wrap(.{ .cau = new_cau_index }),
            .{ .src_hash = tracked_inst },
        );
    }

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try pt.scanNamespace(new_namespace_index, decls);

    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
    codegen_type: {
        if (mod.comp.config.use_llvm) break :codegen_type;
        if (block.ownerModule().strip) break :codegen_type;
        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
    }
    try sema.declareDependency(.{ .interned = wip_ty.index });
    try sema.addTypeReferenceEntry(src, wip_ty.index);
    return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}

fn createTypeName(
    sema: *Sema,
    block: *Block,
    name_strategy: Zir.Inst.NameStrategy,
    anon_prefix: []const u8,
    inst: ?Zir.Inst.Index,
    /// This is used purely to give the type a unique name in the `anon` case.
    type_index: InternPool.Index,
) !InternPool.NullTerminatedString {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    switch (name_strategy) {
        .anon => {}, // handled after switch
        .parent => return block.type_name_ctx,
        .func => func_strat: {
            const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail);
            const zir_tags = sema.code.instructions.items(.tag);

            var buf: std.ArrayListUnmanaged(u8) = .{};
            defer buf.deinit(gpa);

            const writer = buf.writer(gpa);
            try writer.print("{}(", .{block.type_name_ctx.fmt(ip)});

            var arg_i: usize = 0;
            for (fn_info.param_body) |zir_inst| switch (zir_tags[@intFromEnum(zir_inst)]) {
                .param, .param_comptime, .param_anytype, .param_anytype_comptime => {
                    const arg = sema.inst_map.get(zir_inst).?;
                    // If this is being called in a generic function then analyzeCall will
                    // have already resolved the args and this will work.
                    // If not then this is a struct type being returned from a non-generic
                    // function and the name doesn't matter since it will later
                    // result in a compile error.
                    const arg_val = try sema.resolveValue(arg) orelse break :func_strat; // fall through to anon strat

                    if (arg_i != 0) try writer.writeByte(',');

                    // Limiting the depth here helps avoid type names getting too long, which
                    // in turn helps to avoid unreasonably long symbol names for namespaced
                    // symbols. Such names should ideally be human-readable, and additionally,
                    // some tooling may not support very long symbol names.
                    try writer.print("{}", .{Value.fmtValueSemaFull(.{
                        .val = arg_val,
                        .pt = pt,
                        .opt_sema = sema,
                        .depth = 1,
                    })});
                    arg_i += 1;
                    continue;
                },
                else => continue,
            };
            try writer.writeByte(')');
            return ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls);
        },
        .dbg_var => {
            // TODO: this logic is questionable. We ideally should be traversing
            // the `Block` rather than relying on the order of AstGen instructions.
            const ref = inst.?.toRef();
            const zir_tags = sema.code.instructions.items(.tag);
            const zir_data = sema.code.instructions.items(.data);
            for (@intFromEnum(inst.?)..zir_tags.len) |i| switch (zir_tags[i]) {
                .dbg_var_ptr, .dbg_var_val => if (zir_data[i].str_op.operand == ref) {
                    return ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{
                        block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code),
                    }, .no_embedded_nulls);
                },
                else => {},
            };
            // fall through to anon strat
        },
    }

    // anon strat handling
    // It would be neat to have "struct:line:column" but this name has
    // to survive incremental updates, where the declaration may have shifted
    // to a different line while remaining unchanged, in which case it should
    // not be unnecessarily re-analyzed.
    // TODO: that would be possible, by detecting line number changes and renaming
    // types appropriately. However, `@typeName` becomes a problem then. If we remove
    // that builtin from the language, we can consider this.
    return ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{
        block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(type_index),
    }, .no_embedded_nulls);
}

fn zirEnumDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.EnumDecl, extended.operand);
    var extra_index: usize = extra.end;
    const tracked_inst = try block.trackZir(inst);
    const src: LazySrcLoc = .{
        .base_node_inst = tracked_inst,
        .offset = LazySrcLoc.Offset.nodeOffset(0),
    };

    const tag_type_ref = if (small.has_tag_type) blk: {
        const tag_type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
        extra_index += 1;
        break :blk tag_type_ref;
    } else .none;

    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;

    const body_len = if (small.has_body_len) blk: {
        const body_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    const captures = try sema.getCaptures(block, src, extra_index, captures_len);
    extra_index += captures_len;

    const decls = sema.code.bodySlice(extra_index, decls_len);
    extra_index += decls_len;

    const body = sema.code.bodySlice(extra_index, body_len);
    extra_index += body.len;

    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
    const body_end = extra_index;
    extra_index += bit_bags_count;

    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
        if (bag != 0) break true;
    } else false;

    const enum_init: InternPool.EnumTypeInit = .{
        .has_values = any_values,
        .tag_mode = if (small.nonexhaustive)
            .nonexhaustive
        else if (tag_type_ref == .none)
            .auto
        else
            .explicit,
        .fields_len = fields_len,
        .key = .{ .declared = .{
            .zir_index = tracked_inst,
            .captures = captures,
        } },
    };
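    // `getEnumType` either finds an existing interned type (`.existing`) or
    // hands back a work-in-progress handle (`.wip`) to be filled in below.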
    const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) {
        .existing => |ty| {
            const new_ty = try pt.ensureTypeUpToDate(ty, false);

            // Make sure we update the namespace if the declaration is re-analyzed, to pick
            // up on e.g. changed comptime decls.
            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));

            try sema.declareDependency(.{ .interned = new_ty });
            try sema.addTypeReferenceEntry(src, new_ty);
            return Air.internedToRef(new_ty);
        },
        .wip => |wip| wip,
    };

    // Once this is `true`, we will not delete the decl or type even upon failure, since we
    // have finished constructing the type and are in the process of analyzing it.
    var done = false;

    errdefer if (!done) wip_ty.cancel(ip, pt.tid);

    const type_name = try sema.createTypeName(
        block,
        small.name_strategy,
        "enum",
        inst,
        wip_ty.index,
    );
    wip_ty.setName(ip, type_name);

    const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .owner_type = wip_ty.index,
        .file_scope = block.getFileScopeIndex(mod),
        .generation = mod.generation,
    });
    errdefer if (!done) pt.destroyNamespace(new_namespace_index);

    const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);

    try pt.scanNamespace(new_namespace_index, decls);

    try sema.declareDependency(.{ .interned = wip_ty.index });
    try sema.addTypeReferenceEntry(src, wip_ty.index);

    // We've finished the initial construction of this type, and are about to perform analysis.
    // Set the Cau and namespace appropriately, and don't destroy anything on failure.
    wip_ty.prepare(ip, new_cau_index, new_namespace_index);
    done = true;

    try Sema.resolveDeclaredEnum(
        pt,
        wip_ty,
        inst,
        tracked_inst,
        new_namespace_index,
        type_name,
        new_cau_index,
        small,
        body,
        tag_type_ref,
        any_values,
        fields_len,
        sema.code,
        body_end,
    );

    codegen_type: {
        if (mod.comp.config.use_llvm) break :codegen_type;
        if (block.ownerModule().strip) break :codegen_type;
        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
    }
    return Air.internedToRef(wip_ty.index);
}

fn zirUnionDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.UnionDecl, extended.operand);
    var extra_index: usize = extra.end;
    const tracked_inst = try block.trackZir(inst);
    const src: LazySrcLoc = .{
        .base_node_inst = tracked_inst,
        .offset = LazySrcLoc.Offset.nodeOffset(0),
    };

    extra_index += @intFromBool(small.has_tag_type);

    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;

    extra_index += @intFromBool(small.has_body_len);

    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    const captures = try sema.getCaptures(block, src, extra_index, captures_len);
    extra_index += captures_len;

    const union_init: InternPool.UnionTypeInit = .{
        .flags = .{
            .layout = small.layout,
            .status = .none,
            .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
                .tagged
            else if (small.layout != .auto)
                .none
            else switch (block.wantSafety()) {
                true => .safety,
                false => .none,
            },
            .any_aligned_fields = small.any_aligned_fields,
            .requires_comptime = .unknown,
            .assumed_runtime_bits = false,
            .assumed_pointer_aligned = false,
            .alignment = .none,
        },
        .fields_len = fields_len,
        .enum_tag_ty = .none, // set later
        .field_types = &.{}, // set later
        .field_aligns = &.{}, // set later
        .key = .{ .declared = .{
            .zir_index = tracked_inst,
            .captures = captures,
        } },
    };
    const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) {
        .existing => |ty| {
            const new_ty = try pt.ensureTypeUpToDate(ty, false);

            // Make sure we update the namespace if the declaration is re-analyzed, to pick
            // up on e.g. changed comptime decls.
            try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));

            try sema.declareDependency(.{ .interned = new_ty });
            try sema.addTypeReferenceEntry(src, new_ty);
            return Air.internedToRef(new_ty);
        },
        .wip => |wip| wip,
    };
    errdefer wip_ty.cancel(ip, pt.tid);

    wip_ty.setName(ip, try sema.createTypeName(
        block,
        small.name_strategy,
        "union",
        inst,
        wip_ty.index,
    ));

    const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .owner_type = wip_ty.index,
        .file_scope = block.getFileScopeIndex(mod),
        .generation = mod.generation,
    });
    errdefer pt.destroyNamespace(new_namespace_index);

    const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);

    if (pt.zcu.comp.incremental) {
        try mod.intern_pool.addDependency(
            gpa,
            AnalUnit.wrap(.{ .cau = new_cau_index }),
            .{ .src_hash = tracked_inst },
        );
    }

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try pt.scanNamespace(new_namespace_index, decls);

    try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
    codegen_type: {
        if (mod.comp.config.use_llvm) break :codegen_type;
        if (block.ownerModule().strip) break :codegen_type;
        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
    }
    try sema.declareDependency(.{ .interned = wip_ty.index });
    try sema.addTypeReferenceEntry(src, wip_ty.index);
    return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}

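/// Opaque types have no fields to resolve, so unlike structs, unions, and enums,
/// no `resolve_type_fully` job is queued and no type `Cau` is created.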
fn zirOpaqueDecl(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;

    const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
    const extra = sema.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
    var extra_index: usize = extra.end;
    const tracked_inst = try block.trackZir(inst);
    const src: LazySrcLoc = .{
        .base_node_inst = tracked_inst,
        .offset = LazySrcLoc.Offset.nodeOffset(0),
    };

    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;

    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = sema.code.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;

    const captures = try sema.getCaptures(block, src, extra_index, captures_len);
    extra_index += captures_len;

    const opaque_init: InternPool.OpaqueTypeInit = .{
        .key = .{ .declared = .{
            .zir_index = tracked_inst,
            .captures = captures,
        } },
    };
    const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) {
        .existing => |ty| {
            // Make sure we update the namespace if the declaration is re-analyzed, to pick
            // up on e.g. changed comptime decls.
            try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(mod));

            try sema.declareDependency(.{ .interned = ty });
            try sema.addTypeReferenceEntry(src, ty);
            return Air.internedToRef(ty);
        },
        .wip => |wip| wip,
    };
    errdefer wip_ty.cancel(ip, pt.tid);

    wip_ty.setName(ip, try sema.createTypeName(
        block,
        small.name_strategy,
        "opaque",
        inst,
        wip_ty.index,
    ));

    const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
        .parent = block.namespace.toOptional(),
        .owner_type = wip_ty.index,
        .file_scope = block.getFileScopeIndex(mod),
        .generation = mod.generation,
    });
    errdefer pt.destroyNamespace(new_namespace_index);

    const decls = sema.code.bodySlice(extra_index, decls_len);
    try pt.scanNamespace(new_namespace_index, decls);

    codegen_type: {
        if (mod.comp.config.use_llvm) break :codegen_type;
        if (block.ownerModule().strip) break :codegen_type;
        try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
    }
    try sema.addTypeReferenceEntry(src, wip_ty.index);
    return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
}

fn zirErrorSetDecl(
    sema: *Sema,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
    const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);

    var names: InferredErrorSet.NameMap = .{};
    try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);

    var extra_index: u32 = @intCast(extra.end);
    const extra_index_end = extra_index + (extra.data.fields_len * 2);
    while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string
        const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
        const name = sema.code.nullTerminatedString(name_index);
        const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
        _ = try pt.getErrorValue(name_ip);
        const result = names.getOrPutAssumeCapacity(name_ip);
        assert(!result.found_existing); // verified in AstGen
    }

    return Air.internedToRef((try pt.errorSetFromUnsortedNames(names.keys())).toIntern());
}

fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
        try sema.fn_ret_ty.resolveFields(pt);
        return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
    }

    const target = pt.zcu.getTarget();
    const ptr_type = try pt.ptrTypeSema(.{
        .child = sema.fn_ret_ty.toIntern(),
        .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
    });

    if (block.inlining != null) {
        // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
        // TODO when functions gain result location support, the inlining struct in
        // Block should contain the return pointer, and we would pass that through here.
        return block.addTy(.alloc, ptr_type);
    }

    return block.addTy(.ret_ptr, ptr_type);
}

fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
    const operand = try sema.resolveInst(inst_data.operand);
    return sema.analyzeRef(block, block.tokenOffset(inst_data.src_tok), operand);
}

fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const src = block.nodeOffset(inst_data.src_node);

    return sema.ensureResultUsed(block, sema.typeOf(operand), src);
}

fn ensureResultUsed(
    sema: *Sema,
    block: *Block,
    ty: Type,
    src: LazySrcLoc,
) CompileError!void {
    const pt = sema.pt;
    const mod = pt.zcu;
    switch (ty.zigTypeTag(mod)) {
        .Void, .NoReturn => return,
        .ErrorSet => return sema.fail(block, src, "error set is ignored", .{}),
        .ErrorUnion => {
            const msg = msg: {
                const msg = try sema.errMsg(src, "error union is ignored", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        else => {
            const msg = msg: {
                const msg = try sema.errMsg(src, "value of type '{}' ignored", .{ty.fmt(pt)});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(src, msg, "all non-void values must be used", .{});
                try sema.errNote(src, msg, "to discard the value, assign it to '_'", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
    }
}

fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const mod = pt.zcu;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand = try sema.resolveInst(inst_data.operand);
    const src = block.nodeOffset(inst_data.src_node);
    const operand_ty = sema.typeOf(operand);
    switch (operand_ty.zigTypeTag(mod)) {
        .ErrorSet => return sema.fail(block, src, "error set is discarded", .{}),
        .ErrorUnion => {
            const msg = msg: {
                const msg = try sema.errMsg(src, "error union is discarded", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        else => return,
    }
}

fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const mod = pt.zcu;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = block.nodeOffset(inst_data.src_node);
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
        operand_ty.childType(mod)
    else
        operand_ty;
    if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
    const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod);
    if (payload_ty != .Void and payload_ty != .NoReturn) {
        const msg = msg: {
            const msg = try sema.errMsg(src, "error union payload is ignored", .{});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(src, msg, "payload value can be explicitly ignored with '|_|'", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
}

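/// `.len` access on an indexable operand; a single-item pointer to an array is
/// transparently dereferenced to the array (see `indexablePtrLen` below).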
} fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const object = try sema.resolveInst(inst_data.operand); return indexablePtrLen(sema, block, src, object); } fn indexablePtrLen( sema: *Sema, block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, object, field_name, src); } fn indexablePtrLenOrNone( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls); return sema.fieldVal(block, src, operand, field_name, src); } fn zirAllocExtended( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const gpa = sema.gpa; const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src = block.src(.{ .node_offset_var_decl_ty = extra.data.src_node }); const align_src = block.src(.{ .node_offset_var_decl_align = extra.data.src_node }); const small: Zir.Inst.AllocExtended.Small = @bitCast(extended.small); var extra_index: usize = extra.end; const var_ty: Type = if (small.has_type) blk: { const type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; break :blk try sema.resolveType(block, ty_src, type_ref); } else undefined; const alignment = if (small.has_align) blk: { const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; break :blk try sema.resolveAlign(block, align_src, align_ref); } else .none; if (block.is_comptime or small.is_comptime) { if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, .data = .{ .inferred_alloc_comptime = .{ .alignment = alignment, .is_const = small.is_const, .ptr = undefined, } }, }); return @as(Air.Inst.Index, @enumFromInt(sema.air_instructions.len - 1)).toRef(); } } if (small.has_type) { if (!small.is_const) { try sema.validateVarType(block, ty_src, var_ty, false); } const target = pt.zcu.getTarget(); try var_ty.resolveLayout(pt); if (sema.func_is_naked and try sema.typeHasRuntimeBits(var_ty)) { const var_src = block.src(.{ .node_offset_store_ptr = extra.data.src_node }); return sema.fail(block, var_src, "local variable in naked function", .{}); } const ptr_type = try sema.pt.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .alignment = alignment, .address_space = target_util.defaultAddressSpace(target, .local), }, }); const ptr = try block.addTy(.alloc, ptr_type); if (small.is_const) { const ptr_inst = ptr.toIndex().?; try sema.maybe_comptime_allocs.put(gpa, ptr_inst, .{ .runtime_index = block.runtime_index }); try 
sema.base_allocs.put(gpa, ptr_inst, ptr_inst); } return ptr; } const result_index = try block.addInstAsIndex(.{ .tag = .inferred_alloc, .data = .{ .inferred_alloc = .{ .alignment = alignment, .is_const = small.is_const, } }, }); try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); if (small.is_const) { try sema.maybe_comptime_allocs.put(gpa, result_index, .{ .runtime_index = block.runtime_index }); try sema.base_allocs.put(gpa, result_index, result_index); } return result_index.toRef(); } fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node }); const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); return sema.analyzeComptimeAlloc(block, var_ty, .none); } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); const ptr_info = alloc_ty.ptrInfo(mod); const elem_ty = Type.fromInterned(ptr_info.child); // If the alloc was created in a comptime scope, we already created a comptime alloc for it. // However, if the final constructed value does not reference comptime-mutable memory, we wish // to promote it to an anon decl. already_ct: { const ptr_val = try sema.resolveValue(alloc) orelse break :already_ct; // If this was a comptime inferred alloc, then `storeToInferredAllocComptime` // might have already done our job and created an anon decl ref. switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.base_addr) { .uav => { // The comptime-ification was already done for us. // Just make sure the pointer is const. return sema.makePtrConst(block, alloc); }, else => {}, }, else => {}, } if (!sema.isComptimeMutablePtr(ptr_val)) break :already_ct; const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; assert(ptr.byte_offset == 0); const alloc_index = ptr.base_addr.comptime_alloc; const ct_alloc = sema.getComptimeAlloc(alloc_index); const interned = try ct_alloc.val.intern(pt, sema.arena); if (interned.canMutateComptimeVarState(mod)) { // Preserve the comptime alloc, just make the pointer const. ct_alloc.val = .{ .interned = interned.toIntern() }; ct_alloc.is_const = true; return sema.makePtrConst(block, alloc); } else { // Promote the constant to an anon decl. const new_mut_ptr = Air.internedToRef(try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .uav = .{ .val = interned.toIntern(), .orig_ty = alloc_ty.toIntern(), } }, .byte_offset = 0, } })); return sema.makePtrConst(block, new_mut_ptr); } } // Otherwise, check if the alloc is comptime-known despite being in a runtime scope. if (try sema.resolveComptimeKnownAllocPtr(block, alloc, null)) |ptr_val| { return sema.makePtrConst(block, Air.internedToRef(ptr_val)); } if (try sema.typeRequiresComptime(elem_ty)) { // The value was initialized through RLS, so we didn't detect the runtime condition earlier. 
// TODO: source location of runtime control flow const init_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); return sema.fail(block, init_src, "value with comptime-only type '{}' depends on runtime control flow", .{elem_ty.fmt(pt)}); } // This is a runtime value. return sema.makePtrConst(block, alloc); } /// If `alloc` is an inferred allocation, `resolved_inferred_ty` is taken to be its resolved /// type. Otherwise, it may be `null`, and the type will be inferred from `alloc`. fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index { const pt = sema.pt; const zcu = pt.zcu; const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc); const ptr_info = alloc_ty.ptrInfo(zcu); const elem_ty = Type.fromInterned(ptr_info.child); const alloc_inst = alloc.toIndex() orelse return null; const comptime_info = sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return null; const stores = comptime_info.value.stores.items(.inst); // Since the entry existed in `maybe_comptime_allocs`, the allocation is comptime-known. // We will resolve and return its value. // We expect to have emitted at least one store, unless the elem type is OPV. if (stores.len == 0) { const val = (try sema.typeHasOnePossibleValue(elem_ty)).?.toIntern(); return sema.finishResolveComptimeKnownAllocPtr(block, alloc_ty, val, null, alloc_inst, comptime_info.value); } // In general, we want to create a comptime alloc of the correct type and // apply the stores to that alloc in order. However, before going to all // that effort, let's optimize for the common case of a single store. simple: { if (stores.len != 1) break :simple; const store_inst = sema.air_instructions.get(@intFromEnum(stores[0])); switch (store_inst.tag) { .store, .store_safe => {}, .set_union_tag, .optional_payload_ptr_set, .errunion_payload_ptr_set => break :simple, // there's OPV stuff going on! else => unreachable, } if (store_inst.data.bin_op.lhs != alloc) break :simple; const val = store_inst.data.bin_op.rhs.toInterned().?; assert(zcu.intern_pool.typeOf(val) == elem_ty.toIntern()); return sema.finishResolveComptimeKnownAllocPtr(block, alloc_ty, val, null, alloc_inst, comptime_info.value); } // The simple strategy failed: we must create a mutable comptime alloc and // perform all of the runtime store operations at comptime. const ct_alloc = try sema.newComptimeAlloc(block, elem_ty, ptr_info.flags.alignment); const alloc_ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .comptime_alloc = ct_alloc }, .byte_offset = 0, } }); // Maps from pointers into the runtime allocs, to comptime-mutable pointers into the comptime alloc var ptr_mapping = std.AutoHashMap(Air.Inst.Index, InternPool.Index).init(sema.arena); try ptr_mapping.ensureTotalCapacity(@intCast(stores.len)); ptr_mapping.putAssumeCapacity(alloc_inst, alloc_ptr); // Whilst constructing our mapping, we will also initialize optional and error union payloads when // we encounter the corresponding pointers. For this reason, the ordering of `to_map` matters. var to_map = try std.ArrayList(Air.Inst.Index).initCapacity(sema.arena, stores.len); for (stores) |store_inst_idx| { const store_inst = sema.air_instructions.get(@intFromEnum(store_inst_idx)); const ptr_to_map = switch (store_inst.tag) { .store, .store_safe => store_inst.data.bin_op.lhs.toIndex().?, // Map the pointer being stored to. 
.set_union_tag => continue, // We can completely ignore these: we'll do it implicitly when we get the field pointer. .optional_payload_ptr_set, .errunion_payload_ptr_set => store_inst_idx, // Map the generated pointer itself. else => unreachable, }; to_map.appendAssumeCapacity(ptr_to_map); } const tmp_air = sema.getTmpAir(); while (to_map.popOrNull()) |air_ptr| { if (ptr_mapping.contains(air_ptr)) continue; const PointerMethod = union(enum) { same_addr, opt_payload, eu_payload, field: u32, elem: u64, }; const inst_tag = tmp_air.instructions.items(.tag)[@intFromEnum(air_ptr)]; const air_parent_ptr: Air.Inst.Ref, const method: PointerMethod = switch (inst_tag) { .struct_field_ptr => blk: { const data = tmp_air.extraData( Air.StructField, tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_pl.payload, ).data; break :blk .{ data.struct_operand, .{ .field = data.field_index }, }; }, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3, => .{ tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand, .{ .field = switch (inst_tag) { .struct_field_ptr_index_0 => 0, .struct_field_ptr_index_1 => 1, .struct_field_ptr_index_2 => 2, .struct_field_ptr_index_3 => 3, else => unreachable, } }, }, .ptr_slice_ptr_ptr => .{ tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand, .{ .field = Value.slice_ptr_index }, }, .ptr_slice_len_ptr => .{ tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand, .{ .field = Value.slice_len_index }, }, .ptr_elem_ptr => blk: { const data = tmp_air.extraData( Air.Bin, tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_pl.payload, ).data; const idx_val = (try sema.resolveValue(data.rhs)).?; break :blk .{ data.lhs, .{ .elem = try idx_val.toUnsignedIntSema(pt) }, }; }, .bitcast => .{ tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand, .same_addr, }, .optional_payload_ptr_set => .{ tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand, .opt_payload, }, .errunion_payload_ptr_set => .{ tmp_air.instructions.items(.data)[@intFromEnum(air_ptr)].ty_op.operand, .eu_payload, }, else => unreachable, }; const decl_parent_ptr = ptr_mapping.get(air_parent_ptr.toIndex().?) orelse { // Resolve the parent pointer first. // Note that we add in what seems like the wrong order, because we're popping from the end of this array. try to_map.appendSlice(&.{ air_ptr, air_parent_ptr.toIndex().? }); continue; }; const new_ptr_ty = tmp_air.typeOfIndex(air_ptr, &zcu.intern_pool).toIntern(); const new_ptr = switch (method) { .same_addr => try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, decl_parent_ptr, new_ptr_ty), .opt_payload => ptr: { // Set the optional to non-null at comptime. // If the payload is OPV, we must use that value instead of undef. const opt_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); const payload_ty = opt_ty.optionalChild(zcu); const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); const opt_val = try pt.intern(.{ .opt = .{ .ty = opt_ty.toIntern(), .val = payload_val.toIntern(), } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty); break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(pt)).toIntern(); }, .eu_payload => ptr: { // Set the error union to non-error at comptime. // If the payload is OPV, we must use that value instead of undef. 
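// Illustrative case (assumed, not exhaustive): lowering
//   const x: anyerror!?u32 = 5;
// via RLS goes through `errunion_payload_ptr_set` and
// `optional_payload_ptr_set`, so the comptime alloc must be initialized to
// non-error/non-null here before the payload store itself is replayed.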
const eu_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); const payload_ty = eu_ty.errorUnionPayload(zcu); const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); const eu_val = try pt.intern(.{ .error_union = .{ .ty = eu_ty.toIntern(), .val = .{ .payload = payload_val.toIntern() }, } }); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty); break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(pt)).toIntern(); }, .field => |idx| ptr: { const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu); if (zcu.typeToUnion(maybe_union_ty)) |union_obj| { // As this is a union field, we must store to the pointer now to set the tag. // If the payload is OPV, there will not be a payload store, so we store that value. // Otherwise, there will be a payload store to process later, so undef will suffice. const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]); const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); const tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx); const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val); try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty); } break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, pt)).toIntern(); }, .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, pt)).toIntern(), }; try ptr_mapping.put(air_ptr, new_ptr); } // We have a correlation between AIR pointers and decl pointers. Perform all stores at comptime. // Any implicit stores performed by `optional_payload_ptr_set`, `errunion_payload_ptr_set`, or // `set_union_tag` instructions were already done above. for (stores) |store_inst_idx| { const store_inst = sema.air_instructions.get(@intFromEnum(store_inst_idx)); switch (store_inst.tag) { .set_union_tag => {}, // Handled implicitly by field pointers above .optional_payload_ptr_set, .errunion_payload_ptr_set => {}, // Handled explicitly above .store, .store_safe => { const air_ptr_inst = store_inst.data.bin_op.lhs.toIndex().?; const store_val = (try sema.resolveValue(store_inst.data.bin_op.rhs)).?; const new_ptr = ptr_mapping.get(air_ptr_inst).?; try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(zcu.intern_pool.typeOf(store_val.toIntern()))); }, else => unreachable, } } // The value is finalized - load it! const val = (try sema.pointerDeref(block, LazySrcLoc.unneeded, Value.fromInterned(alloc_ptr), alloc_ty)).?.toIntern(); return sema.finishResolveComptimeKnownAllocPtr(block, alloc_ty, val, ct_alloc, alloc_inst, comptime_info.value); } /// Given the resolved comptime-known value, rewrites the dead AIR to not /// create a runtime stack allocation. Also places the resulting value into /// either an anon decl ref or a comptime alloc depending on whether it /// references comptime-mutable memory. If `existing_comptime_alloc` is /// passed, it is a scratch allocation which already contains `result_val`. /// Same return type as `resolveComptimeKnownAllocPtr` so we can tail call. 
fn finishResolveComptimeKnownAllocPtr( sema: *Sema, block: *Block, alloc_ty: Type, result_val: InternPool.Index, existing_comptime_alloc: ?ComptimeAllocIndex, alloc_inst: Air.Inst.Index, comptime_info: MaybeComptimeAlloc, ) CompileError!?InternPool.Index { const pt = sema.pt; const zcu = pt.zcu; // We're almost done - we have the resolved comptime value. We just need to // eliminate the now-dead runtime instructions. // We will rewrite the AIR to eliminate the alloc and all stores to it. // This will cause instructions deriving field pointers etc. of the alloc to // become invalid; however, since we are removing all stores to those pointers, // they will be eliminated by Liveness before they reach codegen. // The specifics of this instruction aren't really important: we just want // Liveness to elide it. const nop_inst: Air.Inst = .{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = .u8_type, .operand = .zero_u8 } } }; sema.air_instructions.set(@intFromEnum(alloc_inst), nop_inst); for (comptime_info.stores.items(.inst)) |store_inst| { sema.air_instructions.set(@intFromEnum(store_inst), nop_inst); } if (Value.fromInterned(result_val).canMutateComptimeVarState(zcu)) { const alloc_index = existing_comptime_alloc orelse a: { const idx = try sema.newComptimeAlloc(block, alloc_ty.childType(zcu), alloc_ty.ptrAlignment(pt)); const alloc = sema.getComptimeAlloc(idx); alloc.val = .{ .interned = result_val }; break :a idx; }; sema.getComptimeAlloc(alloc_index).is_const = true; return try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .comptime_alloc = alloc_index }, .byte_offset = 0, } }); } else { return try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .uav = .{ .orig_ty = alloc_ty.toIntern(), .val = result_val, } }, .byte_offset = 0, } }); } } fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type { var ptr_info = ptr_ty.ptrInfo(sema.pt.zcu); ptr_info.flags.is_const = true; return sema.pt.ptrTypeSema(ptr_info); } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { const alloc_ty = sema.typeOf(alloc); const const_ptr_ty = try sema.makePtrTyConst(alloc_ty); // Detect if a comptime value simply needs to have its type changed.
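// In other words (illustrative): a comptime-known pointer is simply
// re-interned at the `*const` type, with no runtime instruction emitted,
// while a runtime pointer gets an AIR bitcast to the const pointer type.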
if (try sema.resolveValue(alloc)) |val| { return Air.internedToRef((try sema.pt.getCoerced(val, const_ptr_ty)).toIntern()); } return block.addBitCast(const_ptr_ty, alloc); } fn zirAllocInferredComptime( sema: *Sema, is_const: bool, ) CompileError!Air.Inst.Ref { const gpa = sema.gpa; try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, .data = .{ .inferred_alloc_comptime = .{ .alignment = .none, .is_const = is_const, .ptr = undefined, } }, }); return @as(Air.Inst.Index, @enumFromInt(sema.air_instructions.len - 1)).toRef(); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node }); const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_ty, .none); } if (sema.func_is_naked and try sema.typeHasRuntimeBits(var_ty)) { const mut_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node }); return sema.fail(block, mut_src, "local variable in naked function", .{}); } const target = pt.zcu.getTarget(); const ptr_type = try pt.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const ptr = try block.addTy(.alloc, ptr_type); const ptr_inst = ptr.toIndex().?; try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index }); try sema.base_allocs.put(sema.gpa, ptr_inst, ptr_inst); return ptr; } fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node }); const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_ty, .none); } if (sema.func_is_naked and try sema.typeHasRuntimeBits(var_ty)) { const var_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node }); return sema.fail(block, var_src, "local variable in naked function", .{}); } try sema.validateVarType(block, ty_src, var_ty, false); const target = pt.zcu.getTarget(); const ptr_type = try pt.ptrTypeSema(.{ .child = var_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); return block.addTy(.alloc, ptr_type); } fn zirAllocInferred( sema: *Sema, block: *Block, is_const: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const gpa = sema.gpa; if (block.is_comptime) { try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, .data = .{ .inferred_alloc_comptime = .{ .alignment = .none, .is_const = is_const, .ptr = undefined, } }, }); return @as(Air.Inst.Index, @enumFromInt(sema.air_instructions.len - 1)).toRef(); } const result_index = try block.addInstAsIndex(.{ .tag = .inferred_alloc, .data = .{ .inferred_alloc = .{ .alignment = .none, .is_const = is_const, } }, }); try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); if (is_const) { try sema.maybe_comptime_allocs.put(gpa, result_index, .{ .runtime_index = block.runtime_index }); try sema.base_allocs.put(sema.gpa, result_index, result_index); } return result_index.toRef(); 
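// For example (hypothetical user code):
//   var x = foo();
// takes the `inferred_alloc` path above; the element type of `x` is only
// decided in `zirResolveInferredAlloc` below, by peer type resolution over
// every store to the alloc.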
} fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node }); const ptr = try sema.resolveInst(inst_data.operand); const ptr_inst = ptr.toIndex().?; const target = mod.getTarget(); switch (sema.air_instructions.items(.tag)[@intFromEnum(ptr_inst)]) { .inferred_alloc_comptime => { // The work was already done for us by `Sema.storeToInferredAllocComptime`. // All we need to do is remap the pointer. const iac = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc_comptime; const resolved_ptr = iac.ptr; if (std.debug.runtime_safety) { // The inferred_alloc_comptime should never be referenced again sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = undefined, .data = undefined }); } const val = switch (mod.intern_pool.indexToKey(resolved_ptr).ptr.base_addr) { .uav => |a| a.val, .comptime_alloc => |i| val: { const alloc = sema.getComptimeAlloc(i); break :val (try alloc.val.intern(pt, sema.arena)).toIntern(); }, else => unreachable, }; if (mod.intern_pool.isFuncBody(val)) { const ty = Type.fromInterned(mod.intern_pool.typeOf(val)); if (try sema.fnHasRuntimeBits(ty)) { try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = val })); try mod.ensureFuncBodyAnalysisQueued(val); } } // Remap the ZIR operand to the resolved pointer value sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(resolved_ptr)); }, .inferred_alloc => { const ia1 = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc; const ia2 = sema.unresolved_inferred_allocs.fetchSwapRemove(ptr_inst).?.value; const peer_vals = try sema.arena.alloc(Air.Inst.Ref, ia2.prongs.items.len); for (peer_vals, ia2.prongs.items) |*peer_val, store_inst| { assert(sema.air_instructions.items(.tag)[@intFromEnum(store_inst)] == .store); const bin_op = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op; peer_val.* = bin_op.rhs; } const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none); const final_ptr_ty = try pt.ptrTypeSema(.{ .child = final_elem_ty.toIntern(), .flags = .{ .alignment = ia1.alignment, .address_space = target_util.defaultAddressSpace(target, .local), }, }); if (!ia1.is_const) { try sema.validateVarType(block, ty_src, final_elem_ty, false); } else if (try sema.resolveComptimeKnownAllocPtr(block, ptr, final_ptr_ty)) |ptr_val| { const const_ptr_ty = try sema.makePtrTyConst(final_ptr_ty); const new_const_ptr = try pt.getCoerced(Value.fromInterned(ptr_val), const_ptr_ty); // Remap the ZIR operand to the resolved pointer value sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, Air.internedToRef(new_const_ptr.toIntern())); // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be // fully resolved so it can be lowered in codegen. try final_elem_ty.resolveFully(pt); return; } if (try sema.typeRequiresComptime(final_elem_ty)) { // The alloc wasn't comptime-known per the above logic, so the // type cannot be comptime-only. 
// TODO: source location of runtime control flow return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(pt)}); } if (sema.func_is_naked and try sema.typeHasRuntimeBits(final_elem_ty)) { const mut_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node }); return sema.fail(block, mut_src, "local variable in naked function", .{}); } // Change it to a normal alloc. sema.air_instructions.set(@intFromEnum(ptr_inst), .{ .tag = .alloc, .data = .{ .ty = final_ptr_ty }, }); if (ia1.is_const) { // Remap the ZIR operand to the pointer const sema.inst_map.putAssumeCapacity(inst_data.operand.toIndex().?, try sema.makePtrConst(block, ptr)); } // Now we need to go back over all the store instructions, and do the logic as if // the new result ptr type was available. for (ia2.prongs.items) |placeholder_inst| { var replacement_block = block.makeSubBlock(); defer replacement_block.instructions.deinit(gpa); assert(sema.air_instructions.items(.tag)[@intFromEnum(placeholder_inst)] == .store); const bin_op = sema.air_instructions.items(.data)[@intFromEnum(placeholder_inst)].bin_op; try sema.storePtr2(&replacement_block, src, bin_op.lhs, src, bin_op.rhs, src, .store); // If only one instruction is produced then we can replace the store // placeholder instruction with this instruction; no need for an entire block. if (replacement_block.instructions.items.len == 1) { const only_inst = replacement_block.instructions.items[0]; sema.air_instructions.set(@intFromEnum(placeholder_inst), sema.air_instructions.get(@intFromEnum(only_inst))); continue; } // Here we replace the placeholder store instruction with a block // that does the actual store logic. _ = try replacement_block.addBr(placeholder_inst, .void_value); try sema.air_extra.ensureUnusedCapacity( gpa, @typeInfo(Air.Block).Struct.fields.len + replacement_block.instructions.items.len, ); sema.air_instructions.set(@intFromEnum(placeholder_inst), .{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .void_type, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(replacement_block.instructions.items.len), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(replacement_block.instructions.items)); } }, else => unreachable, } } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); const src = block.nodeOffset(inst_data.src_node); var len: Air.Inst.Ref = .none; var len_val: ?Value = null; var len_idx: u32 = undefined; var any_runtime = false; const runtime_arg_lens = try gpa.alloc(Air.Inst.Ref, args.len); defer gpa.free(runtime_arg_lens); // First pass to look for comptime values. for (args, 0..) |zir_arg, i_usize| { const i: u32 = @intCast(i_usize); runtime_arg_lens[i] = .none; if (zir_arg == .none) continue; const object = try sema.resolveInst(zir_arg); const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. 
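// For example (hypothetical operands):
//   for (a, 0..) |x, i| {}  // unbounded range: adds no length of its own
//   for (a, b) |x, y| {}    // both lengths participate
// Comptime-known mismatches are rejected below ("non-matching for loop
// lengths"); runtime lengths are verified by the `for_len_mismatch` safety
// check at the end of this function.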
const is_int = switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => true, else => false, }; const arg_src = block.src(.{ .for_input = .{ .for_node_offset = inst_data.src_node, .input_index = i, } }); const arg_len_uncoerced = if (is_int) object else l: { if (!object_ty.isIndexable(mod)) { // Instead of using checkIndexable we customize this error. const msg = msg: { const msg = try sema.errMsg(arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{}); if (object_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.errNote(arg_src, msg, "consider using 'try', 'catch', or 'if'", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (!object_ty.indexableHasLen(mod)) continue; break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { len = arg_len; len_idx = i; } if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| { if (len_val) |v| { if (!(try sema.valuesEqual(arg_val, v, Type.usize))) { const msg = msg: { const msg = try sema.errMsg(src, "non-matching for loop lengths", .{}); errdefer msg.destroy(gpa); const a_src = block.src(.{ .for_input = .{ .for_node_offset = inst_data.src_node, .input_index = len_idx, } }); try sema.errNote(a_src, msg, "length {} here", .{ v.fmtValueSema(pt, sema), }); try sema.errNote(arg_src, msg, "length {} here", .{ arg_val.fmtValueSema(pt, sema), }); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } else { len = arg_len; len_val = arg_val; len_idx = i; } continue; } runtime_arg_lens[i] = arg_len; any_runtime = true; } if (len == .none) { const msg = msg: { const msg = try sema.errMsg(src, "unbounded for loop", .{}); errdefer msg.destroy(gpa); for (args, 0..) |zir_arg, i_usize| { const i: u32 = @intCast(i_usize); if (zir_arg == .none) continue; const object = try sema.resolveInst(zir_arg); const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => continue, else => {}, } const arg_src = block.src(.{ .for_input = .{ .for_node_offset = inst_data.src_node, .input_index = i, } }); try sema.errNote(arg_src, msg, "type '{}' has no upper bound", .{ object_ty.fmt(pt), }); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } // Now for the runtime checks. if (any_runtime and block.wantSafety()) { for (runtime_arg_lens, 0..) |arg_len, i| { if (arg_len == .none) continue; if (i == len_idx) continue; const ok = try block.addBinOp(.cmp_eq, len, arg_len); try sema.addSafetyCheck(block, src, ok, .for_len_mismatch); } } return len; } /// Given any single pointer, retrieve a pointer to the payload of any optional /// or error union pointed to, initializing these pointers along the way. /// Given a `*E!?T`, returns a (valid) `*T`. /// May invalidate already-stored payload data. 
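/// For example (illustrative): storing through a result pointer of type
/// `*anyerror!?u32` requires a plain `*u32`, so both wrapper layers are
/// peeled off, and initialized to non-error/non-null, in turn.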
fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcLoc) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; var base_ptr = ptr; while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; try sema.checkKnownAllocPtr(block, ptr, base_ptr); return base_ptr; } fn zirOptEuBasePtrInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ptr = try sema.resolveInst(un_node.operand); return sema.optEuBasePtrInit(block, ptr, block.nodeOffset(un_node.src_node)); } fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(pl_node.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data; const uncoerced_val = try sema.resolveInst(extra.rhs); const maybe_wrapped_ptr_ty = sema.resolveType(block, LazySrcLoc.unneeded, extra.lhs) catch |err| switch (err) { error.GenericPoison => return uncoerced_val, else => |e| return e, }; const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod); assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction const elem_ty = ptr_ty.childType(mod); switch (ptr_ty.ptrSize(mod)) { .One => { const uncoerced_ty = sema.typeOf(uncoerced_val); if (elem_ty.zigTypeTag(mod) == .Array and elem_ty.childType(mod).toIntern() == uncoerced_ty.toIntern()) { // We're trying to initialize a *[1]T with a reference to a T - don't perform any coercion. return uncoerced_val; } // If the destination type is anyopaque, don't coerce - the pointer will coerce instead. if (elem_ty.toIntern() == .anyopaque_type) { return uncoerced_val; } else { return sema.coerce(block, elem_ty, uncoerced_val, src); } }, .Slice, .Many => { // Our goal is to coerce `uncoerced_val` to an array of `elem_ty`. const val_ty = sema.typeOf(uncoerced_val); switch (val_ty.zigTypeTag(mod)) { .Array, .Vector => {}, else => if (!val_ty.isTuple(mod)) { return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(pt), val_ty.fmt(pt) }); }, } const want_ty = try pt.arrayType(.{ .len = val_ty.arrayLen(mod), .child = elem_ty.toIntern(), .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, }); return sema.coerce(block, want_ty, uncoerced_val, src); }, .C => { // There's nothing meaningful to do here, because we don't know if this is meant to be a // single-pointer or a many-pointer. return uncoerced_val; }, } } fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const un_tok = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok; const src = block.tokenOffset(un_tok.src_tok); // In case of GenericPoison, we don't actually have a type, so this will be // treated as an untyped address-of operator. 
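// A rough user-level example (hypothetical): `@as(*u8, &byte)` passes this
// validation, whereas a non-pointer result type, as in
// `const v: u32 = &byte;`, reaches the "found pointer" error below.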
const operand_air_inst = sema.resolveInst(un_tok.operand) catch |err| switch (err) { error.GenericPoison => return, else => |e| return e, }; const ty_operand = sema.analyzeAsType(block, src, operand_air_inst) catch |err| switch (err) { error.GenericPoison => return, else => |e| return e, }; if (ty_operand.isGenericPoison()) return; if (ty_operand.optEuBaseType(mod).zigTypeTag(mod) != .Pointer) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "expected type '{}', found pointer", .{ty_operand.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "address-of operator always returns a pointer", .{}); break :msg msg; }); } } fn zirValidateArrayInitRefTy( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(pl_node.src_node); const extra = sema.code.extraData(Zir.Inst.ArrayInitRefTy, pl_node.payload_index).data; const maybe_wrapped_ptr_ty = sema.resolveType(block, LazySrcLoc.unneeded, extra.ptr_ty) catch |err| switch (err) { error.GenericPoison => return .generic_poison_type, else => |e| return e, }; const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod); assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction switch (mod.intern_pool.indexToKey(ptr_ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice, .Many => { // Use array of correct length const arr_ty = try pt.arrayType(.{ .len = extra.elem_count, .child = ptr_ty.childType(mod).toIntern(), .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, }); return Air.internedToRef(arr_ty.toIntern()); }, else => {}, }, else => {}, } // Otherwise, we just want the pointer child type const ret_ty = ptr_ty.childType(mod); if (ret_ty.toIntern() == .anyopaque_type) { // The actual array type is unknown, which we represent with a generic poison. return .generic_poison_type; } const arr_ty = ret_ty.optEuBaseType(mod); try sema.validateArrayInitTy(block, src, src, extra.elem_count, arr_ty); return Air.internedToRef(ret_ty.toIntern()); } fn zirValidateArrayInitTy( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_result_ty: bool, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const ty_src: LazySrcLoc = if (is_result_ty) src else block.src(.{ .node_offset_init_ty = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data; const ty = sema.resolveType(block, ty_src, extra.ty) catch |err| switch (err) { // It's okay for the type to be unknown: this will result in an anonymous array init. 
error.GenericPoison => return, else => |e| return e, }; const arr_ty = if (is_result_ty) ty.optEuBaseType(mod) else ty; return sema.validateArrayInitTy(block, src, ty_src, extra.init_count, arr_ty); } fn validateArrayInitTy( sema: *Sema, block: *Block, src: LazySrcLoc, ty_src: LazySrcLoc, init_count: u32, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Array => { const array_len = ty.arrayLen(mod); if (init_count != array_len) { return sema.fail(block, src, "expected {d} array elements; found {d}", .{ array_len, init_count, }); } return; }, .Vector => { const array_len = ty.arrayLen(mod); if (init_count != array_len) { return sema.fail(block, src, "expected {d} vector elements; found {d}", .{ array_len, init_count, }); } return; }, .Struct => if (ty.isTuple(mod)) { try ty.resolveFields(pt); const array_len = ty.arrayLen(mod); if (init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ array_len, init_count, }); } return; }, else => {}, } return sema.failWithArrayInitNotSupported(block, ty_src, ty); } fn zirValidateStructInitTy( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_result_ty: bool, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) { // It's okay for the type to be unknown: this will result in an anonymous struct init. error.GenericPoison => return, else => |e| return e, }; const struct_ty = if (is_result_ty) ty.optEuBaseType(mod) else ty; switch (struct_ty.zigTypeTag(mod)) { .Struct, .Union => return, else => {}, } return sema.failWithStructInitNotSupported(block, src, struct_ty); } fn zirValidatePtrStructInit( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const init_src = block.nodeOffset(validate_inst.src_node); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); const instrs = sema.code.bodySlice(validate_extra.end, validate_extra.data.body_len); const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(instrs[0])].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); const agg_ty = sema.typeOf(object_ptr).childType(mod).optEuBaseType(mod); switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, agg_ty, init_src, instrs, ), .Union => return sema.validateUnionInit( block, agg_ty, init_src, instrs, object_ptr, ), else => unreachable, } } fn validateUnionInit( sema: *Sema, block: *Block, union_ty: Type, init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, union_ptr: Air.Inst.Ref, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; if (instrs.len != 1) { const msg = msg: { const msg = try sema.errMsg( init_src, "cannot initialize multiple union fields at once; unions can only have one active field", .{}, ); errdefer msg.destroy(gpa); for (instrs[1..]) |inst| { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const inst_src = block.src(.{ .node_offset_initializer = inst_data.src_node }); try 
sema.errNote(inst_src, msg, "additional initializer here", .{}); } try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (block.is_comptime and (try sema.resolveDefinedValue(block, init_src, union_ptr)) != null) { // In this case, comptime machinery already did everything. No work to do here. return; } const field_ptr = instrs[0]; const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node; const field_src = block.src(.{ .node_offset_initializer = field_ptr_data.src_node }); const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( gpa, pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); const field_ptr_ref = sema.inst_map.get(field_ptr).?; // Our task here is to determine if the union is comptime-known. In such a case, // we erase the runtime AIR instructions for initializing the union, and replace // the mapping with the comptime value. Either way, we will need to populate the tag. // We expect to see something like this in the current block AIR: // %a = alloc(*const U) // %b = bitcast(*U, %a) // %c = field_ptr(..., %b) // %e!= store(%c!, %d!) // If %d is a comptime operand, the union is comptime. // If the union is comptime, we want `first_block_index` // to point at %c so that the bitcast becomes the last instruction in the block. // // The store instruction may be missing; if the field type has only one possible value, this case is handled below. // // In the case of a comptime-known pointer to a union, the // field_ptr instruction is missing, so we have to pattern-match // based only on the store instructions. // `first_block_index` needs to point to the `field_ptr` if it exists; // the `store` otherwise. var first_block_index = block.instructions.items.len; var block_index = block.instructions.items.len - 1; var init_val: ?Value = null; var init_ref: ?Air.Inst.Ref = null; while (block_index > 0) : (block_index -= 1) { const store_inst = block.instructions.items[block_index]; if (store_inst.toRef() == field_ptr_ref) { first_block_index = block_index; break; } switch (air_tags[@intFromEnum(store_inst)]) { .store, .store_safe => {}, else => continue, } const bin_op = air_datas[@intFromEnum(store_inst)].bin_op; var ptr_ref = bin_op.lhs; if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) { ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand; }; if (ptr_ref != field_ptr_ref) continue; first_block_index = @min(if (field_ptr_ref.toIndex()) |field_ptr_inst| std.mem.lastIndexOfScalar( Air.Inst.Index, block.instructions.items[0..block_index], field_ptr_inst, ).? else block_index, first_block_index); init_ref = bin_op.rhs; init_val = try sema.resolveValue(bin_op.rhs); break; } const tag_ty = union_ty.unionTagTypeHypothetical(mod); const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const field_type = union_ty.unionFieldType(tag_val, mod).?; if (try sema.typeHasOnePossibleValue(field_type)) |field_only_value| { init_val = field_only_value; } if (init_val) |val| { // Our task is to delete all the `field_ptr` and `store` instructions, and insert // instead a single `store` to the result ptr with a comptime union value.
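// Sketched rewrite (illustrative, in the AIR notation above): when %d is
// comptime-known, the `field_ptr` and `store` are deleted from the block
// and replaced by a single store of the interned union value (tag plus
// payload) directly to the result pointer.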
block_index = first_block_index; for (block.instructions.items[first_block_index..]) |cur_inst| { switch (air_tags[@intFromEnum(cur_inst)]) { .struct_field_ptr, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3, => if (cur_inst.toRef() == field_ptr_ref) continue, .bitcast => if (air_datas[@intFromEnum(cur_inst)].ty_op.operand == field_ptr_ref) continue, .store, .store_safe => { var ptr_ref = air_datas[@intFromEnum(cur_inst)].bin_op.lhs; if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) { ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand; }; if (ptr_ref == field_ptr_ref) continue; }, else => {}, } block.instructions.items[block_index] = cur_inst; block_index += 1; } block.instructions.shrinkRetainingCapacity(block_index); const union_val = try pt.intern(.{ .un = .{ .ty = union_ty.toIntern(), .tag = tag_val.toIntern(), .val = val.toIntern(), } }); const union_init = Air.internedToRef(union_val); try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store); return; } else if (try sema.typeRequiresComptime(union_ty)) { return sema.failWithNeededComptime(block, block.nodeOffset(field_ptr_data.src_node), .{ .needed_comptime_reason = "initializer of comptime only union must be comptime-known", }); } if (init_ref) |v| try sema.validateRuntimeValue(block, block.nodeOffset(field_ptr_data.src_node), v); const new_tag = Air.internedToRef(tag_val.toIntern()); const set_tag_inst = try block.addBinOp(.set_union_tag, union_ptr, new_tag); try sema.checkComptimeKnownStore(block, set_tag_inst, LazySrcLoc.unneeded); // `unneeded` since this isn't a "proper" store } fn validateStructInit( sema: *Sema, block: *Block, struct_ty: Type, init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const field_indices = try gpa.alloc(u32, instrs.len); defer gpa.free(field_indices); // Maps field index to field_ptr index of where it was already initialized. const found_fields = try gpa.alloc(Zir.Inst.OptionalIndex, struct_ty.structFieldCount(mod)); defer gpa.free(found_fields); @memset(found_fields, .none); var struct_ptr_zir_ref: Zir.Inst.Ref = undefined; for (instrs, field_indices) |field_ptr, *field_index| { const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node; const field_src = block.src(.{ .node_offset_initializer = field_ptr_data.src_node }); const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; const field_name = try ip.getOrPutString( gpa, pt.tid, sema.code.nullTerminatedString(field_ptr_extra.field_name_start), .no_embedded_nulls, ); field_index.* = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else try sema.structFieldIndex(block, struct_ty, field_name, field_src); assert(found_fields[field_index.*] == .none); found_fields[field_index.*] = field_ptr.toOptional(); } var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); const struct_ptr = try sema.resolveInst(struct_ptr_zir_ref); if (block.is_comptime and (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null) { try struct_ty.resolveLayout(pt); // In this case the only thing we need to do is evaluate the implicit // store instructions for default field values, and report any missing fields. 
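// For example (hypothetical struct): a comptime `S{ .a = 1 }` where `S`
// also declares `b: u32 = 0` gets the default for `b` stored here, whereas
// a defaultless missing field is reported as "missing struct field: ...".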
// Avoid the cost of the extra machinery for detecting a comptime struct init value. for (found_fields, 0..) |field_ptr, i_usize| { const i: u32 = @intCast(i_usize); if (field_ptr != .none) continue; try struct_ty.resolveStructFieldInits(pt); const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.toIntern() == .unreachable_value) { const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, .{i}); } else { root_msg = try sema.errMsg(init_src, template, .{i}); } continue; }; const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, args); } else { root_msg = try sema.errMsg(init_src, template, args); } continue; } const field_src = init_src; // TODO better source location const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true); const init = Air.internedToRef(default_val.toIntern()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } if (root_msg) |msg| { try sema.addDeclaredHereNote(msg, struct_ty); root_msg = null; return sema.failWithOwnedErrorMsg(block, msg); } return; } var fields_allow_runtime = true; var struct_is_comptime = true; var first_block_index = block.instructions.items.len; const require_comptime = try sema.typeRequiresComptime(struct_ty); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); try struct_ty.resolveStructFieldInits(pt); // We collect the comptime field values in case the struct initialization // ends up being comptime-known. const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) |opt_field_ptr, i_usize| { const i: u32 = @intCast(i_usize); if (opt_field_ptr.unwrap()) |field_ptr| { // Determine whether the value stored to this pointer is comptime-known. const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { field_values[i] = opv.toIntern(); continue; } const field_ptr_ref = sema.inst_map.get(field_ptr).?; //std.debug.print("validateStructInit (field_ptr_ref=%{d}):\n", .{field_ptr_ref}); //for (block.instructions.items) |item| { // std.debug.print(" %{d} = {s}\n", .{item, @tagName(air_tags[@intFromEnum(item)])}); //} // We expect to see something like this in the current block AIR: // %a = field_ptr(...) // store(%a, %b) // With an optional bitcast between the store and the field_ptr. // If %b is a comptime operand, this field is comptime. // // However, in the case of a comptime-known pointer to a struct, the // field_ptr instruction is missing, so we have to pattern-match // based only on the store instructions. // `first_block_index` needs to point to the `field_ptr` if it exists; // the `store` otherwise. // Possible performance enhancement: save the `block_index` between iterations // of the for loop.
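// Concretely (hypothetical initializer): for `.{ .a = 1, .b = rt }`, the
// store to `.a` resolves to a comptime value in the scan below, while the
// runtime store to `.b` clears `struct_is_comptime` and stays in the block.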
var block_index = block.instructions.items.len; while (block_index > 0) { block_index -= 1; const store_inst = block.instructions.items[block_index]; if (store_inst.toRef() == field_ptr_ref) { struct_is_comptime = false; continue :field; } switch (air_tags[@intFromEnum(store_inst)]) { .store, .store_safe => {}, else => continue, } const bin_op = air_datas[@intFromEnum(store_inst)].bin_op; var ptr_ref = bin_op.lhs; if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) { ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand; }; if (ptr_ref != field_ptr_ref) continue; first_block_index = @min(if (field_ptr_ref.toIndex()) |field_ptr_inst| std.mem.lastIndexOfScalar( Air.Inst.Index, block.instructions.items[0..block_index], field_ptr_inst, ).? else block_index, first_block_index); if (!sema.checkRuntimeValue(bin_op.rhs)) fields_allow_runtime = false; if (try sema.resolveValue(bin_op.rhs)) |val| { field_values[i] = val.toIntern(); } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(field_ptr)].pl_node; return sema.failWithNeededComptime(block, block.nodeOffset(field_ptr_data.src_node), .{ .needed_comptime_reason = "initializer of comptime only struct must be comptime-known", }); } else { struct_is_comptime = false; } continue :field; } struct_is_comptime = false; continue :field; } const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.toIntern() == .unreachable_value) { const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, .{i}); } else { root_msg = try sema.errMsg(init_src, template, .{i}); } continue; }; const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, args); } else { root_msg = try sema.errMsg(init_src, template, args); } continue; } field_values[i] = default_val.toIntern(); } if (!struct_is_comptime and !fields_allow_runtime and root_msg == null) { root_msg = try sema.errMsg(init_src, "runtime value contains reference to comptime var", .{}); try sema.errNote(init_src, root_msg.?, "comptime var pointers are not available at runtime", .{}); } if (root_msg) |msg| { try sema.addDeclaredHereNote(msg, struct_ty); root_msg = null; return sema.failWithOwnedErrorMsg(block, msg); } if (struct_is_comptime) { // Our task is to delete all the `field_ptr` and `store` instructions, and insert // instead a single `store` to the struct_ptr with a comptime struct value. 
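// Sketched outcome (illustrative): surviving instructions are compacted
// toward the front of the block, each per-field `field_ptr`/`store` pair
// is dropped, and one store of the interned aggregate value is emitted at
// the end.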
var init_index: usize = 0; var field_ptr_ref = Air.Inst.Ref.none; var block_index = first_block_index; for (block.instructions.items[first_block_index..]) |cur_inst| { while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) { const field_ty = struct_ty.structFieldType(field_indices[init_index], mod); if (try field_ty.onePossibleValue(pt)) |_| continue; field_ptr_ref = sema.inst_map.get(instrs[init_index]).?; } switch (air_tags[@intFromEnum(cur_inst)]) { .struct_field_ptr, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3, => if (cur_inst.toRef() == field_ptr_ref) continue, .bitcast => if (air_datas[@intFromEnum(cur_inst)].ty_op.operand == field_ptr_ref) continue, .store, .store_safe => { var ptr_ref = air_datas[@intFromEnum(cur_inst)].bin_op.lhs; if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) { ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand; }; if (ptr_ref == field_ptr_ref) { field_ptr_ref = .none; continue; } }, else => {}, } block.instructions.items[block_index] = cur_inst; block_index += 1; } block.instructions.shrinkRetainingCapacity(block_index); const struct_val = try pt.intern(.{ .aggregate = .{ .ty = struct_ty.toIntern(), .storage = .{ .elems = field_values }, } }); const struct_init = Air.internedToRef(struct_val); try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } try struct_ty.resolveLayout(pt); // Our task is to insert `store` instructions for all the default field values. for (found_fields, 0..) |field_ptr, i| { if (field_ptr != .none) continue; const field_src = init_src; // TODO better source location const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true); try sema.checkKnownAllocPtr(block, struct_ptr, default_field_ptr); const init = Air.internedToRef(field_values[i]); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } } fn zirValidatePtrArrayInit( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const validate_inst = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const init_src = block.nodeOffset(validate_inst.src_node); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); const instrs = sema.code.bodySlice(validate_extra.end, validate_extra.data.body_len); const first_elem_ptr_data = sema.code.instructions.items(.data)[@intFromEnum(instrs[0])].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data; const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr); const array_ty = sema.typeOf(array_ptr).childType(mod).optEuBaseType(mod); const array_len = array_ty.arrayLen(mod); // Collect the comptime element values in case the array literal ends up // being comptime-known. 
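// For example (hypothetical initializer): for `.{ 1, 2, runtime_x }`,
// interned values are collected for the first two elements; the runtime
// element later clears `array_is_comptime`, and the collected values then
// go unused.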
    const element_vals = try sema.arena.alloc(
        InternPool.Index,
        try sema.usizeCast(block, init_src, array_len),
    );
    if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
        .Struct => {
            var root_msg: ?*Module.ErrorMsg = null;
            errdefer if (root_msg) |msg| msg.destroy(sema.gpa);

            try array_ty.resolveStructFieldInits(pt);
            var i = instrs.len;
            while (i < array_len) : (i += 1) {
                const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
                if (default_val == .unreachable_value) {
                    const template = "missing tuple field with index {d}";
                    if (root_msg) |msg| {
                        try sema.errNote(init_src, msg, template, .{i});
                    } else {
                        root_msg = try sema.errMsg(init_src, template, .{i});
                    }
                    continue;
                }
                element_vals[i] = default_val;
            }

            if (root_msg) |msg| {
                root_msg = null;
                return sema.failWithOwnedErrorMsg(block, msg);
            }
        },
        .Array => {
            return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{
                array_len, instrs.len,
            });
        },
        .Vector => {
            return sema.fail(block, init_src, "expected {d} vector elements; found {d}", .{
                array_len, instrs.len,
            });
        },
        else => unreachable,
    };

    if (block.is_comptime and
        (try sema.resolveDefinedValue(block, init_src, array_ptr)) != null)
    {
        // In this case the comptime machinery will have evaluated the store instructions
        // at comptime so we have almost nothing to do here. However, in case of a
        // sentinel-terminated array, the sentinel will not have been populated by
        // any ZIR instructions at comptime; we need to do that here.
        if (array_ty.sentinel(mod)) |sentinel_val| {
            const array_len_ref = try pt.intRef(Type.usize, array_len);
            const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
            const sentinel = Air.internedToRef(sentinel_val.toIntern());
            try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
        }
        return;
    }

    // If the array has one possible value, the value is always comptime-known.
    if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| {
        const array_init = Air.internedToRef(array_opv.toIntern());
        try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
        return;
    }

    var array_is_comptime = true;
    var first_block_index = block.instructions.items.len;

    const air_tags = sema.air_instructions.items(.tag);
    const air_datas = sema.air_instructions.items(.data);

    outer: for (instrs, 0..) |elem_ptr, i| {
        // Determine whether the value stored to this pointer is comptime-known.

        if (array_ty.isTuple(mod)) {
            if (array_ty.structFieldIsComptime(i, mod))
                try array_ty.resolveStructFieldInits(pt);
            if (try array_ty.structFieldValueComptime(pt, i)) |opv| {
                element_vals[i] = opv.toIntern();
                continue;
            }
        }

        const elem_ptr_ref = sema.inst_map.get(elem_ptr).?;

        // We expect to see something like this in the current block AIR:
        //   %a = elem_ptr(...)
        //   store(%a, %b)
        // With an optional bitcast between the store and the elem_ptr.
        // If %b is a comptime operand, this element is comptime.
        //
        // However, in the case of a comptime-known pointer to an array, the
        // elem_ptr instruction is missing, so we have to pattern-match
        // based only on the store instructions.
        // `first_block_index` needs to point to the `elem_ptr` if it exists;
        // the `store` otherwise.
        //
        // This is nearly identical to similar logic in `validateStructInit`.
        // Possible performance enhancement: save the `block_index` between iterations
        // of the for loop.
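        // For example (illustrative), when the destination is a comptime-known
        // pointer such as `&buf` where `buf` is a comptime var, the element
        // pointer was already resolved to a constant, so no `elem_ptr` AIR
        // instruction exists and the loop below matches on the stores alone.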
        var block_index = block.instructions.items.len;
        while (block_index > 0) {
            block_index -= 1;
            const store_inst = block.instructions.items[block_index];
            if (store_inst.toRef() == elem_ptr_ref) {
                array_is_comptime = false;
                continue :outer;
            }
            switch (air_tags[@intFromEnum(store_inst)]) {
                .store, .store_safe => {},
                else => continue,
            }
            const bin_op = air_datas[@intFromEnum(store_inst)].bin_op;
            var ptr_ref = bin_op.lhs;
            if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
            };
            if (ptr_ref != elem_ptr_ref) continue;
            first_block_index = @min(if (elem_ptr_ref.toIndex()) |elem_ptr_inst|
                std.mem.lastIndexOfScalar(
                    Air.Inst.Index,
                    block.instructions.items[0..block_index],
                    elem_ptr_inst,
                ).?
            else
                block_index, first_block_index);
            if (try sema.resolveValue(bin_op.rhs)) |val| {
                element_vals[i] = val.toIntern();
            } else {
                array_is_comptime = false;
            }
            continue :outer;
        }
        array_is_comptime = false;
        continue :outer;
    }

    if (array_is_comptime) {
        if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
            switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
                .ptr => |ptr| switch (ptr.base_addr) {
                    .comptime_field => return, // This store was validated by the individual elem ptrs.
                    else => {},
                },
                else => {},
            }
        }

        // Our task is to delete all the `elem_ptr` and `store` instructions, and insert
        // instead a single `store` to the array_ptr with a comptime array value.
        var elem_index: usize = 0;
        var elem_ptr_ref = Air.Inst.Ref.none;
        var block_index = first_block_index;
        for (block.instructions.items[first_block_index..]) |cur_inst| {
            while (elem_ptr_ref == .none and elem_index < instrs.len) : (elem_index += 1) {
                if (array_ty.isTuple(mod) and array_ty.structFieldIsComptime(elem_index, mod)) continue;
                elem_ptr_ref = sema.inst_map.get(instrs[elem_index]).?;
            }
            switch (air_tags[@intFromEnum(cur_inst)]) {
                .ptr_elem_ptr => if (cur_inst.toRef() == elem_ptr_ref) continue,
                .bitcast => if (air_datas[@intFromEnum(cur_inst)].ty_op.operand == elem_ptr_ref) continue,
                .store, .store_safe => {
                    var ptr_ref = air_datas[@intFromEnum(cur_inst)].bin_op.lhs;
                    if (ptr_ref.toIndex()) |ptr_inst| if (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
                        ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
                    };
                    if (ptr_ref == elem_ptr_ref) {
                        elem_ptr_ref = .none;
                        continue;
                    }
                },
                else => {},
            }
            block.instructions.items[block_index] = cur_inst;
            block_index += 1;
        }
        block.instructions.shrinkRetainingCapacity(block_index);

        const array_val = try pt.intern(.{ .aggregate = .{
            .ty = array_ty.toIntern(),
            .storage = .{ .elems = element_vals },
        } });
        const array_init = Air.internedToRef(array_val);
        try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
    }
}

fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
    const pt = sema.pt;
    const mod = pt.zcu;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = block.nodeOffset(inst_data.src_node);
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);

    if (operand_ty.zigTypeTag(mod) != .Pointer) {
        return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(pt)});
    } else switch (operand_ty.ptrSize(mod)) {
        .One, .C => {},
        .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(pt)}),
        .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'",
.{operand_ty.fmt(pt)}), } if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { // No need to validate the actual pointer value, we don't need it! return; } const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) { return sema.fail(block, src, "cannot dereference undefined value", .{}); } } else if (try sema.typeRequiresComptime(elem_ty)) { const msg = msg: { const msg = try sema.errMsg( src, "values of type '{}' must be comptime-known, but operand value is runtime-known", .{elem_ty.fmt(pt)}, ); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, src, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ValidateDestructure, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const destructure_src = block.nodeOffset(extra.destructure_node); const operand = try sema.resolveInst(extra.operand); const operand_ty = sema.typeOf(operand); const can_destructure = switch (operand_ty.zigTypeTag(mod)) { .Array, .Vector => true, .Struct => operand_ty.isTuple(mod), else => false, }; if (!can_destructure) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "type '{}' cannot be destructured", .{operand_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(destructure_src, msg, "result destructured here", .{}); break :msg msg; }); } if (operand_ty.arrayLen(mod) != extra.expect_len) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "expected {} elements for destructure, found {}", .{ extra.expect_len, operand_ty.arrayLen(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(destructure_src, msg, "result destructured here", .{}); break :msg msg; }); } } fn failWithBadMemberAccess( sema: *Sema, block: *Block, agg_ty: Type, field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const kw_name = switch (agg_ty.zigTypeTag(zcu)) { .Union => "union", .Struct => "struct", .Opaque => "opaque", .Enum => "enum", else => unreachable, }; if (agg_ty.typeDeclInst(zcu)) |inst| if ((inst.resolve(ip) orelse return error.AnalysisFail) == .main_struct_inst) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ agg_ty.fmt(pt), field_name.fmt(ip), }); }; return sema.fail(block, field_src, "{s} '{}' has no member named '{}'", .{ kw_name, agg_ty.fmt(pt), field_name.fmt(ip), }); } fn failWithBadStructFieldAccess( sema: *Sema, block: *Block, struct_ty: Type, struct_type: InternPool.LoadedStructType, field_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, ) CompileError { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const msg = msg: { const msg = try sema.errMsg( field_src, "no field named '{}' in struct '{}'", .{ field_name.fmt(ip), struct_type.name.fmt(ip) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(struct_ty.srcLoc(zcu), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn failWithBadUnionFieldAccess( sema: *Sema, block: *Block, union_ty: Type, union_obj: InternPool.LoadedUnionType, field_src: LazySrcLoc, 
field_name: InternPool.NullTerminatedString, ) CompileError { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; const msg = msg: { const msg = try sema.errMsg( field_src, "no field named '{}' in union '{}'", .{ field_name.fmt(ip), union_obj.name.fmt(ip) }, ); errdefer msg.destroy(gpa); try sema.errNote(union_ty.srcLoc(zcu), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { const zcu = sema.pt.zcu; const src_loc = decl_ty.srcLocOrNull(zcu) orelse return; const category = switch (decl_ty.zigTypeTag(zcu)) { .Union => "union", .Struct => "struct", .Enum => "enum", .Opaque => "opaque", else => unreachable, }; try sema.errNote(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(pl_node.src_node); const bin = sema.code.extraData(Zir.Inst.Bin, pl_node.payload_index).data; const ptr = try sema.resolveInst(bin.lhs); const operand = try sema.resolveInst(bin.rhs); const ptr_inst = ptr.toIndex().?; const air_datas = sema.air_instructions.items(.data); switch (sema.air_instructions.items(.tag)[@intFromEnum(ptr_inst)]) { .inferred_alloc_comptime => { const iac = &air_datas[@intFromEnum(ptr_inst)].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; return sema.storeToInferredAlloc(block, src, ptr, operand, ia); }, else => unreachable, } } fn storeToInferredAlloc( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, operand: Air.Inst.Ref, inferred_alloc: *InferredAlloc, ) CompileError!void { // Create a store instruction as a placeholder. This will be replaced by a // proper store sequence once we know the stored type. const dummy_store = try block.addBinOp(.store, ptr, operand); try sema.checkComptimeKnownStore(block, dummy_store, src); // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.prongs.append(sema.arena, dummy_store.toIndex().?); } fn storeToInferredAllocComptime( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, iac: *Air.Inst.Data.InferredAllocComptime, ) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const operand_ty = sema.typeOf(operand); // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl or a ComptimeAlloc. 
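    // Illustrative examples (hypothetical user code): for `const x = f();`
    // forced to comptime, where the result does not reference comptime-mutable
    // memory, the pointer below becomes an anonymous interned value (`uav`);
    // for `comptime var x = f();`, later mutation must remain possible, so a
    // `ComptimeAlloc` is created instead.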
const operand_val = try sema.resolveValue(operand) orelse { return sema.failWithNeededComptime(block, src, .{ .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known", }); }; const alloc_ty = try pt.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .alignment = iac.alignment, .is_const = iac.is_const, }, }); if (iac.is_const and !operand_val.canMutateComptimeVarState(zcu)) { iac.ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .uav = .{ .val = operand_val.toIntern(), .orig_ty = alloc_ty.toIntern(), } }, .byte_offset = 0, } }); } else { const alloc_index = try sema.newComptimeAlloc(block, operand_ty, iac.alignment); sema.getComptimeAlloc(alloc_index).val = .{ .interned = operand_val.toIntern() }; iac.ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), .base_addr = .{ .comptime_alloc = alloc_index }, .byte_offset = 0, } }); } } fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const quota: u32 = @intCast(try sema.resolveInt(block, src, inst_data.operand, Type.u32, .{ .needed_comptime_reason = "eval branch quota must be comptime-known", })); sema.branch_quota = @max(sema.branch_quota, quota); } fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const zir_tags = sema.code.instructions.items(.tag); const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ptr = try sema.resolveInst(extra.lhs); const operand = try sema.resolveInst(extra.rhs); const is_ret = if (extra.lhs.toIndex()) |ptr_index| zir_tags[@intFromEnum(ptr_index)] == .ret_ptr else false; // Check for the possibility of this pattern: // %a = ret_ptr // %b = store(%a, %c) // Where %c is an error union or error set. In such case we need to add // to the current function's inferred error set, if any. 
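    // For example (illustrative), in a function with an inferred error set:
    //     fn f(b: bool) !u32 {
    //         if (b) return error.Oops;
    //         return 42;
    //     }
    // the `return error.Oops` may be lowered as a store through `ret_ptr`, in
    // which case `error.Oops` must be added to the inferred error set of `f`.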
if (is_ret and sema.fn_ret_ty_ies != null) switch (sema.typeOf(operand).zigTypeTag(mod)) { .ErrorUnion, .ErrorSet => try sema.addToInferredErrorSet(operand), else => {}, }; const ptr_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node }); const operand_src = block.src(.{ .node_offset_store_operand = inst_data.src_node }); const air_tag: Air.Inst.Tag = if (is_ret) .ret_ptr else if (block.wantSafety()) .store_safe else .store; return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag); } fn zirStr(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const bytes = sema.code.instructions.items(.data)[@intFromEnum(inst)].str.get(sema.code); return sema.addStrLit( try sema.pt.zcu.intern_pool.getOrPutString(sema.gpa, sema.pt.tid, bytes, .maybe_embedded_nulls), bytes.len, ); } fn addNullTerminatedStrLit(sema: *Sema, string: InternPool.NullTerminatedString) CompileError!Air.Inst.Ref { return sema.addStrLit(string.toString(), string.length(&sema.pt.zcu.intern_pool)); } fn addStrLit(sema: *Sema, string: InternPool.String, len: u64) CompileError!Air.Inst.Ref { const pt = sema.pt; const array_ty = try pt.arrayType(.{ .len = len, .sentinel = .zero_u8, .child = .u8_type, }); const val = try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .bytes = string }, } }); return sema.uavRef(val); } fn uavRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref { return Air.internedToRef(try sema.refValue(val)); } fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index { const pt = sema.pt; const ptr_ty = (try pt.ptrTypeSema(.{ .child = pt.zcu.intern_pool.typeOf(val), .flags = .{ .alignment = .none, .is_const = true, .address_space = .generic, }, })).toIntern(); return pt.intern(.{ .ptr = .{ .ty = ptr_ty, .base_addr = .{ .uav = .{ .val = val, .orig_ty = ptr_ty, } }, .byte_offset = 0, } }); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].int; return sema.pt.intRef(Type.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[@intFromEnum(int.start)..][0..byte_count]; // TODO: this allocation and copy is only needed because the limbs may be unaligned. // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these // two lines can be removed. 
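    // For instance (illustrative), a literal too large for one limb, such as
    // 0xffff_ffff_ffff_ffff_f, is encoded by AstGen as raw limb bytes in the
    // ZIR string table, which carries no alignment guarantee for
    // `std.math.big.Limb`.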
const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); return Air.internedToRef((try sema.pt.intValue_big(Type.comptime_int, .{ .limbs = limbs, .positive = true, })).toIntern()); } fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const number = sema.code.instructions.items(.data)[@intFromEnum(inst)].float; return Air.internedToRef((try sema.pt.floatValue( Type.comptime_float, number, )).toIntern()); } fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return Air.internedToRef((try sema.pt.floatValue(Type.comptime_float, number)).toIntern()); } fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const msg = try sema.resolveConstString(block, operand_src, inst_data.operand, .{ .needed_comptime_reason = "compile error string must be comptime-known", }); return sema.fail(block, src, "{s}", .{msg}); } fn zirCompileLog( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; var managed = mod.compile_log_text.toManaged(sema.gpa); defer pt.zcu.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); const src_node = extra.data.src_node; const args = sema.code.refSlice(extra.end, extended.small); for (args, 0..) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); const arg = try sema.resolveInst(arg_ref); const arg_ty = sema.typeOf(arg); if (try sema.resolveValueResolveLazy(arg)) |val| { try writer.print("@as({}, {})", .{ arg_ty.fmt(pt), val.fmtValueSema(pt, sema), }); } else { try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(pt)}); } } try writer.print("\n", .{}); const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.owner); if (!gop.found_existing) gop.value_ptr.* = .{ .base_node_inst = block.src_base_inst, .node_offset = src_node, }; return .void_value; } fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const msg_inst = try sema.resolveInst(inst_data.operand); // `panicWithMsg` would perform this coercion for us, but we can get a better // source location if we do it here. 
const coerced_msg = try sema.coerce(block, Type.slice_const_u8, msg_inst, block.builtinCallArgSrc(inst_data.src_node, 0)); if (block.is_comptime) { return sema.fail(block, src, "encountered @panic at comptime", .{}); } try sema.panicWithMsg(block, src, coerced_msg, .@"@panic"); } fn zirTrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const src_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].node; const src = block.nodeOffset(src_node); if (block.is_comptime) return sema.fail(block, src, "encountered @trap at comptime", .{}); _ = try block.addNoOp(.trap); } fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.bodySlice(extra.end, extra.data.body_len); const gpa = sema.gpa; // AIR expects a block outside the loop block too. // Reserve space for a Loop instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); const loop_inst: Air.Inst.Index = @enumFromInt(@intFromEnum(block_inst) + 1); try sema.air_instructions.ensureUnusedCapacity(gpa, 2); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = undefined, }); sema.air_instructions.appendAssumeCapacity(.{ .tag = .loop, .data = .{ .ty_pl = .{ .ty = .noreturn_type, .payload = undefined, } }, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .src_locs = .{}, .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block = parent_block.makeSubBlock(); child_block.label = &label; child_block.runtime_cond = null; child_block.runtime_loop = src; child_block.runtime_index.increment(); const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); var loop_block = child_block.makeSubBlock(); defer loop_block.instructions.deinit(gpa); // Use `analyzeBodyInner` directly to push any comptime control flow up the stack. try sema.analyzeBodyInner(&loop_block, body); const loop_block_len = loop_block.instructions.items.len; if (loop_block_len > 0 and sema.typeOf(loop_block.instructions.items[loop_block_len - 1].toRef()).isNoReturn(mod)) { // If the loop ended with a noreturn terminator, then there is no way for it to loop, // so we can just use the block instead. 
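    // Illustrative example (hypothetical source, `f` is a placeholder): in
    //     while (true) {
    //         return f();
    //     }
    // the body ends in a noreturn `ret`, so the back-edge can never be taken
    // and the reserved `loop` instruction is discarded in favor of the block.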
try child_block.instructions.appendSlice(gpa, loop_block.instructions.items); } else { try child_block.instructions.append(gpa, loop_inst); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len); sema.air_instructions.items(.data)[@intFromEnum(loop_inst)].ty_pl.payload = sema.addExtraAssumeCapacity( Air.Block{ .body_len = @intCast(loop_block_len) }, ); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(loop_block.instructions.items)); } return sema.resolveAnalyzedBlock(parent_block, src, &child_block, merges, false); } fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; const gpa = sema.gpa; const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(pl_node.src_node); const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = sema.code.bodySlice(extra.end, extra.data.body_len); // we check this here to avoid undefined symbols if (!build_options.have_llvm) return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{}); var c_import_buf = std.ArrayList(u8).init(gpa); defer c_import_buf.deinit(); const comptime_reason: Block.ComptimeReason = .{ .c_import = .{ .src = src } }; var child_block: Block = .{ .parent = parent_block, .sema = sema, .namespace = parent_block.namespace, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = true, .comptime_reason = &comptime_reason, .c_import_buf = &c_import_buf, .runtime_cond = parent_block.runtime_cond, .runtime_loop = parent_block.runtime_loop, .runtime_index = parent_block.runtime_index, .src_base_inst = parent_block.src_base_inst, .type_name_ctx = parent_block.type_name_ctx, }; defer child_block.instructions.deinit(gpa); _ = try sema.analyzeInlineBody(&child_block, body, inst); var c_import_res = comp.cImport(c_import_buf.items, parent_block.ownerModule()) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); defer c_import_res.deinit(gpa); if (c_import_res.errors.errorMessageCount() != 0) { const msg = msg: { const msg = try sema.errMsg(src, "C import failed", .{}); errdefer msg.destroy(gpa); if (!comp.config.link_libc) try sema.errNote(src, msg, "libc headers not available; compilation does not link against libc", .{}); const gop = try zcu.cimport_errors.getOrPut(gpa, sema.owner); if (!gop.found_existing) { gop.value_ptr.* = c_import_res.errors; c_import_res.errors = std.zig.ErrorBundle.empty; } break :msg msg; }; return sema.failWithOwnedErrorMsg(&child_block, msg); } const parent_mod = parent_block.ownerModule(); const c_import_mod = Package.Module.create(comp.arena, .{ .global_cache_directory = comp.global_cache_directory, .paths = .{ .root = .{ .root_dir = Compilation.Directory.cwd(), .sub_path = std.fs.path.dirname(c_import_res.out_zig_path) orelse "", }, .root_src_path = std.fs.path.basename(c_import_res.out_zig_path), }, .fully_qualified_name = c_import_res.out_zig_path, .cc_argv = parent_mod.cc_argv, .inherited = .{}, .global = comp.config, .parent = parent_mod, .builtin_mod = parent_mod.getBuiltinDependency(), .builtin_modules = null, // `builtin_mod` is set }) catch |err| switch (err) { // None of these are possible because we are creating a package with // the exact same configuration as the parent package, which already // passed these checks. 
error.ValgrindUnsupportedOnTarget => unreachable, error.TargetRequiresSingleThreaded => unreachable, error.BackendRequiresSingleThreaded => unreachable, error.TargetRequiresPic => unreachable, error.PieRequiresPic => unreachable, error.DynamicLinkingRequiresPic => unreachable, error.TargetHasNoRedZone => unreachable, error.StackCheckUnsupportedByTarget => unreachable, error.StackProtectorUnsupportedByTarget => unreachable, error.StackProtectorUnavailableWithoutLibC => unreachable, else => |e| return e, }; const result = pt.importPkg(c_import_mod) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); const path_digest = zcu.filePathDigest(result.file_index); pt.astGenFile(result.file, path_digest) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); // TODO: register some kind of dependency on the file. // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); } fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(inst_data.src_node); return sema.failWithUseOfAsync(parent_block, src); } fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_comptime: bool) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(pl_node.src_node); const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = sema.code.bodySlice(extra.end, extra.data.body_len); const gpa = sema.gpa; // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated or is an unlabeled block. 
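    // For example (illustrative; `cond`, `a`, `b` are placeholders), a labeled
    // block such as
    //     const x = blk: {
    //         if (cond) break :blk a;
    //         break :blk b;
    //     };
    // needs a stable `Air.Inst.Index` for its `block` before the body is
    // analyzed, because each `break :blk` lowers to a `br` referencing it.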
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .src_locs = .{}, .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block: Block = .{ .parent = parent_block, .sema = sema, .namespace = parent_block.namespace, .instructions = .{}, .label = &label, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime or force_comptime, .comptime_reason = parent_block.comptime_reason, .is_typeof = parent_block.is_typeof, .want_safety = parent_block.want_safety, .float_mode = parent_block.float_mode, .c_import_buf = parent_block.c_import_buf, .runtime_cond = parent_block.runtime_cond, .runtime_loop = parent_block.runtime_loop, .runtime_index = parent_block.runtime_index, .error_return_trace_index = parent_block.error_return_trace_index, .src_base_inst = parent_block.src_base_inst, .type_name_ctx = parent_block.type_name_ctx, }; defer child_block.instructions.deinit(gpa); defer label.merges.deinit(gpa); return sema.resolveBlockBody(parent_block, src, &child_block, body, inst, &label.merges); } /// Semantically analyze the given ZIR body, emitting any resulting runtime code into the AIR block /// specified by `child_block` if necessary (and emitting this block into `parent_block`). /// TODO: `merges` is known from `child_block`, remove this parameter. fn resolveBlockBody( sema: *Sema, parent_block: *Block, src: LazySrcLoc, child_block: *Block, body: []const Zir.Inst.Index, /// This is the instruction that a break instruction within `body` can /// use to return from the body. body_inst: Zir.Inst.Index, merges: *Block.Merges, ) CompileError!Air.Inst.Ref { if (child_block.is_comptime) { return sema.resolveInlineBody(child_block, body, body_inst); } else { assert(sema.air_instructions.items(.tag)[@intFromEnum(merges.block_inst)] == .block); var need_debug_scope = false; child_block.need_debug_scope = &need_debug_scope; if (sema.analyzeBodyInner(child_block, body)) |_| { return sema.resolveAnalyzedBlock(parent_block, src, child_block, merges, need_debug_scope); } else |err| switch (err) { error.ComptimeBreak => { // Comptime control flow is happening, however child_block may still contain // runtime instructions which need to be copied to the parent block. if (need_debug_scope and child_block.instructions.items.len > 0) { // We need a runtime block for scoping reasons. _ = try child_block.addBr(merges.block_inst, .void_value); try parent_block.instructions.append(sema.gpa, merges.block_inst); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{ .ty = .void_type, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(child_block.instructions.items.len), }), } }; sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items)); } else { // We can copy instructions directly to the parent block. 
                    try parent_block.instructions.appendSlice(sema.gpa, child_block.instructions.items);
                }
                const break_inst = sema.comptime_break_inst;
                const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break";
                const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
                if (extra.block_inst == body_inst) {
                    return try sema.resolveInst(break_data.operand);
                } else {
                    return error.ComptimeBreak;
                }
            },
            else => |e| return e,
        }
    }
}

/// After a body corresponding to an AIR `block` has been analyzed, this function places the
/// resulting instructions into the block pointed at by `merges.block_inst` if necessary, or the
/// block may be elided in favor of inlining the instructions directly into the parent block.
/// Either way, it considers all merges of this block, and combines them appropriately using peer
/// type resolution, returning the final value of the block.
fn resolveAnalyzedBlock(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    merges: *Block.Merges,
    need_debug_scope: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;
    const pt = sema.pt;
    const mod = pt.zcu;

    // Blocks must terminate with noreturn instruction.
    assert(child_block.instructions.items.len != 0);
    assert(sema.typeOf(child_block.instructions.items[child_block.instructions.items.len - 1].toRef()).isNoReturn(mod));

    const block_tag = sema.air_instructions.items(.tag)[@intFromEnum(merges.block_inst)];
    switch (block_tag) {
        .block => {},
        .dbg_inline_block => assert(need_debug_scope),
        else => unreachable,
    }
    if (merges.results.items.len == 0) {
        switch (block_tag) {
            .block => {
                // No need for a block instruction. We can put the new instructions
                // directly into the parent block.
                if (need_debug_scope) {
                    // The code following this block is unreachable, as the block has no
                    // merges, so we don't necessarily need to emit this as an AIR block.
                    // However, we need a block *somewhere* to make the scoping correct,
                    // so forward this request to the parent block.
                    if (parent_block.need_debug_scope) |ptr| ptr.* = true;
                }
                try parent_block.instructions.appendSlice(gpa, child_block.instructions.items);
                return child_block.instructions.items[child_block.instructions.items.len - 1].toRef();
            },
            .dbg_inline_block => {
                // Create a block containing all instructions from the body.
                try parent_block.instructions.append(gpa, merges.block_inst);
                try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).Struct.fields.len +
                    child_block.instructions.items.len);
                sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
                    .ty = .noreturn_type,
                    .payload = sema.addExtraAssumeCapacity(Air.DbgInlineBlock{
                        .func = child_block.inlining.?.func,
                        .body_len = @intCast(child_block.instructions.items.len),
                    }),
                } };
                sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items));
                return merges.block_inst.toRef();
            },
            else => unreachable,
        }
    }
    if (merges.results.items.len == 1) {
        // If the `break` is trailing, we may be able to elide the AIR block here
        // by appending the new instructions directly to the parent block.
        if (!need_debug_scope) {
            const last_inst_index = child_block.instructions.items.len - 1;
            const last_inst = child_block.instructions.items[last_inst_index];
            if (sema.getBreakBlock(last_inst)) |br_block| {
                if (br_block == merges.block_inst) {
                    // Great, the last instruction is the break! Put the instructions
                    // directly into the parent block.
                    try parent_block.instructions.appendSlice(gpa, child_block.instructions.items[0..last_inst_index]);
                    return merges.results.items[0];
                }
            }
        }
        // Okay, we need a runtime block. If the value is comptime-known, the
        // block should just return void, and we return the merge result
        // directly. Otherwise, we can defer to the logic below.
        if (try sema.resolveValue(merges.results.items[0])) |result_val| {
            // Create a block containing all instructions from the body.
            try parent_block.instructions.append(gpa, merges.block_inst);
            switch (block_tag) {
                .block => {
                    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
                        child_block.instructions.items.len);
                    sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
                        .ty = .void_type,
                        .payload = sema.addExtraAssumeCapacity(Air.Block{
                            .body_len = @intCast(child_block.instructions.items.len),
                        }),
                    } };
                },
                .dbg_inline_block => {
                    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).Struct.fields.len +
                        child_block.instructions.items.len);
                    sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
                        .ty = .void_type,
                        .payload = sema.addExtraAssumeCapacity(Air.DbgInlineBlock{
                            .func = child_block.inlining.?.func,
                            .body_len = @intCast(child_block.instructions.items.len),
                        }),
                    } };
                },
                else => unreachable,
            }
            sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items));
            // Rewrite the break to just give value {}; the value is
            // comptime-known and will be returned directly.
            sema.air_instructions.items(.data)[@intFromEnum(merges.br_list.items[0])].br.operand = .void_value;
            return Air.internedToRef(result_val.toIntern());
        }
    }
    // It is impossible to have the number of results be > 1 in a comptime scope.
    assert(!child_block.is_comptime); // A compile error should already have been emitted in the condbr condition.

    // Note that we'll always create an AIR block here, so `need_debug_scope` is irrelevant.

    // Need to set the type and emit the Block instruction. This allows machine code generation
    // to emit a jump instruction to after the block when it encounters the break.
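    // Sketch of the resulting shape (simplified, not verbatim AIR):
    //     %blk = block(<resolved peer type>, {
    //         ...
    //         br(%blk, %result_a) // one break site
    //         ...
    //         br(%blk, %result_b) // another break site
    //     })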
try parent_block.instructions.append(gpa, merges.block_inst); const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .{ .override = merges.src_locs.items }); // TODO add note "missing else causes void value" const type_src = src; // TODO: better source location if (try sema.typeRequiresComptime(resolved_ty)) { const msg = msg: { const msg = try sema.errMsg(type_src, "value with comptime-only type '{}' depends on runtime control flow", .{resolved_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); const runtime_src = child_block.runtime_cond orelse child_block.runtime_loop.?; try sema.errNote(runtime_src, msg, "runtime control flow here", .{}); try sema.explainWhyTypeIsComptime(msg, type_src, resolved_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(child_block, msg); } for (merges.results.items, merges.src_locs.items) |merge_inst, merge_src| { try sema.validateRuntimeValue(child_block, merge_src orelse src, merge_inst); } const ty_inst = Air.internedToRef(resolved_ty.toIntern()); switch (block_tag) { .block => { try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(child_block.instructions.items.len), }), } }; }, .dbg_inline_block => { try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).Struct.fields.len + child_block.instructions.items.len); sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.DbgInlineBlock{ .func = child_block.inlining.?.func, .body_len = @intCast(child_block.instructions.items.len), }), } }; }, else => unreachable, } sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items)); // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. for (merges.br_list.items) |br| { const br_operand = sema.air_instructions.items(.data)[@intFromEnum(br)].br.operand; const br_operand_src = src; const br_operand_ty = sema.typeOf(br_operand); if (br_operand_ty.eql(resolved_ty, mod)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); defer coerce_block.instructions.deinit(gpa); const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. if (coerce_block.instructions.items.len == 0) { sema.air_instructions.items(.data)[@intFromEnum(br)].br.operand = coerced_operand; continue; } assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1].toRef() == coerced_operand); // Convert the br instruction to a block instruction that has the coercion // and then a new br inside that returns the coerced instruction. 
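        // Illustrative before/after (simplified AIR): a break whose operand
        // needs coercion,
        //     %br = br(%blk, %operand)
        // is rewritten in place into
        //     %br = block(noreturn, {
        //         %coerced = ... coercion instructions ...
        //         br(%blk, %coerced)
        //     })
        // reusing the original instruction index so existing references stay valid.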
const sub_block_len: u32 = @intCast(coerce_block.instructions.items.len + 1); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + sub_block_len); try sema.air_instructions.ensureUnusedCapacity(gpa, 1); const sub_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); sema.air_instructions.items(.tag)[@intFromEnum(br)] = .block; sema.air_instructions.items(.data)[@intFromEnum(br)] = .{ .ty_pl = .{ .ty = .noreturn_type, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = sub_block_len, }), } }; sema.air_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items)); sema.air_extra.appendAssumeCapacity(@intFromEnum(sub_br_inst)); sema.air_instructions.appendAssumeCapacity(.{ .tag = .br, .data = .{ .br = .{ .block_inst = merges.block_inst, .operand = coerced_operand, } }, }); } return merges.block_inst.toRef(); } fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const options_src = block.builtinCallArgSrc(inst_data.src_node, 1); const decl_name = try ip.getOrPutString( zcu.gpa, pt.tid, sema.code.nullTerminatedString(extra.decl_name), .no_embedded_nulls, ); const nav_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); const container_namespace = container_ty.getNamespaceIndex(zcu); const lookup = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false) orelse return sema.failWithBadMemberAccess(block, container_ty, operand_src, decl_name); break :index_blk lookup.nav; } else try sema.lookupIdentifier(block, operand_src, decl_name); const options = try sema.resolveExportOptions(block, options_src, extra.options); try sema.ensureNavResolved(src, nav_index); // Make sure to export the owner Nav if applicable. const exported_nav = switch (ip.indexToKey(ip.getNav(nav_index).status.resolved.val)) { .variable => |v| v.owner_nav, .@"extern" => |e| e.owner_nav, .func => |f| f.owner_nav, else => nav_index, }; try sema.analyzeExport(block, src, options, exported_nav); } fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const options_src = block.builtinCallArgSrc(inst_data.src_node, 1); const operand = try sema.resolveInstConst(block, operand_src, extra.operand, .{ .needed_comptime_reason = "export target must be comptime-known", }); const options = try sema.resolveExportOptions(block, options_src, extra.options); if (options.linkage == .internal) return; // If the value has an owner Nav, export that instead. 
const maybe_owner_nav = switch (ip.indexToKey(operand.toIntern())) { .variable => |v| v.owner_nav, .@"extern" => |e| e.owner_nav, .func => |f| f.owner_nav, else => null, }; if (maybe_owner_nav) |owner_nav| { return sema.analyzeExport(block, src, options, owner_nav); } else { try sema.exports.append(zcu.gpa, .{ .opts = options, .src = src, .exported = .{ .uav = operand.toIntern() }, .status = .in_progress, }); } } pub fn analyzeExport( sema: *Sema, block: *Block, src: LazySrcLoc, options: Module.Export.Options, exported_nav_index: InternPool.Nav.Index, ) !void { const gpa = sema.gpa; const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; if (options.linkage == .internal) return; try sema.ensureNavResolved(src, exported_nav_index); const exported_nav = ip.getNav(exported_nav_index); const export_ty = Type.fromInterned(exported_nav.typeOf(ip)); if (!try sema.validateExternType(export_ty, .other)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "unable to export type '{}'", .{export_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, export_ty, .other); try sema.addDeclaredHereNote(msg, export_ty); break :msg msg; }); } // TODO: some backends might support re-exporting extern decls if (exported_nav.isExtern(ip)) { return sema.fail(block, src, "export target cannot be extern", .{}); } try sema.maybeQueueFuncBodyAnalysis(src, exported_nav_index); try sema.exports.append(gpa, .{ .opts = options, .src = src, .exported = .{ .nav = exported_nav_index }, .status = .in_progress, }); } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src = block.builtinCallArgSrc(extra.node, 0); const src = block.nodeOffset(extra.node); const alignment = try sema.resolveAlign(block, operand_src, extra.operand); const func = switch (sema.owner.unwrap()) { .func => |func| func, .cau => return sema.fail(block, src, "@setAlignStack outside of function scope", .{}), }; if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) { return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{ alignment.toByteUnits().?, }); } switch (Value.fromInterned(func).typeOf(zcu).fnCallingConvention(zcu)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => {}, } zcu.intern_pool.funcMaxStackAlignment(sema.func_index, alignment); } fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src = block.builtinCallArgSrc(extra.node, 0); const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, .{ .needed_comptime_reason = "operand to @setCold must be comptime-known", }); // TODO: should `@setCold` apply to the parent in an inline call? // See also #20642 and friends. 
const func = switch (sema.owner.unwrap()) { .func => |func| func, .cau => return, // does nothing outside a function }; ip.funcSetCold(func, is_cold); } fn zirDisableInstrumentation(sema: *Sema) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const func = switch (sema.owner.unwrap()) { .func => |func| func, .cau => return, // does nothing outside a function }; ip.funcSetDisableInstrumentation(func); } fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.builtinCallArgSrc(extra.node, 0); block.float_mode = try sema.resolveBuiltinEnum(block, src, extra.operand, "FloatMode", .{ .needed_comptime_reason = "operand to @setFloatMode must be comptime-known", }); } fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand, .{ .needed_comptime_reason = "operand to @setRuntimeSafety must be comptime-known", }); } fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { if (block.is_comptime) return; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const order_src = block.builtinCallArgSrc(extra.node, 0); const order = try sema.resolveAtomicOrder(block, order_src, extra.operand, .{ .needed_comptime_reason = "atomic order of @fence must be comptime-known", }); if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.acquire)) { return sema.fail(block, order_src, "atomic ordering must be acquire or stricter", .{}); } _ = try block.addInst(.{ .tag = .fence, .data = .{ .fence = order }, }); } fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"break"; const extra = sema.code.extraData(Zir.Inst.Break, inst_data.payload_index).data; const operand = try sema.resolveInst(inst_data.operand); const zir_block = extra.block_inst; var block = start_block; while (true) { if (block.label) |label| { if (label.zir_block == zir_block) { const br_ref = try start_block.addBr(label.merges.block_inst, operand); const src_loc = if (extra.operand_src_node != Zir.Inst.Break.no_src_node) start_block.nodeOffset(extra.operand_src_node) else null; try label.merges.src_locs.append(sema.gpa, src_loc); try label.merges.results.append(sema.gpa, operand); try label.merges.br_list.append(sema.gpa, br_ref.toIndex().?); block.runtime_index.increment(); if (block.runtime_cond == null and block.runtime_loop == null) { block.runtime_cond = start_block.runtime_cond orelse start_block.runtime_loop; block.runtime_loop = start_block.runtime_loop; } return; } } block = block.parent.?; } } fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { if (block.is_comptime or block.ownerModule().strip) return; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; if (block.instructions.items.len != 0) { const idx = block.instructions.items[block.instructions.items.len - 1]; if (sema.air_instructions.items(.tag)[@intFromEnum(idx)] == .dbg_stmt) { // The previous dbg_stmt didn't correspond to any actual code, so replace 
            // it.
            sema.air_instructions.items(.data)[@intFromEnum(idx)].dbg_stmt = .{
                .line = inst_data.line,
                .column = inst_data.column,
            };
            return;
        }
    }

    _ = try block.addInst(.{
        .tag = .dbg_stmt,
        .data = .{ .dbg_stmt = .{
            .line = inst_data.line,
            .column = inst_data.column,
        } },
    });
}

fn zirDbgVar(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
) CompileError!void {
    const str_op = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_op;
    const operand = try sema.resolveInst(str_op.operand);
    const name = str_op.getStr(sema.code);
    try sema.addDbgVar(block, operand, air_tag, name);
}

fn addDbgVar(
    sema: *Sema,
    block: *Block,
    operand: Air.Inst.Ref,
    air_tag: Air.Inst.Tag,
    name: []const u8,
) CompileError!void {
    if (block.is_comptime or block.ownerModule().strip) return;
    const pt = sema.pt;
    const mod = pt.zcu;

    const operand_ty = sema.typeOf(operand);
    const val_ty = switch (air_tag) {
        .dbg_var_ptr => operand_ty.childType(mod),
        .dbg_var_val => operand_ty,
        else => unreachable,
    };
    if (try sema.typeRequiresComptime(val_ty)) return;
    if (!(try sema.typeHasRuntimeBits(val_ty))) return;
    if (try sema.resolveValue(operand)) |operand_val| {
        if (operand_val.canMutateComptimeVarState(mod)) return;
    }

    // To ensure the lexical scoping is known to backends, this alloc must be
    // within a real runtime block. We set a flag which communicates information
    // to the closest lexically enclosing block:
    // * If it is a `block_inline`, communicates to logic in `analyzeBodyInner`
    //   to create a post-hoc block.
    // * Otherwise, communicates to logic in `resolveBlockBody` to create a
    //   real `block` instruction.
    if (block.need_debug_scope) |ptr| ptr.* = true;

    // Add the name to the AIR.
    const name_extra_index = try sema.appendAirString(name);

    _ = try block.addInst(.{
        .tag = air_tag,
        .data = .{ .pl_op = .{
            .payload = name_extra_index,
            .operand = operand,
        } },
    });
}

pub fn appendAirString(sema: *Sema, str: []const u8) Allocator.Error!u32 {
    const str_extra_index: u32 = @intCast(sema.air_extra.items.len);
    const elements_used = str.len / 4 + 1;
    const elements = try sema.air_extra.addManyAsSlice(sema.gpa, elements_used);
    const buffer = mem.sliceAsBytes(elements);
    @memcpy(buffer[0..str.len], str);
    buffer[str.len] = 0;
    return str_extra_index;
}

fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const mod = pt.zcu;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const src = block.tokenOffset(inst_data.src_tok);
    const decl_name = try mod.intern_pool.getOrPutString(
        sema.gpa,
        pt.tid,
        inst_data.get(sema.code),
        .no_embedded_nulls,
    );
    const nav_index = try sema.lookupIdentifier(block, src, decl_name);
    return sema.analyzeNavRef(src, nav_index);
}

fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const mod = pt.zcu;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const src = block.tokenOffset(inst_data.src_tok);
    const decl_name = try mod.intern_pool.getOrPutString(
        sema.gpa,
        pt.tid,
        inst_data.get(sema.code),
        .no_embedded_nulls,
    );
    const nav = try sema.lookupIdentifier(block, src, decl_name);
    return sema.analyzeNavVal(block, src, nav);
}

fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.Nav.Index {
    const pt = sema.pt;
    const mod = pt.zcu;
    var namespace = block.namespace;
    while (true) {
        if (try sema.lookupInNamespace(block, src, namespace, name, false)) |lookup| {
assert(lookup.accessible); return lookup.nav; } namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break; } unreachable; // AstGen detects use of undeclared identifiers. } /// This looks up a member of a specific namespace. It is affected by `usingnamespace` but /// only for ones in the specified namespace. fn lookupInNamespace( sema: *Sema, block: *Block, src: LazySrcLoc, namespace_index: InternPool.NamespaceIndex, ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, ) CompileError!?struct { nav: InternPool.Nav.Index, /// If `false`, the declaration is in a different file and is not `pub`. /// We still return the declaration for better error reporting. accessible: bool, } { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; try pt.ensureNamespaceUpToDate(namespace_index); const namespace = zcu.namespacePtr(namespace_index); const adapter: Zcu.Namespace.NameAdapter = .{ .zcu = zcu }; const src_file = zcu.namespacePtr(block.namespace).file_scope; if (Type.fromInterned(namespace.owner_type).typeDeclInst(zcu)) |type_decl_inst| { try sema.declareDependency(.{ .namespace_name = .{ .namespace = type_decl_inst, .name = ident_name, } }); } if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) { const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{}; defer checked_namespaces.deinit(gpa); // Keep track of name conflicts for error notes. var candidates: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}; defer candidates.deinit(gpa); try checked_namespaces.put(gpa, namespace, {}); var check_i: usize = 0; while (check_i < checked_namespaces.count()) : (check_i += 1) { const check_ns = checked_namespaces.keys()[check_i]; const Pass = enum { @"pub", priv }; for ([2]Pass{ .@"pub", .priv }) |pass| { if (pass == .priv and src_file != check_ns.file_scope) { continue; } const decls, const usingnamespaces = switch (pass) { .@"pub" => .{ &check_ns.pub_decls, &check_ns.pub_usingnamespace }, .priv => .{ &check_ns.priv_decls, &check_ns.priv_usingnamespace }, }; if (decls.getKeyAdapted(ident_name, adapter)) |nav_index| { try candidates.append(gpa, nav_index); } for (usingnamespaces.items) |sub_ns_nav| { try sema.ensureNavResolved(src, sub_ns_nav); const sub_ns_ty = Type.fromInterned(ip.getNav(sub_ns_nav).status.resolved.val); const sub_ns = zcu.namespacePtr(sub_ns_ty.getNamespaceIndex(zcu)); try checked_namespaces.put(gpa, sub_ns, {}); } } } ignore_self: { const skip_nav = switch (sema.owner.unwrap()) { .func => break :ignore_self, .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { .none, .type => break :ignore_self, .nav => |nav| nav, }, }; var i: usize = 0; while (i < candidates.items.len) { if (candidates.items[i] == skip_nav) { _ = candidates.orderedRemove(i); } else { i += 1; } } } switch (candidates.items.len) { 0 => {}, 1 => return .{ .nav = candidates.items[0], .accessible = true, }, else => return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "ambiguous reference", .{}); errdefer msg.destroy(gpa); for (candidates.items) |candidate| { try sema.errNote(zcu.navSrcLoc(candidate), msg, "declared here", .{}); } break :msg msg; }), } } else if (namespace.pub_decls.getKeyAdapted(ident_name, adapter)) |nav_index| { return .{ .nav = nav_index, .accessible = true, }; } else if (namespace.priv_decls.getKeyAdapted(ident_name, adapter)) |nav_index| { return .{ .nav = nav_index, .accessible = src_file == 
namespace.file_scope, }; } return null; } fn funcDeclSrcInst(sema: *Sema, func_inst: Air.Inst.Ref) !?InternPool.TrackedInst.Index { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const func_val = try sema.resolveValue(func_inst) orelse return null; if (func_val.isUndef(zcu)) return null; const nav = switch (ip.indexToKey(func_val.toIntern())) { .@"extern" => |e| e.owner_nav, .func => |f| f.owner_nav, .ptr => |ptr| switch (ptr.base_addr) { .nav => |nav| if (ptr.byte_offset == 0) nav else return null, else => return null, }, else => return null, }; return ip.getNav(nav).srcInst(ip); } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; if (block.is_comptime or block.is_typeof) { const index_val = try pt.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len); return Air.internedToRef(index_val.toIntern()); } if (!block.ownerModule().error_tracing) return .none; const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, error.OutOfMemory => |e| return e, }; return try block.addInst(.{ .tag = .save_err_return_trace_index, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(stack_trace_ty.toIntern()), .payload = @intCast(field_index), } }, }); } /// Add instructions to block to "pop" the error return trace. /// If `operand` is provided, only pops if operand is non-error. fn popErrorReturnTrace( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; if (operand != .none) { is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand); if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val| is_non_error = cond_val.toBool(); } else is_non_error = true; // no operand means pop unconditionally if (is_non_error == true) { // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or // the result is comptime-known to be a non-error. Either way, pop unconditionally. const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need // to pop any error trace that may have been propagated from our arguments. 
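// The code below materializes the conditional pop as a `block` wrapping a
// single `cond_br`. A rough sketch of the resulting AIR (illustrative
// notation, not literal AIR):
//
//   %b = block(void, {
//     cond_br(%is_non_error_inst, then: {
//       %t = err_return_trace()
//       store(%t."index", %saved_error_trace_index)
//       br(%b, void)
//     }, else: {
//       br(%b, void)
//     })
//   })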
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len); const cond_block_inst = try block.addInstAsIndex(.{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .void_type, .payload = undefined, // updated below }, }, }); var then_block = block.makeSubBlock(); defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, "index", .no_embedded_nulls); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, .void_value); // Otherwise, do nothing var else_block = block.makeSubBlock(); defer else_block.instructions.deinit(gpa); _ = try else_block.addBr(cond_block_inst, .void_value); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block const cond_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(then_block.instructions.items.len), .else_body_len = @intCast(else_block.instructions.items.len), }), } } }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items)); sema.air_instructions.items(.data)[@intFromEnum(cond_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 }); sema.air_extra.appendAssumeCapacity(@intFromEnum(cond_br_inst)); } } fn zirCall( sema: *Sema, block: *Block, inst: Zir.Inst.Index, comptime kind: enum { direct, field }, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const callee_src = block.src(.{ .node_offset_call_func = inst_data.src_node }); const call_src = block.nodeOffset(inst_data.src_node); const ExtraType = switch (kind) { .direct => Zir.Inst.Call, .field => Zir.Inst.FieldCall, }; const extra = sema.code.extraData(ExtraType, inst_data.payload_index); const args_len = extra.data.flags.args_len; const modifier: std.builtin.CallModifier = @enumFromInt(extra.data.flags.packed_modifier); const ensure_result_used = extra.data.flags.ensure_result_used; const pop_error_return_trace = extra.data.flags.pop_error_return_trace; const callee: ResolvedFieldCallee = switch (kind) { .direct => .{ .direct = try sema.resolveInst(extra.data.callee) }, .field => blk: { const object_ptr = try sema.resolveInst(extra.data.obj_ptr); const field_name = try mod.intern_pool.getOrPutString( sema.gpa, pt.tid, sema.code.nullTerminatedString(extra.data.field_name_start), .no_embedded_nulls, ); const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node }); break :blk try sema.fieldCallBind(block, callee_src, 
object_ptr, field_name, field_name_src); }, }; const func: Air.Inst.Ref = switch (callee) { .direct => |func_inst| func_inst, .method => |method| method.func_inst, }; const callee_ty = sema.typeOf(func); const total_args = args_len + @intFromBool(callee == .method); const func_ty = try sema.checkCallArgumentCount(block, func, callee_src, callee_ty, total_args, callee == .method); // The block index before the call, so we can potentially insert an error trace save here later. const block_index: Air.Inst.Index = @enumFromInt(block.instructions.items.len); // This will be set by `analyzeCall` to indicate whether any parameter was an error (making the // error trace potentially dirty). var input_is_error = false; const args_info: CallArgsInfo = .{ .zir_call = .{ .bound_arg = switch (callee) { .direct => .none, .method => |method| method.arg0_inst, }, .bound_arg_src = callee_src, .call_inst = inst, .call_node_offset = inst_data.src_node, .num_args = args_len, .args_body = @ptrCast(sema.code.extra[extra.end..]), .any_arg_is_error = &input_is_error, } }; // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction. const call_dbg_node: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1); const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call); switch (sema.owner.unwrap()) { .cau => input_is_error = false, .func => |owner_func| if (!mod.intern_pool.funcAnalysisUnordered(owner_func).calls_or_awaits_errorable_fn) { // No errorable fn actually called; we have no error return trace input_is_error = false; }, } if (block.ownerModule().error_tracing and !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace)) { const return_ty = sema.typeOf(call_inst); if (modifier != .always_tail and return_ty.isNoReturn(mod)) return call_inst; // call to "fn (...) noreturn", don't pop // TODO: we don't fix up the error trace for always_tail correctly; we should be doing it // *before* the recursive call. This will be a bit tricky to do and probably requires // moving this logic into analyzeCall. But that's probably a good idea anyway. if (modifier == .always_tail) return call_inst; // If any input is an error type, we might need to pop any trace it generated. Otherwise, we only // need to clean up our own trace if we were passed to a non-error-handling expression.
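// For example (hypothetical user code): in `h(g())` where `g` is errorable but
// `h` succeeds, the entries `g` pushed onto the trace describe no visible
// error, so they are popped once `h`'s result is known to be a non-error;
// when `pop_error_return_trace` is set, AstGen has determined the result never
// feeds an error-handling expression, so the pop is unconditional.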
if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) { const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const field_name = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, "index", .no_embedded_nulls); const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated const save_inst = try block.insertInst(block_index, .{ .tag = .save_err_return_trace_index, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(stack_trace_ty.toIntern()), .payload = @intCast(field_index), } }, }); // Pop the error return trace, testing the result for non-error if necessary const operand = if (pop_error_return_trace or modifier == .always_tail) .none else call_inst; try sema.popErrorReturnTrace(block, call_src, operand, save_inst); } return call_inst; } else { return call_inst; } } fn checkCallArgumentCount( sema: *Sema, block: *Block, func: Air.Inst.Ref, func_src: LazySrcLoc, callee_ty: Type, total_args: usize, member_fn: bool, ) !Type { const pt = sema.pt; const mod = pt.zcu; const func_ty = func_ty: { switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) { break :func_ty Type.fromInterned(ptr_info.child); } }, .Optional => { const opt_child = callee_ty.optionalChild(mod); if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and opt_child.childType(mod).zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(func_src, "cannot call optional type '{}'", .{ callee_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(func_src, msg, "consider using '.?', 'orelse' or 'if'", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } }, else => {}, } return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(pt)}); }; const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const args_len = total_args - @intFromBool(member_fn); if (func_ty_info.is_var_args) { assert(callConvSupportsVarArgs(func_ty_info.cc)); if (total_args >= fn_params_len) return func_ty; } else if (fn_params_len == total_args) { return func_ty; } const maybe_func_inst = try sema.funcDeclSrcInst(func); const member_str = if (member_fn) "member function " else ""; const variadic_str = if (func_ty_info.is_var_args) "at least " else ""; const msg = msg: { const msg = try sema.errMsg( func_src, "{s}expected {s}{d} argument(s), found {d}", .{ member_str, variadic_str, fn_params_len - @intFromBool(member_fn), args_len, }, ); errdefer msg.destroy(sema.gpa); if (maybe_func_inst) |func_inst| { try sema.errNote(.{ .base_node_inst = func_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), }, msg, "function declared here", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn callBuiltin( sema: *Sema, block: *Block, call_src: LazySrcLoc, builtin_fn: Air.Inst.Ref, modifier: std.builtin.CallModifier, args: []const Air.Inst.Ref, operation: CallOperation, ) !void { const pt = sema.pt; const mod = pt.zcu; const callee_ty = sema.typeOf(builtin_fn); const func_ty = func_ty: { switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.flags.size == .One and 
Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) { break :func_ty Type.fromInterned(ptr_info.child); } }, else => {}, } std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(pt)}); }; const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); } _ = try sema.analyzeCall( block, builtin_fn, func_ty, call_src, call_src, modifier, false, .{ .resolved = .{ .src = call_src, .args = args } }, null, operation, ); } const CallOperation = enum { call, @"@call", @"@panic", @"safety check", @"error return", }; const CallArgsInfo = union(enum) { /// The full list of resolved (but uncoerced) arguments is known ahead of time. resolved: struct { src: LazySrcLoc, args: []const Air.Inst.Ref, }, /// The list of resolved (but uncoerced) arguments is known ahead of time, but /// originated from a usage of the @call builtin at the given node offset. call_builtin: struct { call_node_offset: i32, args: []const Air.Inst.Ref, }, /// This call corresponds to a ZIR call instruction. The arguments have not yet been /// resolved. They must be resolved by `analyzeCall` so that argument resolution and /// generic instantiation may be interleaved. This is required for RLS to work on /// generic parameters. zir_call: struct { /// This may be `none`, in which case it is ignored. Otherwise, it is the /// already-resolved value of the first argument, from method call syntax. bound_arg: Air.Inst.Ref, /// The source location of `bound_arg` if it is not `none`. Otherwise `undefined`. bound_arg_src: LazySrcLoc, /// The ZIR call instruction. While each argument is analyzed, the in-progress /// parameter type is mapped to this index in `inst_map`, where it serves as the /// argument's result type. call_inst: Zir.Inst.Index, /// The node offset of `call_inst`. call_node_offset: i32, /// The number of arguments to this call, not including `bound_arg`. num_args: u32, /// The ZIR corresponding to all function arguments (other than `bound_arg`, if it /// is not `none`). Format is precisely the same as trailing data of ZIR `call`. args_body: []const Zir.Inst.Index, /// This bool will be set to true if any argument evaluated turns out to have an error set or error union type. /// This is used by the caller to restore the error return trace when necessary. any_arg_is_error: *bool, }, fn count(cai: CallArgsInfo) usize { return switch (cai) { inline .resolved, .call_builtin => |resolved| resolved.args.len, .zir_call => |zir_call| zir_call.num_args + @intFromBool(zir_call.bound_arg != .none), }; } fn argSrc(cai: CallArgsInfo, block: *Block, arg_index: usize) LazySrcLoc { return switch (cai) { .resolved => |resolved| resolved.src, .call_builtin => |call_builtin| block.src(.{ .call_arg = .{ .call_node_offset = call_builtin.call_node_offset, .arg_index = @intCast(arg_index), } }), .zir_call => |zir_call| if (arg_index == 0 and zir_call.bound_arg != .none) { return zir_call.bound_arg_src; } else block.src(.{ .call_arg = .{ .call_node_offset = zir_call.call_node_offset, .arg_index = @intCast(arg_index - @intFromBool(zir_call.bound_arg != .none)), } }), }; } /// Analyzes the arg at `arg_index` and coerces it to `param_ty`. /// `param_ty` may be `generic_poison`. A value of `null` indicates a varargs parameter. /// `func_ty_info` may be the type before instantiation, even if a generic /// instantiation has been partially completed.
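/// Layout note (inferred from the `.zir_call` slicing below): `args_body`
/// begins with `num_args` end-offsets into `args_body` itself, followed by the
/// concatenated argument bodies, so argument `i` occupies
/// `args_body[(if (i == 0) num_args else args_body[i-1])..args_body[i]]`.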
fn analyzeArg( cai: CallArgsInfo, sema: *Sema, block: *Block, arg_index: usize, maybe_param_ty: ?Type, func_ty_info: InternPool.Key.FuncType, func_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const param_count = func_ty_info.param_types.len; const uncoerced_arg: Air.Inst.Ref = switch (cai) { inline .resolved, .call_builtin => |resolved| resolved.args[arg_index], .zir_call => |zir_call| arg_val: { const has_bound_arg = zir_call.bound_arg != .none; if (arg_index == 0 and has_bound_arg) { break :arg_val zir_call.bound_arg; } const real_arg_idx = arg_index - @intFromBool(has_bound_arg); const arg_body = if (real_arg_idx == 0) blk: { const start = zir_call.num_args; const end = @intFromEnum(zir_call.args_body[0]); break :blk zir_call.args_body[start..end]; } else blk: { const start = @intFromEnum(zir_call.args_body[real_arg_idx - 1]); const end = @intFromEnum(zir_call.args_body[real_arg_idx]); break :blk zir_call.args_body[start..end]; }; // Generate args to comptime params in comptime block const parent_comptime = block.is_comptime; defer block.is_comptime = parent_comptime; // Note that we are indexing into parameters, not arguments, so use `arg_index` instead of `real_arg_idx` if (arg_index < @min(param_count, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) { block.is_comptime = true; // TODO set comptime_reason } // Give the arg its result type const provide_param_ty = if (maybe_param_ty) |t| t else Type.generic_poison; sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(provide_param_ty.toIntern())); // Resolve the arg! const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst); if (sema.typeOf(uncoerced_arg).zigTypeTag(mod) == .NoReturn) { // This terminates resolution of arguments. The caller should // propagate this. return uncoerced_arg; } if (sema.typeOf(uncoerced_arg).isError(mod)) { zir_call.any_arg_is_error.* = true; } break :arg_val uncoerced_arg; }, }; const param_ty = maybe_param_ty orelse { return sema.coerceVarArgParam(block, uncoerced_arg, cai.argSrc(block, arg_index)); }; switch (param_ty.toIntern()) { .generic_poison_type => return uncoerced_arg, else => return sema.coerceExtra( block, param_ty, uncoerced_arg, cai.argSrc(block, arg_index), .{ .param_src = .{ .func_inst = func_inst, .param_i = @intCast(arg_index), } }, ) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }, } } }; /// While performing an inline call, we need to switch between two Sema states a few times: the /// state for the caller (with the callee's `code`, `fn_ret_ty`, etc), and the state for the callee. /// These cannot be two separate Sema instances as they must share AIR. /// Therefore, this struct acts as a helper to switch between the two. /// This switching is required during argument evaluation, where function argument analysis must be /// interleaved with resolving generic parameter types. const InlineCallSema = struct { sema: *Sema, cur: enum { caller, callee, }, other_code: Zir, other_func_index: InternPool.Index, other_fn_ret_ty: Type, other_fn_ret_ty_ies: ?*InferredErrorSet, other_inst_map: InstMap, other_error_return_trace_index_on_fn_entry: Air.Inst.Ref, other_generic_owner: InternPool.Index, other_generic_call_src: LazySrcLoc, /// Sema should currently be set up for the caller (i.e. unchanged yet). This init will not /// change that. The other parameters contain data for the callee Sema. 
The other modified /// Sema fields are all initialized to default values for the callee. /// Must call deinit on the result. fn init( sema: *Sema, callee_code: Zir, callee_func_index: InternPool.Index, callee_error_return_trace_index_on_fn_entry: Air.Inst.Ref, ) InlineCallSema { return .{ .sema = sema, .cur = .caller, .other_code = callee_code, .other_func_index = callee_func_index, .other_fn_ret_ty = Type.void, .other_fn_ret_ty_ies = null, .other_inst_map = .{}, .other_error_return_trace_index_on_fn_entry = callee_error_return_trace_index_on_fn_entry, .other_generic_owner = .none, .other_generic_call_src = LazySrcLoc.unneeded, }; } /// Switch back to the caller Sema if necessary and free all temporary state of the callee Sema. fn deinit(ics: *InlineCallSema) void { switch (ics.cur) { .caller => {}, .callee => ics.swap(), } // Callee Sema owns the inst_map memory ics.other_inst_map.deinit(ics.sema.gpa); ics.* = undefined; } /// Returns a Sema instance suitable for usage from the caller context. fn caller(ics: *InlineCallSema) *Sema { switch (ics.cur) { .caller => {}, .callee => ics.swap(), } return ics.sema; } /// Returns a Sema instance suitable for usage from the callee context. fn callee(ics: *InlineCallSema) *Sema { switch (ics.cur) { .caller => ics.swap(), .callee => {}, } return ics.sema; } /// Internal use only. Swaps to the other Sema state. fn swap(ics: *InlineCallSema) void { ics.cur = switch (ics.cur) { .caller => .callee, .callee => .caller, }; // zig fmt: off std.mem.swap(Zir, &ics.sema.code, &ics.other_code); std.mem.swap(InternPool.Index, &ics.sema.func_index, &ics.other_func_index); std.mem.swap(Type, &ics.sema.fn_ret_ty, &ics.other_fn_ret_ty); std.mem.swap(?*InferredErrorSet, &ics.sema.fn_ret_ty_ies, &ics.other_fn_ret_ty_ies); std.mem.swap(InstMap, &ics.sema.inst_map, &ics.other_inst_map); std.mem.swap(InternPool.Index, &ics.sema.generic_owner, &ics.other_generic_owner); std.mem.swap(LazySrcLoc, &ics.sema.generic_call_src, &ics.other_generic_call_src); std.mem.swap(Air.Inst.Ref, &ics.sema.error_return_trace_index_on_fn_entry, &ics.other_error_return_trace_index_on_fn_entry); // zig fmt: on } }; fn analyzeCall( sema: *Sema, block: *Block, func: Air.Inst.Ref, func_ty: Type, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallModifier, ensure_result_used: bool, args_info: CallArgsInfo, call_dbg_node: ?Zir.Inst.Index, operation: CallOperation, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const callee_ty = sema.typeOf(func); const func_ty_info = zcu.typeToFunc(func_ty).?; const cc = func_ty_info.cc; if (try sema.resolveValue(func)) |func_val| if (func_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, call_src); if (cc == .Naked) { const maybe_func_inst = try sema.funcDeclSrcInst(func); const msg = msg: { const msg = try sema.errMsg( func_src, "unable to call function with naked calling convention", .{}, ); errdefer msg.destroy(sema.gpa); if (maybe_func_inst) |func_inst| try sema.errNote(.{ .base_node_inst = func_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), }, msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const call_tag: Air.Inst.Tag = switch (modifier) { .auto, .always_inline, .compile_time, .no_async, => Air.Inst.Tag.call, .never_tail => Air.Inst.Tag.call_never_tail, .never_inline => Air.Inst.Tag.call_never_inline, .always_tail => Air.Inst.Tag.call_always_tail, .async_kw => return sema.failWithUseOfAsync(block, call_src), }; if (modifier 
== .never_inline and func_ty_info.cc == .Inline) { return sema.fail(block, call_src, "'never_inline' call of inline function", .{}); } if (modifier == .always_inline and func_ty_info.is_noinline) { return sema.fail(block, call_src, "'always_inline' call of noinline function", .{}); } const gpa = sema.gpa; const is_generic_call = func_ty_info.is_generic; var is_comptime_call = block.is_comptime or modifier == .compile_time; var is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_inline_call and !is_comptime_call) { if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) { is_comptime_call = true; is_inline_call = true; comptime_reason = &.{ .comptime_ret_ty = .{ .func = func, .func_src = func_src, .return_ty = Type.fromInterned(func_ty_info.return_type), } }; } } if (sema.func_is_naked and !is_inline_call and !is_comptime_call) { const msg = msg: { const msg = try sema.errMsg(call_src, "runtime {s} not allowed in naked function", .{@tagName(operation)}); errdefer msg.destroy(sema.gpa); switch (operation) { .call, .@"@call", .@"@panic", .@"error return" => {}, .@"safety check" => try sema.errNote(call_src, msg, "use @setRuntimeSafety to disable runtime safety", .{}), } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (!is_inline_call and is_generic_call) { if (sema.instantiateGenericCall( block, func, func_src, call_src, ensure_result_used, args_info, call_tag, call_dbg_node, )) |some| { return some; } else |err| switch (err) { error.GenericPoison => { is_inline_call = true; }, error.ComptimeReturn => { is_inline_call = true; is_comptime_call = true; comptime_reason = &.{ .comptime_ret_ty = .{ .func = func, .func_src = func_src, .return_ty = Type.fromInterned(func_ty_info.return_type), } }; }, else => |e| return e, } } if (is_comptime_call and modifier == .never_inline) { return sema.fail(block, call_src, "unable to perform 'never_inline' call at compile-time", .{}); } const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstDefinedValue(block, func_src, func, .{ .needed_comptime_reason = "function being called at comptime must be comptime-known", .block_comptime_reason = comptime_reason, }); const module_fn_index = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .@"extern" => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), .func => func_val.toIntern(), .ptr => |ptr| blk: { switch (ptr.base_addr) { .nav => |nav_index| if (ptr.byte_offset == 0) { const nav = ip.getNav(nav_index); if (nav.isExtern(ip)) return sema.fail(block, call_src, "{s} call of extern function pointer", .{ if (is_comptime_call) "comptime" else "inline", }); break :blk nav.status.resolved.val; }, else => {}, } assert(callee_ty.isPtrAtRuntime(zcu)); return sema.fail(block, call_src, "{s} call of function pointer", .{ if (is_comptime_call) "comptime" else "inline", }); }, else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ if (is_comptime_call) "comptime" else "inline", }); } // Analyze the ZIR. The same ZIR gets analyzed into a runtime function // or an inlined call depending on what union tag the `label` field is // set to in the `Block`. // This block instruction will be used to capture the return value from the // inlined function. 
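// When debug info is kept for this call, the capture block is tagged
// `dbg_inline_block` so backends can open an inlined-function debug scope;
// otherwise a plain `block` suffices.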
const need_debug_scope = !is_comptime_call and !block.is_typeof and !block.ownerModule().strip; const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = if (need_debug_scope) .dbg_inline_block else .block, .data = undefined, }); // This one is shared among sub-blocks within the same callee, but not // shared among the entire inline/comptime call stack. var inlining: Block.Inlining = .{ .call_block = block, .call_src = call_src, .has_comptime_args = false, .func = module_fn_index, .comptime_result = undefined, .merges = .{ .src_locs = .{}, .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; const module_fn = zcu.funcInfo(module_fn_index); // This is not a function instance, so the function's `Nav` has a // `Cau` -- we don't need to check `generic_owner`. const fn_nav = ip.getNav(module_fn.owner_nav); const fn_cau_index = fn_nav.analysis_owner.unwrap().?; const fn_cau = ip.getCau(fn_cau_index); // We effectively want a child Sema here, but can't literally do that, because we need AIR // to be shared. InlineCallSema is a wrapper which handles this for us. While `ics` is in // scope, we should use its `caller`/`callee` methods rather than using `sema` directly // whenever performing an operation where the difference matters. var ics = InlineCallSema.init( sema, zcu.cauFileScope(fn_cau_index).zir, module_fn_index, block.error_return_trace_index, ); defer ics.deinit(); var child_block: Block = .{ .parent = null, .sema = sema, // The function body exists in the same namespace as the corresponding function declaration. .namespace = fn_cau.namespace, .instructions = .{}, .label = null, .inlining = &inlining, .is_typeof = block.is_typeof, .is_comptime = is_comptime_call, .comptime_reason = comptime_reason, .error_return_trace_index = block.error_return_trace_index, .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, .runtime_index = block.runtime_index, .src_base_inst = fn_cau.zir_index, .type_name_ctx = fn_nav.fqn, }; const merges = &child_block.inlining.?.merges; defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); try sema.emitBackwardBranch(block, call_src); // Whether this call should be memoized, set to false if the call can // mutate comptime state. // TODO: comptime call memoization is currently not supported under incremental compilation // since dependencies are not marked on callers. If we want to keep this around (we should // check that it's worthwhile first!), each memoized call needs a `Cau`. var should_memoize = !zcu.comp.incremental; // If it's a comptime function call, we need to memoize it as long as no external // comptime memory is mutated. const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); const owner_info = zcu.typeToFunc(Type.fromInterned(module_fn.ty)).?; const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len); var new_fn_info: InternPool.GetFuncTypeKey = .{ .param_types = new_param_types, .return_type = owner_info.return_type, .noalias_bits = owner_info.noalias_bits, .cc = if (owner_info.cc_is_generic) null else owner_info.cc, .is_var_args = owner_info.is_var_args, .is_noinline = owner_info.is_noinline, .section_is_generic = owner_info.section_is_generic, .addrspace_is_generic = owner_info.addrspace_is_generic, .is_generic = owner_info.is_generic, }; // This will have return instructions analyzed as break instructions to // the block_inst above. 
Here we are performing "comptime/inline semantic analysis" // for a function body, which means we must map the parameter ZIR instructions to // the AIR instructions of the callsite. The callee could be a generic function // which means its parameter type expressions must be resolved in order and used // to successively coerce the arguments. const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail); try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); var arg_i: u32 = 0; for (fn_info.param_body) |inst| { const opt_noreturn_ref = try analyzeInlineCallArg( &ics, block, &child_block, inst, new_param_types, &arg_i, args_info, is_comptime_call, &should_memoize, memoized_arg_values, func_ty_info, func, ); if (opt_noreturn_ref) |ref| { // Analyzing this argument gave a ref of a noreturn type. Terminate argument analysis here. return ref; } } // From here, we only really need to use the callee Sema. Make it the active one, then we // can just use `sema` directly. _ = ics.callee(); if (!inlining.has_comptime_args) { var block_it = block; while (block_it.inlining) |parent_inlining| { if (!parent_inlining.has_comptime_args and parent_inlining.func == module_fn_index) { const err_msg = try sema.errMsg(call_src, "inline call is recursive", .{}); return sema.failWithOwnedErrorMsg(null, err_msg); } block_it = parent_inlining.call_block; } } // In case it is a generic function with an expression for the return type that depends // on parameters, we must now do the same for the return type as we just did with // each of the parameters, resolving the return type and providing it to the child // `Sema` so that it can be used for the `ret_ptr` instruction. const ret_ty_inst = if (fn_info.ret_ty_body.len != 0) try sema.resolveInlineBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail) else try sema.resolveInst(fn_info.ret_ty_ref); const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } }; sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); if (module_fn.analysisUnordered(ip).inferred_error_set) { // Create a fresh inferred error set type for inline/comptime calls. const ies = try sema.arena.create(InferredErrorSet); ies.* = .{ .func = .none }; sema.fn_ret_ty_ies = ies; sema.fn_ret_ty = Type.fromInterned(try pt.intern(.{ .error_union_type = .{ .error_set_type = .adhoc_inferred_error_set_type, .payload_type = sema.fn_ret_ty.toIntern(), } })); } // This `res2` is here instead of directly breaking from `res` due to a stage1 // bug generating invalid LLVM IR. 
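// Memoization note: the probe below constructs a `memoized_call` key from the
// callee and the fully resolved comptime argument values; `.result = .none` is
// a placeholder, since the result is the value being looked up rather than
// part of the key.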
const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { if (zcu.intern_pool.getIfExists(.{ .memoized_call = .{ .func = module_fn_index, .arg_values = memoized_arg_values, .result = .none, } })) |memoized_call_index| { const memoized_call = zcu.intern_pool.indexToKey(memoized_call_index).memoized_call; break :res2 Air.internedToRef(memoized_call.result); } } new_fn_info.return_type = sema.fn_ret_ty.toIntern(); if (!is_comptime_call and !block.is_typeof) { const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[@intFromEnum(param)]) { .param, .param_comptime => { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].pl_tok; const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index); const param_name = sema.code.nullTerminatedString(extra.data.name); const inst = sema.inst_map.get(param).?; try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name); }, .param_anytype, .param_anytype_comptime => { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(param)].str_tok; const param_name = inst_data.get(sema.code); const inst = sema.inst_map.get(param).?; try sema.addDbgVar(&child_block, inst, .dbg_var_val, param_name); }, else => continue, }; } if (is_comptime_call and ensure_result_used) { try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src); } if (is_comptime_call or block.is_typeof) { // Save the error trace as our first action in the function // to match the behavior of runtime function calls. const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block); sema.error_return_trace_index_on_fn_entry = error_return_trace_index; child_block.error_return_trace_index = error_return_trace_index; } const result = result: { sema.analyzeFnBody(&child_block, fn_info.body) catch |err| switch (err) { error.ComptimeReturn => break :result inlining.comptime_result, else => |e| return e, }; break :result try sema.resolveAnalyzedBlock(block, call_src, &child_block, merges, need_debug_scope); }; if (is_comptime_call) { const result_val = try sema.resolveConstValue(block, LazySrcLoc.unneeded, result, undefined); const result_interned = result_val.toIntern(); // Transform ad-hoc inferred error set types into concrete error sets. const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned); // If the result can mutate comptime vars, we must not memoize it, as it contains // a reference to `comptime_allocs` so is not stable across instances of `Sema`. // TODO: check whether any external comptime memory was mutated by the // comptime function call. If so, then do not memoize the call here. if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(zcu)) { _ = try pt.intern(.{ .memoized_call = .{ .func = module_fn_index, .arg_values = memoized_arg_values, .result = result_transformed, } }); } break :res2 Air.internedToRef(result_transformed); } if (try sema.resolveValue(result)) |result_val| { const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_val.toIntern()); break :res2 Air.internedToRef(result_transformed); } const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern()); if (new_ty != .none) { // TODO: mutate in place the previous instruction if possible // rather than adding a bitcast instruction. 
break :res2 try block.addBitCast(Type.fromInterned(new_ty), result); } break :res2 result; }; break :res res2; } else res: { assert(!func_ty_info.is_generic); const args = try sema.arena.alloc(Air.Inst.Ref, args_info.count()); for (args, 0..) |*arg_out, arg_idx| { // Non-generic, so param types are already resolved const param_ty: ?Type = if (arg_idx < func_ty_info.param_types.len) ty: { break :ty Type.fromInterned(func_ty_info.param_types.get(ip)[arg_idx]); } else null; if (param_ty) |t| assert(!t.isGenericPoison()); arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func); try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg_out.*); if (sema.typeOf(arg_out.*).zigTypeTag(zcu) == .NoReturn) { return arg_out.*; } } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); switch (sema.owner.unwrap()) { .cau => {}, .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) { ip.funcSetCallsOrAwaitsErrorableFn(owner_func); }, } if (try sema.resolveValue(func)) |func_val| { if (zcu.intern_pool.isFuncBody(func_val.toIntern())) { try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = func_val.toIntern() })); try zcu.ensureFuncBodyAnalysisQueued(func_val.toIntern()); } } try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + args.len); const func_inst = try block.addInst(.{ .tag = call_tag, .data = .{ .pl_op = .{ .operand = func, .payload = sema.addExtraAssumeCapacity(Air.Call{ .args_len = @intCast(args.len), }), } }, }); sema.appendRefsAssumeCapacity(args); if (call_tag == .call_always_tail) { if (ensure_result_used) { try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); } if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: { // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
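// E.g. (hypothetical) a `*const fn () noreturn` produced by an `@ptrCast` may
// point at a function that actually returns; only a direct call of a known Zig
// function body is trusted, and everything else falls through to the panic
// below.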
if (try sema.resolveValue(func)) |func_val| { switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .func => break :skip_safety, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .nav => |nav| if (!ip.getNav(nav).isExtern(ip)) break :skip_safety, else => {}, }, else => {}, } } try sema.safetyPanic(block, call_src, .noreturn_returned); return .unreachable_value; } if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return .unreachable_value; } break :res func_inst; }; if (ensure_result_used) { try sema.ensureResultUsed(block, sema.typeOf(result), call_src); } return result; } fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const target = zcu.getTarget(); const backend = zcu.comp.getZigBackend(); if (!target_util.supportsTailCall(target, backend)) { return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{ @tagName(backend), @tagName(target.cpu.arch), }); } const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty); if (owner_func_ty.toIntern() != func_ty.toIntern()) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ func_ty.fmt(pt), owner_func_ty.fmt(pt), }); } _ = try block.addUnOp(.ret, result); return .unreachable_value; } /// Usually, returns null. If an argument was noreturn, returns that ref (which should become the call result). fn analyzeInlineCallArg( ics: *InlineCallSema, arg_block: *Block, param_block: *Block, inst: Zir.Inst.Index, new_param_types: []InternPool.Index, arg_i: *u32, args_info: CallArgsInfo, is_comptime_call: bool, should_memoize: *bool, memoized_arg_values: []InternPool.Index, func_ty_info: InternPool.Key.FuncType, func_inst: Air.Inst.Ref, ) !?Air.Inst.Ref { const mod = ics.sema.pt.zcu; const ip = &mod.intern_pool; const zir_tags = ics.callee().code.instructions.items(.tag); switch (zir_tags[@intFromEnum(inst)]) { .param_comptime, .param_anytype_comptime => param_block.inlining.?.has_comptime_args = true, else => {}, } switch (zir_tags[@intFromEnum(inst)]) { .param, .param_comptime => { // Evaluate the parameter type expression now that previous ones have // been mapped, and coerce the corresponding argument to it. 
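// A parameter's type expression may refer to earlier parameters, as in
// `fn f(comptime T: type, x: T) void` (hypothetical example); this works
// because previous iterations already mapped their param instructions into the
// callee's `inst_map`.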
const pl_tok = ics.callee().code.instructions.items(.data)[@intFromEnum(inst)].pl_tok; const param_src = param_block.tokenOffset(pl_tok.src_tok); const extra = ics.callee().code.extraData(Zir.Inst.Param, pl_tok.payload_index); const param_body = ics.callee().code.bodySlice(extra.end, extra.data.body_len); const param_ty = param_ty: { const raw_param_ty = func_ty_info.param_types.get(ip)[arg_i.*]; if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; const param_ty_inst = try ics.callee().resolveInlineBody(param_block, param_body, inst); const param_ty = try ics.callee().analyzeAsType(param_block, param_src, param_ty_inst); break :param_ty param_ty.toIntern(); }; new_param_types[arg_i.*] = param_ty; const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.fromInterned(param_ty), func_ty_info, func_inst); if (ics.caller().typeOf(casted_arg).zigTypeTag(mod) == .NoReturn) { return casted_arg; } const arg_src = args_info.argSrc(arg_block, arg_i.*); if (try ics.callee().typeRequiresComptime(Type.fromInterned(param_ty))) { _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{ .needed_comptime_reason = "argument to parameter with comptime-only type must be comptime-known", .block_comptime_reason = param_block.comptime_reason, }); } else if (!is_comptime_call and zir_tags[@intFromEnum(inst)] == .param_comptime) { _ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{ .needed_comptime_reason = "parameter is comptime", }); } if (is_comptime_call) { ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg); const arg_val = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{ .needed_comptime_reason = "argument to function being called at comptime must be comptime-known", .block_comptime_reason = param_block.comptime_reason, }); switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. return error.GenericPoison; }, else => {}, } // Needed so that lazy values do not trigger // assertion due to type not being resolved // when the hash function is called. const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val); should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern(); } else { ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } if (try ics.caller().resolveValue(casted_arg)) |_| { param_block.inlining.?.has_comptime_args = true; } arg_i.* += 1; }, .param_anytype, .param_anytype_comptime => { // No coercion needed. 
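// An `anytype` parameter takes on the argument's own resolved type, so the
// argument is analyzed with generic poison as its result type and the
// resulting type is recorded into `new_param_types` below.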
const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst); if (ics.caller().typeOf(uncasted_arg).zigTypeTag(mod) == .NoReturn) { return uncasted_arg; } const arg_src = args_info.argSrc(arg_block, arg_i.*); new_param_types[arg_i.*] = ics.caller().typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); const arg_val = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, .{ .needed_comptime_reason = "argument to function being called at comptime must be comptime-known", .block_comptime_reason = param_block.comptime_reason, }); switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. return error.GenericPoison; }, else => {}, } // Needed so that lazy values do not trigger // assertion due to type not being resolved // when the hash function is called. const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val); should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); memoized_arg_values[arg_i.*] = resolved_arg_val.toIntern(); } else { if (zir_tags[@intFromEnum(inst)] == .param_anytype_comptime) { _ = try ics.caller().resolveConstValue(arg_block, arg_src, uncasted_arg, .{ .needed_comptime_reason = "parameter is comptime", }); } ics.callee().inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); } if (try ics.caller().resolveValue(uncasted_arg)) |_| { param_block.inlining.?.has_comptime_args = true; } arg_i.* += 1; }, else => {}, } return null; } fn instantiateGenericCall( sema: *Sema, block: *Block, func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, ensure_result_used: bool, args_info: CallArgsInfo, call_tag: Air.Inst.Tag, call_dbg_node: ?Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const gpa = sema.gpa; const ip = &zcu.intern_pool; const func_val = try sema.resolveConstDefinedValue(block, func_src, func, .{ .needed_comptime_reason = "generic function being called must be comptime-known", }); const generic_owner = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .func => func_val.toIntern(), .ptr => |ptr| ip.getNav(ptr.base_addr.nav).status.resolved.val, else => unreachable, }; const generic_owner_func = zcu.intern_pool.indexToKey(generic_owner).func; const generic_owner_ty_info = zcu.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?; try sema.declareDependency(.{ .src_hash = generic_owner_func.zir_body_inst }); // Even though there may already be a generic instantiation corresponding // to this callsite, we must evaluate the expressions of the generic // function signature with the values of the callsite plugged in. // Importantly, this may include type coercions that determine whether the // instantiation is a match of a previous instantiation. // The actual monomorphization happens via adding `func_instance` to // `InternPool`. // Since we are looking at the generic owner here, it has a `Cau`. 
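// Illustrative example (hypothetical code): for
// `fn max(comptime T: type, a: T, b: T) T` called as `max(u32, x, y)`,
// `comptime_args` becomes `{ u32, .none, .none }` and the resulting
// `func_instance` is monomorphized to the runtime signature
// `fn (a: u32, b: u32) u32`.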
const fn_nav = ip.getNav(generic_owner_func.owner_nav); const fn_cau = ip.getCau(fn_nav.analysis_owner.unwrap().?); const fn_zir = zcu.namespacePtr(fn_cau.namespace).fileScope(zcu).zir; const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip) orelse return error.AnalysisFail); const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count()); @memset(comptime_args, .none); // We may overestimate the number of runtime args, but this will definitely be sufficient. const max_runtime_args = args_info.count() - @popCount(generic_owner_ty_info.comptime_bits); var runtime_args = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(sema.arena, max_runtime_args); // Re-run the block that creates the function, with the comptime parameters // pre-populated inside `inst_map`. This causes `param_comptime` and // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a // new, monomorphized function, with the comptime parameters elided. var child_sema: Sema = .{ .pt = pt, .gpa = gpa, .arena = sema.arena, .code = fn_zir, // We pass the generic callsite's owner decl here because whatever `Decl` // dependencies are chased at this point should be attached to the // callsite, not the `Decl` associated with the `func_instance`. .owner = sema.owner, .func_index = sema.func_index, // This may not be known yet, since the calling convention could be generic, but there // should be no illegal instructions encountered while creating the function anyway. .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .comptime_args = comptime_args, .generic_owner = generic_owner, .generic_call_src = call_src, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, .comptime_err_ret_trace = sema.comptime_err_ret_trace, }; defer child_sema.deinit(); var child_block: Block = .{ .parent = null, .sema = &child_sema, .namespace = fn_cau.namespace, .instructions = .{}, .inlining = null, .is_comptime = true, .src_base_inst = fn_cau.zir_index, .type_name_ctx = fn_nav.fqn, }; defer child_block.instructions.deinit(gpa); try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); for (fn_info.param_body[0..args_info.count()], 0..) |param_inst, arg_index| { const param_tag = fn_zir.instructions.items(.tag)[@intFromEnum(param_inst)]; const param_ty = switch (generic_owner_ty_info.param_types.get(ip)[arg_index]) { else => |ty| Type.fromInterned(ty), // parameter is not generic, so type is already resolved .generic_poison_type => param_ty: { // We have every parameter before this one, so can resolve this parameter's type now. // However, first check the param type, since it may be anytype. switch (param_tag) { .param_anytype, .param_anytype_comptime => { // The parameter doesn't have a type. break :param_ty Type.generic_poison; }, .param, .param_comptime => { // We now know every prior parameter, so can resolve this // parameter's type. The child sema has these types. const param_data = fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok; const param_extra = fn_zir.extraData(Zir.Inst.Param, param_data.payload_index); const param_ty_body = fn_zir.bodySlice(param_extra.end, param_extra.data.body_len); // Make sure any nested instructions don't clobber our work. 
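// (A parameter type body can itself contain e.g. function type expressions,
// which would otherwise overwrite `params` and the generic state mid-resolve.)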
const prev_params = child_block.params; const prev_no_partial_func_ty = child_sema.no_partial_func_ty; const prev_generic_owner = child_sema.generic_owner; const prev_generic_call_src = child_sema.generic_call_src; child_block.params = .{}; child_sema.no_partial_func_ty = true; child_sema.generic_owner = .none; child_sema.generic_call_src = LazySrcLoc.unneeded; defer { child_block.params = prev_params; child_sema.no_partial_func_ty = prev_no_partial_func_ty; child_sema.generic_owner = prev_generic_owner; child_sema.generic_call_src = prev_generic_call_src; } const param_ty_inst = try child_sema.resolveInlineBody(&child_block, param_ty_body, param_inst); break :param_ty try child_sema.analyzeAsType( &child_block, child_block.tokenOffset(param_data.src_tok), param_ty_inst, ); }, else => unreachable, } }, }; const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func); try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_index), arg_ref); const arg_ty = sema.typeOf(arg_ref); if (arg_ty.zigTypeTag(zcu) == .NoReturn) { // This terminates argument analysis. return arg_ref; } const arg_is_comptime = switch (param_tag) { .param_comptime, .param_anytype_comptime => true, .param, .param_anytype => try sema.typeRequiresComptime(arg_ty), else => unreachable, }; if (arg_is_comptime) { if (try sema.resolveValue(arg_ref)) |arg_val| { comptime_args[arg_index] = arg_val.toIntern(); child_sema.inst_map.putAssumeCapacityNoClobber( param_inst, Air.internedToRef(arg_val.toIntern()), ); } else switch (param_tag) { .param_comptime, .param_anytype_comptime, => return sema.failWithOwnedErrorMsg(block, msg: { const arg_src = args_info.argSrc(block, arg_index); const msg = try sema.errMsg(arg_src, "runtime-known argument passed to comptime parameter", .{}); errdefer msg.destroy(sema.gpa); const param_src = child_block.tokenOffset(switch (param_tag) { .param_comptime => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok.src_tok, .param_anytype_comptime => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.src_tok, else => unreachable, }); try child_sema.errNote(param_src, msg, "declared comptime here", .{}); break :msg msg; }), .param, .param_anytype, => return sema.failWithOwnedErrorMsg(block, msg: { const arg_src = args_info.argSrc(block, arg_index); const msg = try sema.errMsg(arg_src, "runtime-known argument passed to parameter of comptime-only type", .{}); errdefer msg.destroy(sema.gpa); const param_src = child_block.tokenOffset(switch (param_tag) { .param => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok.src_tok, .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.src_tok, else => unreachable, }); try child_sema.errNote(param_src, msg, "declared here", .{}); try sema.explainWhyTypeIsComptime(msg, arg_src, arg_ty); break :msg msg; }), else => unreachable, } } else { // The parameter is runtime-known. 
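// Runtime-known parameter: it stays in the instantiation's signature and
// lowers to an `arg` AIR instruction, keeping its source name for debug info
// unless the owner module is stripped.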
const param_name: Zir.NullTerminatedString = switch (param_tag) { .param_anytype => fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].str_tok.start, .param => name: { const inst_data = fn_zir.instructions.items(.data)[@intFromEnum(param_inst)].pl_tok; const extra = fn_zir.extraData(Zir.Inst.Param, inst_data.payload_index); break :name extra.data.name; }, else => unreachable, }; child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{ .tag = .arg, .data = .{ .arg = .{ .ty = Air.internedToRef(arg_ty.toIntern()), .name = if (child_block.ownerModule().strip) .none else @enumFromInt(try sema.appendAirString(fn_zir.nullTerminatedString(param_name))), } }, })); try child_block.params.append(sema.arena, .{ .ty = arg_ty.toIntern(), // This is the type after coercion .is_comptime = false, // We're adding only runtime args to the instantiation .name = param_name, }); runtime_args.appendAssumeCapacity(arg_ref); } } // We've already handled parameters, so don't resolve the whole body. Instead, just // do the instructions after the params (i.e. the func itself). const new_func_inst = try child_sema.resolveInlineBody(&child_block, fn_info.param_body[args_info.count()..], fn_info.param_body_inst); const callee_index = (child_sema.resolveConstDefinedValue(&child_block, LazySrcLoc.unneeded, new_func_inst, undefined) catch unreachable).toIntern(); const callee = zcu.funcInfo(callee_index); callee.maxBranchQuota(ip, sema.branch_quota); // Make a runtime call to the new function, making sure to omit the comptime args. const func_ty = Type.fromInterned(callee.ty); const func_ty_info = zcu.typeToFunc(func_ty).?; // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) { return error.ComptimeReturn; } // Similarly, if the call evaluated to a generic type we need to instead // call it inline. if (func_ty_info.is_generic or func_ty_info.cc == .Inline) { return error.GenericPoison; } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); switch (sema.owner.unwrap()) { .cau => {}, .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) { ip.funcSetCallsOrAwaitsErrorableFn(owner_func); }, } try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index })); try zcu.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len); const result = try block.addInst(.{ .tag = call_tag, .data = .{ .pl_op = .{ .operand = Air.internedToRef(callee_index), .payload = sema.addExtraAssumeCapacity(Air.Call{ .args_len = @intCast(runtime_args.items.len), }), } }, }); sema.appendRefsAssumeCapacity(runtime_args.items); // `child_sema` is owned by us, so just take its exports. 
try sema.exports.appendSlice(sema.gpa, child_sema.exports.items); if (ensure_result_used) { try sema.ensureResultUsed(block, sema.typeOf(result), call_src); } if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } if (func_ty.fnReturnType(zcu).isNoReturn(zcu)) { _ = try block.addNoOp(.unreach); return .unreachable_value; } return result; } fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const tuple = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| tuple, else => return, }; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { try sema.resolveTupleLazyValues(block, src, Type.fromInterned(field_ty)); if (field_val == .none) continue; // TODO: mutate in intern pool _ = try sema.resolveLazyValue(Value.fromInterned(field_val)); } } fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const int_type = sema.code.instructions.items(.data)[@intFromEnum(inst)].int_type; const ty = try sema.pt.intType(int_type.signedness, int_type.bit_count); return Air.internedToRef(ty.toIntern()); } fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); const child_type = try sema.resolveType(block, operand_src, inst_data.operand); if (child_type.zigTypeTag(mod) == .Opaque) { return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(pt)}); } else if (child_type.zigTypeTag(mod) == .Null) { return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(pt)}); } const opt_type = try pt.optionalType(child_type.toIntern()); return Air.internedToRef(opt_type.toIntern()); } fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const bin = sema.code.instructions.items(.data)[@intFromEnum(inst)].bin; const maybe_wrapped_indexable_ty = sema.resolveType(block, LazySrcLoc.unneeded, bin.lhs) catch |err| switch (err) { // Since this is a ZIR instruction that returns a type, encountering // generic poison should not result in a failed compilation, but the // generic poison type. This prevents unnecessary failures when // constructing types at compile-time. 
error.GenericPoison => return .generic_poison_type, else => |e| return e, }; const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod); try indexable_ty.resolveFields(pt); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod); return Air.internedToRef(elem_type.toIntern()); } else { const elem_type = indexable_ty.elemType2(mod); return Air.internedToRef(elem_type.toIntern()); } } fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const maybe_wrapped_ptr_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) { error.GenericPoison => return .generic_poison_type, else => |e| return e, }; const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(mod); assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction const elem_ty = ptr_ty.childType(mod); if (elem_ty.toIntern() == .anyopaque_type) { // The pointer's actual child type is effectively unknown, so it makes // sense to represent it with a generic poison. return .generic_poison_type; } return Air.internedToRef(ptr_ty.childType(mod).toIntern()); } fn zirIndexablePtrElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(un_node.src_node); const ptr_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) { error.GenericPoison => return .generic_poison_type, else => |e| return e, }; try sema.checkMemOperand(block, src, ptr_ty); const elem_ty = switch (ptr_ty.ptrSize(mod)) { .Slice, .Many, .C => ptr_ty.childType(mod), .One => ptr_ty.childType(mod).childType(mod), }; return Air.internedToRef(elem_ty.toIntern()); } fn zirVectorElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const vec_ty = sema.resolveType(block, LazySrcLoc.unneeded, un_node.operand) catch |err| switch (err) { // Since this is a ZIR instruction that returns a type, encountering // generic poison should not result in a failed compilation, but the // generic poison type. This prevents unnecessary failures when // constructing types at compile-time. 
error.GenericPoison => return .generic_poison_type, else => |e| return e, }; if (!vec_ty.isVector(mod)) { return sema.fail(block, block.nodeOffset(un_node.src_node), "expected vector type, found '{}'", .{vec_ty.fmt(pt)}); } return Air.internedToRef(vec_ty.childType(mod).toIntern()); } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const len_src = block.builtinCallArgSrc(inst_data.src_node, 0); const elem_type_src = block.builtinCallArgSrc(inst_data.src_node, 1); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, .{ .needed_comptime_reason = "vector length must be comptime-known", })); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try sema.pt.vectorType(.{ .len = len, .child = elem_type.toIntern(), }); return Air.internedToRef(vector_type.toIntern()); } fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node }); const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node }); const len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, .{ .needed_comptime_reason = "array length must be comptime-known", }); const elem_type = try sema.resolveType(block, elem_src, extra.rhs); try sema.validateArrayElemType(block, elem_type, elem_src); const array_ty = try sema.pt.arrayType(.{ .len = len, .child = elem_type.toIntern(), }); return Air.internedToRef(array_ty.toIntern()); } fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data; const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node }); const sentinel_src = block.src(.{ .node_offset_array_type_sentinel = inst_data.src_node }); const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node }); const len = try sema.resolveInt(block, len_src, extra.len, Type.usize, .{ .needed_comptime_reason = "array length must be comptime-known", }); const elem_type = try sema.resolveType(block, elem_src, extra.elem_type); try sema.validateArrayElemType(block, elem_type, elem_src); const uncasted_sentinel = try sema.resolveInst(extra.sentinel); const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src); const sentinel_val = try sema.resolveConstDefinedValue(block, sentinel_src, sentinel, .{ .needed_comptime_reason = "array sentinel value must be comptime-known", }); const array_ty = try sema.pt.arrayType(.{ .len = len, .sentinel = sentinel_val.toIntern(), .child = elem_type.toIntern(), }); return Air.internedToRef(array_ty.toIntern()); } fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { const pt = sema.pt; const mod = pt.zcu; if (elem_type.zigTypeTag(mod) == .Opaque) { return sema.fail(block, 
elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(pt)}); } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } } fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; if (true) { return sema.failWithUseOfAsync(block, block.nodeOffset(inst_data.src_node)); } const mod = sema.mod; const operand_src = block.src(.{ .node_offset_anyframe_type = inst_data.src_node }); const return_type = try sema.resolveType(block, operand_src, inst_data.operand); const anyframe_type = try mod.anyframeType(return_type); return Air.internedToRef(anyframe_type.toIntern()); } fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const error_set = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ error_set.fmt(pt), }); } try sema.validateErrorUnionPayloadType(block, payload, rhs_src); const err_union_ty = try pt.errorUnionType(error_set, payload); return Air.internedToRef(err_union_ty.toIntern()); } fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void { const pt = sema.pt; const mod = pt.zcu; if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ payload_ty.fmt(pt), }); } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ payload_ty.fmt(pt), }); } } fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try pt.zcu.intern_pool.getOrPutString( sema.gpa, pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); _ = try pt.getErrorValue(name); // Create an error set type with only this error value, and return the value. 
const error_set_type = try pt.singleErrorSetType(name); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = name, } }))); } fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); const err_int_ty = try pt.errorIntType(); if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) { return pt.undefRef(err_int_ty); } const err_name = ip.indexToKey(val.toIntern()).err.name; return Air.internedToRef((try pt.intValue( err_int_ty, try pt.getErrorValue(err_name), )).toIntern()); } const op_ty = sema.typeOf(uncasted_operand); switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) { .anyerror_type => {}, else => |err_set_ty_index| { const names = ip.indexToKey(err_set_ty_index).error_set_type.names; switch (names.len) { 0 => return Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()), 1 => return pt.intRef(err_int_ty, ip.getErrorValueIfExists(names.get(ip)[0]).?), else => {}, } }, } try sema.requireRuntimeBlock(block, src, operand_src); return block.addBitCast(err_int_ty, operand); } fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); const uncasted_operand = try sema.resolveInst(extra.operand); const err_int_ty = try pt.errorIntType(); const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt)); if (int > len: { const mutate = &ip.global_error_set.mutate; mutate.map.mutex.lock(); defer mutate.map.mutex.unlock(); break :len mutate.names.len; } or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = .anyerror_type, .name = ip.global_error_set.shared.names.acquire().view().items(.@"0")[int - 1], } }))); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); const zero_val = Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bool_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, src, ok, .invalid_error_code); } return block.addInst(.{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = .anyerror_type, .operand = operand, } }, }); } fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const inst_data = 
sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) { const msg = msg: { const msg = try sema.errMsg(lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "'||' merges error sets; 'or' performs boolean OR", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_ty.zigTypeTag(mod) != .ErrorSet) return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(pt)}); if (rhs_ty.zigTypeTag(mod) != .ErrorSet) return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(pt)}); // Anything merged with anyerror is anyerror. if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { return .anyerror_type; } if (ip.isInferredErrorSetType(lhs_ty.toIntern())) { switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) { // isAnyError might have changed from a false negative to a true // positive after resolution. .anyerror_type => return .anyerror_type, else => {}, } } if (ip.isInferredErrorSetType(rhs_ty.toIntern())) { switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) { // isAnyError might have changed from a false negative to a true // positive after resolution. 
.anyerror_type => return .anyerror_type, else => {}, } } const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty); return Air.internedToRef(err_set_ty.toIntern()); } fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = inst_data.get(sema.code); return Air.internedToRef((try pt.intern(.{ .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, pt.tid, name, .no_embedded_nulls), }))); } fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { try operand_ty.resolveFields(pt); const tag_ty = operand_ty.unionTagType(mod) orelse { return sema.fail( block, operand_src, "untagged union '{}' cannot be converted to integer", .{operand_ty.fmt(pt)}, ); }; break :blk try sema.unionToTag(block, tag_ty, operand, operand_src); }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{ operand_ty.fmt(pt), }); }, }; const enum_tag_ty = sema.typeOf(enum_tag); const int_tag_ty = enum_tag_ty.intTagType(mod); // TODO: use correct solution // https://github.com/ziglang/zig/issues/15909 if (enum_tag_ty.enumFieldCount(mod) == 0 and !enum_tag_ty.isNonexhaustiveEnum(mod)) { return sema.fail(block, operand_src, "cannot use @intFromEnum on empty enum '{}'", .{ enum_tag_ty.fmt(pt), }); } if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { return Air.internedToRef((try pt.getCoerced(opv, int_tag_ty)).toIntern()); } if (try sema.resolveValue(enum_tag)) |enum_tag_val| { if (enum_tag_val.isUndef(mod)) { return pt.undefRef(int_tag_ty); } const val = try enum_tag_val.intFromEnum(enum_tag_ty, pt); return Air.internedToRef(val.toIntern()); } try sema.requireRuntimeBlock(block, src, operand_src); return block.addBitCast(int_tag_ty, enum_tag); } fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@enumFromInt"); const operand = try sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag(mod) != .Enum) { return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(pt)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.resolveValue(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum(mod)) { const int_tag_ty = dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern()); } return sema.fail(block, src, "int value '{}' out of range of non-exhaustive enum '{}'", .{ int_val.fmtValueSema(pt, sema), dest_ty.fmt(pt), });
} if (int_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!(try sema.enumHasInt(dest_ty, int_val))) { return sema.fail(block, src, "enum '{}' has no tag with value '{}'", .{ dest_ty.fmt(pt), int_val.fmtValueSema(pt, sema), }); } return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern()); } if (dest_ty.intTagType(mod).zigTypeTag(mod) == .ComptimeInt) { return sema.failWithNeededComptime(block, operand_src, .{ .needed_comptime_reason = "value being cast to enum with 'comptime_int' tag type must be comptime-known", }); } if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| { const result = Air.internedToRef(opv.toIntern()); // The operand is runtime-known but the result is comptime-known. In // this case we still need a safety check. // TODO add a safety check here. We can't use is_named_enum_value - // it needs to convert the enum back to an int and make sure it equals the operand int. return result; } try sema.requireRuntimeBlock(block, src, operand_src); const result = try block.addTyOp(.intcast, dest_ty, operand); if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and mod.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, result); try sema.addSafetyCheck(block, src, ok, .invalid_enum_value); } return result; } /// Pointer in, pointer out. fn zirOptionalPayloadPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, safety_check: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const optional_ptr = try sema.resolveInst(inst_data.operand); const src = block.nodeOffset(inst_data.src_node); return sema.analyzeOptionalPayloadPtr(block, src, optional_ptr, safety_check, false); } fn analyzeOptionalPayloadPtr( sema: *Sema, block: *Block, src: LazySrcLoc, optional_ptr: Air.Inst.Ref, safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag(zcu) == .Pointer); const opt_type = optional_ptr_ty.childType(zcu); if (opt_type.zigTypeTag(zcu) != .Optional) { return sema.failWithExpectedOptionalType(block, src, opt_type); } const child_type = opt_type.optionalChild(zcu); const child_pointer = try pt.ptrTypeSema(.{ .child = child_type.toIntern(), .flags = .{ .is_const = optional_ptr_ty.isConstPtr(zcu), .address_space = optional_ptr_ty.ptrAddressSpace(zcu), }, }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { if (initializing) { if (sema.isComptimeMutablePtr(ptr_val)) { // Set the optional to non-null at comptime. // If the payload is OPV, we must use that value instead of undef. const payload_val = try sema.typeHasOnePossibleValue(child_type) orelse try pt.undefValue(child_type); const opt_val = try pt.intern(.{ .opt = .{ .ty = opt_type.toIntern(), .val = payload_val.toIntern(), } }); try sema.storePtrVal(block, src, ptr_val, Value.fromInterned(opt_val), opt_type); } else { // Emit runtime instructions to set the optional non-null bit.
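// A sketch of the kind of source that can reach this branch (assuming the
// optional lives in runtime memory and is initialized in place):
//
//     var opt: ?[3]u32 = null;
//     opt = .{ 1, 2, 3 };
//
// Result-location semantics may first set the non-null bit via
// `optional_payload_ptr_set` and then write the payload elements through
// the returned payload pointer.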
const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr); } return Air.internedToRef((try ptr_val.ptrOptPayload(pt)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(zcu)) { return sema.fail(block, src, "unable to unwrap null", .{}); } return Air.internedToRef((try ptr_val.ptrOptPayload(pt)).toIntern()); } } try sema.requireRuntimeBlock(block, src, null); if (safety_check and block.wantSafety()) { const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr); try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null); } if (initializing) { const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr); return opt_payload_ptr; } else { return block.addTyOp(.optional_payload_ptr, child_pointer, optional_ptr); } } /// Value in, value out. fn zirOptionalPayload( sema: *Sema, block: *Block, inst: Zir.Inst.Index, safety_check: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const result_ty = switch (operand_ty.zigTypeTag(mod)) { .Optional => operand_ty.optionalChild(mod), .Pointer => t: { if (operand_ty.ptrSize(mod) != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; const ptr_info = operand_ty.ptrInfo(mod); break :t try pt.ptrTypeSema(.{ .child = ptr_info.child, .flags = .{ .alignment = ptr_info.flags.alignment, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, }, }); }, else => return sema.failWithExpectedOptionalType(block, src, operand_ty), }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { return if (val.optionalValue(mod)) |payload| Air.internedToRef(payload.toIntern()) else sema.fail(block, src, "unable to unwrap null", .{}); } try sema.requireRuntimeBlock(block, src, null); if (safety_check and block.wantSafety()) { const is_non_null = try block.addUnOp(.is_non_null, operand); try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null); } return block.addTyOp(.optional_payload, result_ty, operand); } /// Value in, value out fn zirErrUnionPayload( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_src = src; const err_union_ty = sema.typeOf(operand); if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(pt), }); } return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false); } fn analyzeErrUnionPayload( sema: *Sema, block: *Block, src: LazySrcLoc, err_union_ty: Type, operand: Air.Inst.Ref, operand_src: LazySrcLoc, safety_check: bool, ) 
CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.getErrorName(mod).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } return Air.internedToRef(mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload); } try sema.requireRuntimeBlock(block, src, null); // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err, .is_non_err); } return block.addTyOp(.unwrap_errunion_payload, payload_ty, operand); } /// Pointer in, pointer out. fn zirErrUnionPayloadPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = block.nodeOffset(inst_data.src_node); return sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } fn analyzeErrUnionPayloadPtr( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(zcu) == .Pointer); if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.childType(zcu).fmt(pt), }); } const err_union_ty = operand_ty.childType(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu); const operand_pointer_ty = try pt.ptrTypeSema(.{ .child = payload_ty.toIntern(), .flags = .{ .is_const = operand_ty.isConstPtr(zcu), .address_space = operand_ty.ptrAddressSpace(zcu), }, }); if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { if (initializing) { if (sema.isComptimeMutablePtr(ptr_val)) { // Set the error union to non-error at comptime. // If the payload is OPV, we must use that value instead of undef. const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty); const eu_val = try pt.intern(.{ .error_union = .{ .ty = err_union_ty.toIntern(), .val = .{ .payload = payload_val.toIntern() }, } }); try sema.storePtrVal(block, src, ptr_val, Value.fromInterned(eu_val), err_union_ty); } else { // Emit runtime instructions to set the error union error code. try sema.requireRuntimeBlock(block, src, null); const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr); } return Air.internedToRef((try ptr_val.ptrEuPayload(pt)).toIntern()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { if (val.getErrorName(zcu).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } return Air.internedToRef((try ptr_val.ptrEuPayload(pt)).toIntern()); } } try sema.requireRuntimeBlock(block, src, null); // If the error set has no fields then no safety check is needed. 
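// For instance, unwrapping a value of type `error{}!u32` can never fail,
// so the `is_non_err_ptr` check below would be vacuously true and is
// skipped.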
if (safety_check and block.wantSafety() and !err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } if (initializing) { const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr); return eu_payload_ptr; } else { return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand); } } /// Value in, value out fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); return sema.analyzeErrUnionCode(block, src, operand); } fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.fmt(pt), }); } const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = result_ty.toIntern(), .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, } }))); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.unwrap_errunion_err, result_ty, operand); } /// Pointer in, value out fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); return sema.analyzeErrUnionCodePtr(block, src, operand); } fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.childType(mod).fmt(pt), }); } const result_ty = operand_ty.childType(mod).errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { assert(val.getErrorName(mod) != .none); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = result_ty.toIntern(), .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, } }))); } } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } fn zirFunc( sema: *Sema, block: *Block, inst: Zir.Inst.Index, inferred_error_set: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index); const target = zcu.getTarget(); const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = inst_data.src_node }); var extra_index = extra.end; const ret_ty: Type = switch 
(extra.data.ret_body_len) { 0 => Type.void, 1 => blk: { const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| { break :blk ret_ty; } else |err| switch (err) { error.GenericPoison => { break :blk Type.generic_poison; }, else => |e| return e, } }, else => blk: { const ret_ty_body = sema.code.bodySlice(extra_index, extra.data.ret_body_len); extra_index += ret_ty_body.len; const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, .{ .needed_comptime_reason = "return type must be comptime-known", }); break :blk ret_ty_val.toType(); }, }; var src_locs: Zir.Inst.Func.SrcLocs = undefined; const has_body = extra.data.body_len != 0; if (has_body) { extra_index += extra.data.body_len; src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } // If this instruction has a body, then it's a function declaration, and we decide // the callconv based on whether it is exported. Otherwise, the callconv defaults // to `.Unspecified`. const cc: std.builtin.CallingConvention = if (has_body) cc: { const func_decl_cau = if (sema.generic_owner != .none) cau: { const generic_owner_fn = zcu.funcInfo(sema.generic_owner); // The generic owner definitely has a `Cau` for the corresponding function declaration. const generic_owner_nav = ip.getNav(generic_owner_fn.owner_nav); break :cau generic_owner_nav.analysis_owner.unwrap().?; } else sema.owner.unwrap().cau; const fn_is_exported = exported: { const decl_inst = ip.getCau(func_decl_cau).zir_index.resolve(ip) orelse return error.AnalysisFail; const zir_decl = sema.code.getDeclaration(decl_inst)[0]; break :exported zir_decl.flags.is_export; }; break :cc if (fn_is_exported) .C else .Unspecified; } else .Unspecified; return sema.funcCommon( block, inst_data.src_node, inst, .none, target_util.defaultAddressSpace(target, .function), .default, cc, ret_ty, false, inferred_error_set, false, has_body, src_locs, null, 0, false, ); } fn resolveGenericBody( sema: *Sema, block: *Block, src: LazySrcLoc, body: []const Zir.Inst.Index, func_inst: Zir.Inst.Index, dest_ty: Type, reason: NeededComptimeReason, ) !Value { assert(body.len != 0); const err = err: { // Make sure any nested param instructions don't clobber our work. const prev_params = block.params; const prev_no_partial_func_type = sema.no_partial_func_ty; const prev_generic_owner = sema.generic_owner; const prev_generic_call_src = sema.generic_call_src; block.params = .{}; sema.no_partial_func_ty = true; sema.generic_owner = .none; sema.generic_call_src = LazySrcLoc.unneeded; defer { block.params = prev_params; sema.no_partial_func_ty = prev_no_partial_func_type; sema.generic_owner = prev_generic_owner; sema.generic_call_src = prev_generic_call_src; } const uncasted = sema.resolveInlineBody(block, body, func_inst) catch |err| break :err err; const result = sema.coerce(block, dest_ty, uncasted, src) catch |err| break :err err; const val = sema.resolveConstDefinedValue(block, src, result, reason) catch |err| break :err err; return val; }; switch (err) { error.GenericPoison => { if (dest_ty.toIntern() == .type_type) { return Value.generic_poison_type; } else { return Value.generic_poison; } }, else => |e| return e, } } /// Given a library name, examines if the library name should end up in /// `link.File.Options.system_libs` table (for example, libc is always /// specified via dedicated flag `link_libc` instead), /// and puts it there if it doesn't exist. 
/// It also dupes the library name, which can then be saved as part of the /// respective `Decl` (either `ExternFn` or `Var`). /// The lifetime of the duped library name is tied to the lifetime of `Module`. /// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`). fn handleExternLibName( sema: *Sema, block: *Block, src_loc: LazySrcLoc, lib_name: []const u8, ) CompileError!void { blk: { const pt = sema.pt; const mod = pt.zcu; const comp = mod.comp; const target = mod.getTarget(); log.debug("extern fn symbol expected in lib '{s}'", .{lib_name}); if (target.is_libc_lib_name(lib_name)) { if (!comp.config.link_libc) { return sema.fail( block, src_loc, "dependency on libc must be explicitly specified in the build command", .{}, ); } break :blk; } if (target.is_libcpp_lib_name(lib_name)) { if (!comp.config.link_libcpp) return sema.fail( block, src_loc, "dependency on libc++ must be explicitly specified in the build command", .{}, ); break :blk; } if (mem.eql(u8, lib_name, "unwind")) { if (!comp.config.link_libunwind) return sema.fail( block, src_loc, "dependency on libunwind must be explicitly specified in the build command", .{}, ); break :blk; } if (!target.isWasm() and !block.ownerModule().pic) { return sema.fail( block, src_loc, "dependency on dynamic library '{s}' requires enabling Position Independent Code; fixed by '-l{s}' or '-fPIC'", .{ lib_name, lib_name }, ); } comp.addLinkLib(lib_name) catch |err| { return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{ lib_name, @errorName(err), }); }; } } /// These are calling conventions that are confirmed to work with variadic functions. /// Any calling convention not included here either has not yet been verified to work with /// variadic functions or is known not to support them. const calling_conventions_supporting_var_args = [_]std.builtin.CallingConvention{ .C, }; fn callConvSupportsVarArgs(cc: std.builtin.CallingConvention) bool { return for (calling_conventions_supporting_var_args) |supported_cc| { if (cc == supported_cc) return true; } else false; } fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: std.builtin.CallingConvention) CompileError!void { const CallingConventionsSupportingVarArgsList = struct { pub fn format(_: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { _ = fmt; _ = options; for (calling_conventions_supporting_var_args, 0..)
|cc_inner, i| { if (i != 0) try writer.writeAll(", "); try writer.print("'.{s}'", .{@tagName(cc_inner)}); } } }; if (!callConvSupportsVarArgs(cc)) { const msg = msg: { const msg = try sema.errMsg(src, "variadic function does not support '.{s}' calling convention", .{@tagName(cc)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "supported calling conventions: {}", .{CallingConventionsSupportingVarArgsList{}}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } const Section = union(enum) { generic, default, explicit: InternPool.NullTerminatedString, }; fn funcCommon( sema: *Sema, block: *Block, src_node_offset: i32, func_inst: Zir.Inst.Index, /// null means generic poison alignment: ?Alignment, /// null means generic poison address_space: ?std.builtin.AddressSpace, section: Section, /// null means generic poison cc: ?std.builtin.CallingConvention, /// this might be Type.generic_poison bare_return_type: Type, var_args: bool, inferred_error_set: bool, is_extern: bool, has_body: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, noalias_bits: u32, is_noinline: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const gpa = sema.gpa; const target = zcu.getTarget(); const ip = &zcu.intern_pool; const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = src_node_offset }); const cc_src = block.src(.{ .node_offset_fn_type_cc = src_node_offset }); const func_src = block.nodeOffset(src_node_offset); var is_generic = bare_return_type.isGenericPoison() or alignment == null or address_space == null or section == .generic or cc == null; if (var_args) { if (is_generic) { return sema.fail(block, func_src, "generic function cannot be variadic", .{}); } try sema.checkCallConvSupportsVarArgs(block, cc_src, cc.?); } const is_source_decl = sema.generic_owner == .none; // In the case of generic calling convention, or generic alignment, we use // default values which are only meaningful for the generic function, *not* // the instantiation, which can depend on comptime parameters. // Related proposal: https://github.com/ziglang/zig/issues/11834 const cc_resolved = cc orelse .Unspecified; var comptime_bits: u32 = 0; for (block.params.items(.ty), block.params.items(.is_comptime), 0..) 
|param_ty_ip, param_is_comptime, i| { const param_ty = Type.fromInterned(param_ty_ip); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @as(u1, @truncate(noalias_bits >> index)) != 0; }; const param_src = block.src(.{ .fn_proto_param = .{ .fn_proto_node_offset = src_node_offset, .param_index = @intCast(i), } }); const requires_comptime = try sema.typeRequiresComptime(param_ty); if (param_is_comptime or requires_comptime) { comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error } const this_generic = param_ty.isGenericPoison(); is_generic = is_generic or this_generic; if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)}); } if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)}); } if (!param_ty.isValidParamType(zcu)) { const opaque_str = if (param_ty.zigTypeTag(zcu) == .Opaque) "opaque " else ""; return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{ opaque_str, param_ty.fmt(pt), }); } if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ param_ty.fmt(pt), @tagName(cc_resolved), }); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, param_src, param_ty, .param_ty); try sema.addDeclaredHereNote(msg, param_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (is_source_decl and requires_comptime and !param_is_comptime and has_body and !block.is_comptime) { const msg = msg: { const msg = try sema.errMsg(param_src, "parameter of type '{}' must be declared comptime", .{ param_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, param_src, param_ty); try sema.addDeclaredHereNote(msg, param_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (is_source_decl and !this_generic and is_noalias and !(param_ty.zigTypeTag(zcu) == .Pointer or param_ty.isPtrLikeOptional(zcu))) { return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{}); } switch (cc_resolved) { .Interrupt => if (target.cpu.arch.isX86()) { const err_code_size = target.ptrBitWidth(); switch (i) { 0 => if (param_ty.zigTypeTag(zcu) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}), 1 => if (param_ty.bitSize(pt) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}), else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}), } } else return sema.fail(block, param_src, "parameters are not allowed with 'Interrupt' calling convention", .{}), .Signal => return sema.fail(block, param_src, "parameters are not allowed with 'Signal' calling convention", .{}), else => {}, } } const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type); const ret_poison = 
bare_return_type.isGenericPoison(); const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; const param_types = block.params.items(.ty); if (!is_source_decl) { assert(has_body); assert(!is_generic); assert(comptime_bits == 0); assert(cc != null); assert(section != .generic); assert(address_space != null); assert(!var_args); if (inferred_error_set) { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); } const func_index = try ip.getFuncInstance(gpa, pt.tid, .{ .param_types = param_types, .noalias_bits = noalias_bits, .bare_return_type = bare_return_type.toIntern(), .cc = cc_resolved, .alignment = alignment.?, .section = switch (section) { .generic => unreachable, .default => .none, .explicit => |x| x.toOptional(), }, .is_noinline = is_noinline, .inferred_error_set = inferred_error_set, .generic_owner = sema.generic_owner, .comptime_args = sema.comptime_args, }); return finishFunc( sema, block, func_index, .none, ret_poison, bare_return_type, ret_ty_src, cc_resolved, is_source_decl, ret_ty_requires_comptime, func_inst, cc_src, is_noinline, is_generic, final_is_generic, ); } const section_name: InternPool.OptionalNullTerminatedString = switch (section) { .generic => .none, .default => .none, .explicit => |name| name.toOptional(), }; if (inferred_error_set) { assert(!is_extern); assert(has_body); if (!ret_poison) try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); const func_index = try ip.getFuncDeclIes(gpa, pt.tid, .{ .owner_nav = sema.getOwnerCauNav(), .param_types = param_types, .noalias_bits = noalias_bits, .comptime_bits = comptime_bits, .bare_return_type = bare_return_type.toIntern(), .cc = cc, .alignment = alignment, .section_is_generic = section == .generic, .addrspace_is_generic = address_space == null, .is_var_args = var_args, .is_generic = final_is_generic, .is_noinline = is_noinline, .zir_body_inst = try block.trackZir(func_inst), .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @as(u16, @truncate(src_locs.columns)), .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), }); // func_decl functions take ownership of the `Nav` of Sema's owner `Cau`.
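// (Concretely: the declaration's `Nav` is resolved to the new function
// value below, and alignment/section/addrspace are recorded on the `Nav`
// itself rather than in the function type.)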
ip.resolveNavValue(sema.getOwnerCauNav(), .{ .val = func_index, .alignment = alignment orelse .none, .@"linksection" = section_name, .@"addrspace" = address_space orelse .generic, }); return finishFunc( sema, block, func_index, .none, ret_poison, bare_return_type, ret_ty_src, cc_resolved, is_source_decl, ret_ty_requires_comptime, func_inst, cc_src, is_noinline, is_generic, final_is_generic, ); } const func_ty = try ip.getFuncType(gpa, pt.tid, .{ .param_types = param_types, .noalias_bits = noalias_bits, .comptime_bits = comptime_bits, .return_type = bare_return_type.toIntern(), .cc = cc, .section_is_generic = section == .generic, .addrspace_is_generic = address_space == null, .is_var_args = var_args, .is_generic = final_is_generic, .is_noinline = is_noinline, }); if (is_extern) { assert(comptime_bits == 0); assert(cc != null); assert(alignment != null); assert(section != .generic); assert(address_space != null); assert(!is_generic); if (opt_lib_name) |lib_name| try sema.handleExternLibName(block, block.src(.{ .node_offset_lib_name = src_node_offset, }), lib_name); const func_index = try pt.getExtern(.{ .name = sema.getOwnerCauNavName(), .ty = func_ty, .lib_name = try ip.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls), .is_const = true, .is_threadlocal = false, .is_weak_linkage = false, .alignment = alignment orelse .none, .@"addrspace" = address_space orelse .generic, .zir_index = sema.getOwnerCauDeclInst(), // `declaration` instruction .owner_nav = undefined, // ignored by `getExtern` }); // Note that unlike function declarations, extern functions don't touch the // Sema's owner Cau's owner Nav. The alignment etc. were passed above. return finishFunc( sema, block, func_index, func_ty, ret_poison, bare_return_type, ret_ty_src, cc_resolved, is_source_decl, ret_ty_requires_comptime, func_inst, cc_src, is_noinline, is_generic, final_is_generic, ); } if (has_body) { const func_index = try ip.getFuncDecl(gpa, pt.tid, .{ .owner_nav = sema.getOwnerCauNav(), .ty = func_ty, .cc = cc, .is_noinline = is_noinline, .zir_body_inst = try block.trackZir(func_inst), .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @as(u16, @truncate(src_locs.columns)), .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), }); // func_decl functions take ownership of the `Nav` of Sema's owner `Cau`.
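// (This is the same `Nav` handoff as the inferred-error-set path above,
// but the error set here is concrete, so the plain `func_decl` variant
// suffices.)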
ip.resolveNavValue(sema.getOwnerCauNav(), .{ .val = func_index, .alignment = alignment orelse .none, .@"linksection" = section_name, .@"addrspace" = address_space orelse .generic, }); return finishFunc( sema, block, func_index, func_ty, ret_poison, bare_return_type, ret_ty_src, cc_resolved, is_source_decl, ret_ty_requires_comptime, func_inst, cc_src, is_noinline, is_generic, final_is_generic, ); } return finishFunc( sema, block, .none, func_ty, ret_poison, bare_return_type, ret_ty_src, cc_resolved, is_source_decl, ret_ty_requires_comptime, func_inst, cc_src, is_noinline, is_generic, final_is_generic, ); } fn finishFunc( sema: *Sema, block: *Block, opt_func_index: InternPool.Index, func_ty: InternPool.Index, ret_poison: bool, bare_return_type: Type, ret_ty_src: LazySrcLoc, cc_resolved: std.builtin.CallingConvention, is_source_decl: bool, ret_ty_requires_comptime: bool, func_inst: Zir.Inst.Index, cc_src: LazySrcLoc, is_noinline: bool, is_generic: bool, final_is_generic: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const gpa = sema.gpa; const target = mod.getTarget(); const return_type: Type = if (opt_func_index == .none or ret_poison) bare_return_type else Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index))); if (!return_type.isValidReturnType(mod)) { const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; return sema.fail(block, ret_ty_src, "{s}return type '{}' not allowed", .{ opaque_str, return_type.fmt(pt), }); } if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) { const msg = msg: { const msg = try sema.errMsg(ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ return_type.fmt(pt), @tagName(cc_resolved), }); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src, return_type, .ret_ty); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } // If the return type is comptime-only but not dependent on parameters then // all parameter types also need to be comptime. 
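// For example (language-level sketch): `fn f(x: u32) type` is rejected
// because `type` is comptime-only, while `fn f(comptime x: u32) type` is
// accepted.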
if (is_source_decl and opt_func_index != .none and ret_ty_requires_comptime and !block.is_comptime) comptime_check: { for (block.params.items(.is_comptime)) |is_comptime| { if (!is_comptime) break; } else break :comptime_check; const msg = try sema.errMsg( ret_ty_src, "function with comptime-only return type '{}' requires all parameters to be comptime", .{return_type.fmt(pt)}, ); try sema.explainWhyTypeIsComptime(msg, ret_ty_src, return_type); const tags = sema.code.instructions.items(.tag); const data = sema.code.instructions.items(.data); const param_body = sema.code.getParamBody(func_inst); for ( block.params.items(.is_comptime), block.params.items(.name), param_body[0..block.params.len], ) |is_comptime, name_nts, param_index| { if (!is_comptime) { const param_src = block.tokenOffset(switch (tags[@intFromEnum(param_index)]) { .param => data[@intFromEnum(param_index)].pl_tok.src_tok, .param_anytype => data[@intFromEnum(param_index)].str_tok.src_tok, else => unreachable, }); const name = sema.code.nullTerminatedString(name_nts); if (name.len != 0) { try sema.errNote(param_src, msg, "param '{s}' is required to be comptime", .{name}); } else { try sema.errNote(param_src, msg, "param is required to be comptime", .{}); } } } return sema.failWithOwnedErrorMsg(block, msg); } switch (cc_resolved) { .Interrupt, .Signal => if (return_type.zigTypeTag(mod) != .Void and return_type.zigTypeTag(mod) != .NoReturn) { return sema.fail(block, ret_ty_src, "function with calling convention '{s}' must return 'void' or 'noreturn'", .{@tagName(cc_resolved)}); }, .Inline => if (is_noinline) { return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{}); }, else => {}, } const arch = target.cpu.arch; if (@as(?[]const u8, switch (cc_resolved) { .Unspecified, .C, .Naked, .Async, .Inline => null, .Interrupt => switch (arch) { .x86, .x86_64, .avr, .msp430 => null, else => "x86, x86_64, AVR, and MSP430", }, .Signal => switch (arch) { .avr => null, else => "AVR", }, .Stdcall, .Fastcall, .Thiscall => switch (arch) { .x86 => null, else => "x86", }, .Vectorcall => switch (arch) { .x86, .aarch64, .aarch64_be => null, else => "x86 and AArch64", }, .APCS, .AAPCS, .AAPCSVFP => switch (arch) { .arm, .armeb, .aarch64, .aarch64_be, .thumb, .thumbeb => null, else => "ARM", }, .SysV, .Win64 => switch (arch) { .x86_64 => null, else => "x86_64", }, .Kernel => switch (arch) { .nvptx, .nvptx64, .amdgcn, .spirv, .spirv32, .spirv64 => null, else => "nvptx, amdgcn and SPIR-V", }, .Fragment, .Vertex => switch (arch) { .spirv, .spirv32, .spirv64 => null, else => "SPIR-V", }, })) |allowed_platform| { return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{ @tagName(cc_resolved), allowed_platform, @tagName(arch), }); } if (is_generic and sema.no_partial_func_ty) return error.GenericPoison; if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can // lower this fn type. 
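// (When error return tracing is active, code generated for errorable
// functions manipulates a `std.builtin.StackTrace` at runtime, so the
// backend must know that struct's layout; see `wantErrorReturnTracing`
// above.)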
const unresolved_stack_trace_ty = try pt.getBuiltinType("StackTrace"); try unresolved_stack_trace_ty.resolveFields(pt); } return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty); } fn zirParam( sema: *Sema, block: *Block, inst: Zir.Inst.Index, comptime_syntax: bool, ) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_tok; const src = block.tokenOffset(inst_data.src_tok); const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index); const param_name: Zir.NullTerminatedString = extra.data.name; const body = sema.code.bodySlice(extra.end, extra.data.body_len); const param_ty = param_ty: { const err = err: { // Make sure any nested param instructions don't clobber our work. const prev_params = block.params; const prev_no_partial_func_type = sema.no_partial_func_ty; const prev_generic_owner = sema.generic_owner; const prev_generic_call_src = sema.generic_call_src; block.params = .{}; sema.no_partial_func_ty = true; sema.generic_owner = .none; sema.generic_call_src = LazySrcLoc.unneeded; defer { block.params = prev_params; sema.no_partial_func_ty = prev_no_partial_func_type; sema.generic_owner = prev_generic_owner; sema.generic_call_src = prev_generic_call_src; } if (sema.resolveInlineBody(block, body, inst)) |param_ty_inst| { if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| { break :param_ty param_ty; } else |err| break :err err; } else |err| break :err err; }; switch (err) { error.GenericPoison => { // The type is not available until the generic instantiation. // We resolve the param instruction to a poison value and // insert an anytype parameter. try block.params.append(sema.arena, .{ .ty = .generic_poison_type, .is_comptime = comptime_syntax, .name = param_name, }); sema.inst_map.putAssumeCapacity(inst, .generic_poison); return; }, else => |e| return e, } }; const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax; try block.params.append(sema.arena, .{ .ty = param_ty.toIntern(), .is_comptime = comptime_syntax, .name = param_name, }); if (is_comptime) { // If this is a comptime parameter, we can add a constant generic_poison // since this is also a generic parameter. sema.inst_map.putAssumeCapacity(inst, .generic_poison); } else { // Otherwise we need a dummy runtime instruction. const result_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(sema.gpa, .{ .tag = .alloc, .data = .{ .ty = param_ty }, }); sema.inst_map.putAssumeCapacity(inst, result_index.toRef()); } } fn zirParamAnytype( sema: *Sema, block: *Block, inst: Zir.Inst.Index, comptime_syntax: bool, ) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const param_name: Zir.NullTerminatedString = inst_data.start; // We are evaluating a generic function without any comptime args provided.
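// For example (language-level sketch): in `fn f(x: anytype) void`, the
// type of `x` cannot be known here; it is recorded as generic poison and
// only becomes concrete when a call site instantiates `f`.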
try block.params.append(sema.arena, .{ .ty = .generic_poison_type, .is_comptime = comptime_syntax, .name = param_name, }); sema.inst_map.putAssumeCapacity(inst, .generic_poison); } fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data; return sema.analyzeAs(block, src, extra.dest_type, extra.operand, false); } fn zirAsShiftOperand(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data; return sema.analyzeAs(block, src, extra.dest_type, extra.operand, true); } fn analyzeAs( sema: *Sema, block: *Block, src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, no_cast_to_comptime_int: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand = try sema.resolveInst(zir_operand); const operand_air_inst = sema.resolveInst(zir_dest_type) catch |err| switch (err) { error.GenericPoison => return operand, else => |e| return e, }; const dest_ty = sema.analyzeAsType(block, src, operand_air_inst) catch |err| switch (err) { error.GenericPoison => return operand, else => |e| return e, }; const dest_ty_tag = dest_ty.zigTypeTagOrPoison(mod) catch |err| switch (err) { error.GenericPoison => return operand, }; if (dest_ty_tag == .Opaque) { return sema.fail(block, src, "cannot cast to opaque type '{}'", .{dest_ty.fmt(pt)}); } if (dest_ty_tag == .NoReturn) { return sema.fail(block, src, "cannot cast to noreturn", .{}); } const is_ret = if (zir_dest_type.toIndex()) |ptr_index| sema.code.instructions.items(.tag)[@intFromEnum(ptr_index)] == .ret_type else false; return sema.coerceExtra(block, dest_ty, operand, src, .{ .is_ret = is_ret, .no_cast_to_comptime_int = no_cast_to_comptime_int }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }; } fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const zcu = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const ptr_ty = operand_ty.scalarType(zcu); const is_vector = operand_ty.zigTypeTag(zcu) == .Vector; if (!ptr_ty.isPtrAtRuntime(zcu)) { return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}); } const pointee_ty = ptr_ty.childType(zcu); if (try sema.typeRequiresComptime(ptr_ty)) { const msg = msg: { const msg = try sema.errMsg(ptr_src, "comptime-only type '{}' has no pointer address", .{pointee_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, ptr_src, pointee_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (try sema.resolveValueIntable(operand)) |operand_val| ct: { if (!is_vector) { if (operand_val.isUndef(zcu)) { return Air.internedToRef((try pt.undefValue(Type.usize)).toIntern()); } return Air.internedToRef((try 
pt.intValue( Type.usize, (try operand_val.getUnsignedIntAdvanced(pt, .sema)).?, )).toIntern()); } const len = operand_ty.vectorLen(zcu); const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len }); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..) |*new_elem, i| { const ptr_val = try operand_val.elemValue(pt, i); if (ptr_val.isUndef(zcu)) { new_elem.* = (try pt.undefValue(Type.usize)).toIntern(); continue; } const addr = try ptr_val.getUnsignedIntAdvanced(pt, .sema) orelse { // A vector element wasn't an integer pointer. This is a runtime operation. break :ct; }; new_elem.* = (try pt.intValue( Type.usize, addr, )).toIntern(); } return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); } try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src); try sema.validateRuntimeValue(block, ptr_src, operand); if (!is_vector) { return block.addUnOp(.int_from_ptr, operand); } const len = operand_ty.vectorLen(zcu); const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len }); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) |*new_elem, i| { const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addUnOp(.int_from_ptr, old_elem); } return block.addAggregateInit(dest_ty, new_elems); } fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); const object = try sema.resolveInst(extra.lhs); return sema.fieldVal(block, src, object, field_name, field_name_src); } fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, pt.tid, sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); const object_ptr = try sema.resolveInst(extra.lhs); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false); } fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name_init = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = try mod.intern_pool.getOrPutString( sema.gpa, pt.tid, 
sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); const object_ptr = try sema.resolveInst(extra.lhs); const struct_ty = sema.typeOf(object_ptr).childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct, .Union => { return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, true); }, else => { return sema.failWithStructInitNotSupported(block, src, struct_ty); }, } } fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.builtinCallArgSrc(inst_data.src_node, 1); const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object = try sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", }); return sema.fieldVal(block, src, object, field_name, field_name_src); } fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const field_name_src = block.builtinCallArgSrc(inst_data.src_node, 1); const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = try sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", }); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false); } fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@intCast"); const operand = try sema.resolveInst(extra.rhs); return sema.intCast(block, block.nodeOffset(inst_data.src_node), dest_ty, src, operand, operand_src, true); } fn intCast( sema: *Sema, block: *Block, src: LazySrcLoc, dest_ty: Type, dest_ty_src: LazySrcLoc, operand: Air.Inst.Ref, operand_src: LazySrcLoc, runtime_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); if (try sema.isComptimeKnown(operand)) { return sema.coerce(block, dest_ty, operand, operand_src); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{}); } try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src); const is_vector = dest_ty.zigTypeTag(mod) == .Vector; if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| { // requirement: intCast(u0, input) iff input == 0 if (runtime_safety 
and block.wantSafety()) { try sema.requireRuntimeBlock(block, src, operand_src); const wanted_info = dest_scalar_ty.intInfo(mod); const wanted_bits = wanted_info.bits; if (wanted_bits == 0) { const ok = if (is_vector) ok: { const zeros = try sema.splat(operand_ty, try pt.intValue(operand_scalar_ty, 0)); const zero_inst = Air.internedToRef(zeros.toIntern()); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ .tag = .reduce, .data = .{ .reduce = .{ .operand = is_in_range, .operation = .And } }, }); break :ok all_in_range; } else ok: { const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern()); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; try sema.addSafetyCheck(block, src, ok, .cast_truncated_data); } } return Air.internedToRef(opv.toIntern()); } try sema.requireRuntimeBlock(block, src, operand_src); if (runtime_safety and block.wantSafety()) { const actual_info = operand_scalar_ty.intInfo(mod); const wanted_info = dest_scalar_ty.intInfo(mod); const actual_bits = actual_info.bits; const wanted_bits = wanted_info.bits; const actual_value_bits = actual_bits - @intFromBool(actual_info.signedness == .signed); const wanted_value_bits = wanted_bits - @intFromBool(wanted_info.signedness == .signed); // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(pt, operand_scalar_ty); const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = Air.internedToRef(dest_max_val.toIntern()); if (actual_info.signedness == .signed) { const diff = try block.addBinOp(.sub_wrap, dest_max, operand); // Reinterpret the sign-bit as part of the value. This will make // negative differences (`operand` > `dest_max`) appear too big. const unsigned_scalar_operand_ty = try pt.intType(.unsigned, actual_bits); const unsigned_operand_ty = if (is_vector) try pt.vectorType(.{ .len = dest_ty.vectorLen(mod), .child = unsigned_scalar_operand_ty.toIntern(), }) else unsigned_scalar_operand_ty; const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff); // If the destination type is signed, then we need to double its // range to account for negative values. const dest_range_val = if (wanted_info.signedness == .signed) range_val: { const one_scalar = try pt.intValue(unsigned_scalar_operand_ty, 1); const one = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = unsigned_operand_ty.toIntern(), .storage = .{ .repeated_elem = one_scalar.toIntern() }, } })) else one_scalar; const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, pt); break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined); } else try pt.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = Air.internedToRef(dest_range_val.toIntern()); const ok = if (is_vector) ok: { const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte); const all_in_range = try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = is_in_range, .operation = .And, } }, }); break :ok all_in_range; } else ok: { const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range); break :ok is_in_range; }; // TODO negative_to_unsigned? 
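// Worked example of the wrap-and-compare trick above, with hypothetical
// values: for `@as(u8, @intCast(@as(i16, -1)))`, diff = 255 - (-1) = 256;
// bitcast to u16 it is still 256, exceeding dest_range = 255, so `ok` is
// false. For an operand of 300, diff = 255 - 300 = -45, which reinterprets
// as u16 65491, again out of range. Any operand between 0 and 255 inclusive
// yields a diff between 0 and 255 and passes the check below.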
try sema.addSafetyCheck(block, src, ok, .cast_truncated_data); } else { const ok = if (is_vector) ok: { const is_in_range = try block.addCmpVector(operand, dest_max, .lte); const all_in_range = try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = is_in_range, .operation = .And, } }, }); break :ok all_in_range; } else ok: { const is_in_range = try block.addBinOp(.cmp_lte, operand, dest_max); break :ok is_in_range; }; try sema.addSafetyCheck(block, src, ok, .cast_truncated_data); } } else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) { // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { const scalar_zero = try pt.intValue(operand_scalar_ty, 0); const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = Air.internedToRef(zero_val.toIntern()); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = is_in_range, .operation = .And, } }, }); break :ok all_in_range; } else ok: { const zero_inst = Air.internedToRef((try pt.intValue(operand_ty, 0)).toIntern()); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; try sema.addSafetyCheck(block, src, ok, .negative_to_unsigned); } } return block.addTyOp(.intcast, dest_ty, operand); } fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@bitCast"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); switch (dest_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .ErrorSet, .ErrorUnion, .Fn, .Frame, .NoReturn, .Null, .Opaque, .Optional, .Type, .Undefined, .Void, => return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}), .Enum => { const msg = msg: { const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(pt)}), else => {}, } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, .Pointer => { const msg = msg: { const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(pt)}), .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(pt)}), else => {}, } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, .Struct, .Union => if (dest_ty.containerLayout(mod) == .auto) { const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, }; return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a 
guaranteed in-memory layout", .{ dest_ty.fmt(pt), container, }); }, .Array, .Bool, .Float, .Int, .Vector, => {}, } switch (operand_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .ErrorSet, .ErrorUnion, .Fn, .Frame, .NoReturn, .Null, .Opaque, .Optional, .Type, .Undefined, .Void, => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}), .Enum => { const msg = msg: { const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (dest_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(pt)}), else => {}, } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, .Pointer => { const msg = msg: { const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (dest_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(pt)}), .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(pt)}), else => {}, } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, .Struct, .Union => if (operand_ty.containerLayout(mod) == .auto) { const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, }; return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{ operand_ty.fmt(pt), container, }); }, .Array, .Bool, .Float, .Int, .Vector, => {}, } return sema.bitCast(block, dest_ty, operand, block.nodeOffset(inst_data.src_node), operand_src); } fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@floatCast"); const dest_scalar_ty = dest_ty.scalarType(mod); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = operand_ty.scalarType(mod); try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src); const is_vector = dest_ty.zigTypeTag(mod) == .Vector; const target = mod.getTarget(); const dest_is_comptime_float = switch (dest_scalar_ty.zigTypeTag(mod)) { .ComptimeFloat => true, .Float => false, else => return sema.fail( block, src, "expected float or vector type, found '{}'", .{dest_ty.fmt(pt)}, ), }; switch (operand_scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.fail( block, operand_src, "expected float or vector type, found '{}'", .{operand_ty.fmt(pt)}, ), } if (try sema.resolveValue(operand)) |operand_val| { if (!is_vector) { return Air.internedToRef((try operand_val.floatCast(dest_ty, pt)).toIntern()); } const vec_len = operand_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(InternPool.Index, vec_len); for (new_elems, 0..) 
|*new_elem, i| { const old_elem = try operand_val.elemValue(pt, i); new_elem.* = (try old_elem.floatCast(dest_scalar_ty, pt)).toIntern(); } return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); } if (dest_is_comptime_float) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{}); } try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), operand_src); const src_bits = operand_scalar_ty.floatBits(target); const dst_bits = dest_scalar_ty.floatBits(target); if (dst_bits >= src_bits) { return sema.coerce(block, dest_ty, operand, operand_src); } if (!is_vector) { return block.addTyOp(.fptrunc, dest_ty, operand); } const vec_len = operand_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len); for (new_elems, 0..) |*new_elem, i| { const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem); } return block.addAggregateInit(dest_ty, new_elems); } fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = try sema.resolveInst(extra.lhs); const elem_index = try sema.resolveInst(extra.rhs); return sema.elemVal(block, src, array, elem_index, src, false); } fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const elem_index_src = block.src(.{ .node_offset_array_access_index = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = try sema.resolveInst(extra.lhs); const uncoerced_elem_index = try sema.resolveInst(extra.rhs); const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src); return sema.elemVal(block, src, array, elem_index, elem_index_src, true); } fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm; const array = try sema.resolveInst(inst_data.operand); const elem_index = try sema.pt.intRef(Type.usize, inst_data.idx); return sema.elemVal(block, LazySrcLoc.unneeded, array, elem_index, LazySrcLoc.unneeded, false); } fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const elem_index = try sema.resolveInst(extra.rhs); const indexable_ty = sema.typeOf(array_ptr); if (indexable_ty.zigTypeTag(mod) != .Pointer) { const capture_src = block.src(.{ .for_capture_from_input = inst_data.src_node }); const msg = msg: { const msg = try 
sema.errMsg(capture_src, "pointer capture of non-pointer type '{}'", .{ indexable_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); if (indexable_ty.isIndexable(mod)) { try sema.errNote(src, msg, "consider using '&' here", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } return sema.elemPtrOneLayerOnly(block, src, array_ptr, elem_index, src, false, false); } fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const elem_index_src = block.src(.{ .node_offset_array_access_index = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const uncoerced_elem_index = try sema.resolveInst(extra.rhs); const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true); } fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.ptr); const elem_index = try pt.intRef(Type.usize, extra.index); const array_ty = sema.typeOf(array_ptr).childType(mod); switch (array_ty.zigTypeTag(mod)) { .Array, .Vector => {}, else => if (!array_ty.isTuple(mod)) { return sema.failWithArrayInitNotSupported(block, src, array_ty); }, } return sema.elemPtr(block, src, array_ptr, elem_index, src, true, true); } fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const start = try sema.resolveInst(extra.start); const ptr_src = block.src(.{ .node_offset_slice_ptr = inst_data.src_node }); const start_src = block.src(.{ .node_offset_slice_start = inst_data.src_node }); const end_src = block.src(.{ .node_offset_slice_end = inst_data.src_node }); return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, LazySrcLoc.unneeded, ptr_src, start_src, end_src, false); } fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const start = try sema.resolveInst(extra.start); const end = try sema.resolveInst(extra.end); const ptr_src = block.src(.{ .node_offset_slice_ptr = inst_data.src_node }); const start_src = block.src(.{ .node_offset_slice_start = inst_data.src_node }); const end_src = block.src(.{ .node_offset_slice_end = inst_data.src_node }); return sema.analyzeSlice(block, src,
array_ptr, start, end, .none, LazySrcLoc.unneeded, ptr_src, start_src, end_src, false); } fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const sentinel_src = block.src(.{ .node_offset_slice_sentinel = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const start = try sema.resolveInst(extra.start); const end: Air.Inst.Ref = if (extra.end == .none) .none else try sema.resolveInst(extra.end); const sentinel = try sema.resolveInst(extra.sentinel); const ptr_src = block.src(.{ .node_offset_slice_ptr = inst_data.src_node }); const start_src = block.src(.{ .node_offset_slice_start = inst_data.src_node }); const end_src = block.src(.{ .node_offset_slice_end = inst_data.src_node }); return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src, ptr_src, start_src, end_src, false); } fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.SliceLength, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const start = try sema.resolveInst(extra.start); const len = try sema.resolveInst(extra.len); const sentinel = if (extra.sentinel == .none) .none else try sema.resolveInst(extra.sentinel); const ptr_src = block.src(.{ .node_offset_slice_ptr = inst_data.src_node }); const start_src = block.src(.{ .node_offset_slice_start = extra.start_src_node_offset }); const end_src = block.src(.{ .node_offset_slice_end = inst_data.src_node }); const sentinel_src: LazySrcLoc = if (sentinel == .none) LazySrcLoc.unneeded else block.src(.{ .node_offset_slice_sentinel = inst_data.src_node }); return sema.analyzeSlice(block, src, array_ptr, start, len, sentinel, sentinel_src, ptr_src, start_src, end_src, true); } /// Holds common data used when analyzing or resolving switch prong bodies, /// including setting up captures. const SwitchProngAnalysis = struct { sema: *Sema, /// The block containing the `switch_block` itself. parent_block: *Block, /// The raw switch operand value (*not* the condition). Always defined. operand: Air.Inst.Ref, /// May be `undefined` if no prong has a by-ref capture. operand_ptr: Air.Inst.Ref, /// The switch condition value. For unions, `operand` is the union and `cond` is its tag. cond: Air.Inst.Ref, /// If this switch is on an error set, this is the type to assign to the /// `else` prong. If `null`, the prong should be unreachable. else_error_ty: ?Type, /// The index of the `switch_block` instruction itself. switch_block_inst: Zir.Inst.Index, /// The dummy index into which inline tag captures should be placed. May be /// undefined if no prong has a tag capture. tag_capture_inst: Zir.Inst.Index, /// Resolve a switch prong which is determined at comptime to have no peers. /// Uses `resolveBlockBody`. Sets up captures as needed. 
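/// For example (illustrative user code, not part of this file): with a
/// comptime-known operand in `switch (x) { .a => |v| f(v), else => g() }`,
/// only the matching prong's body is analyzed, and the `v` capture is
/// installed in `inst_map` before that body is resolved.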
fn resolveProngComptime( spa: SwitchProngAnalysis, child_block: *Block, prong_type: enum { normal, special }, prong_body: []const Zir.Inst.Index, capture: Zir.Inst.SwitchBlock.ProngInfo.Capture, /// Must use the `switch_capture` field in `offset`. capture_src: LazySrcLoc, /// The set of all values which can reach this prong. May be undefined /// if the prong is special or contains ranges. case_vals: []const Air.Inst.Ref, /// The inline capture of this prong. If this is not an inline prong, /// this is `.none`. inline_case_capture: Air.Inst.Ref, /// Whether this prong has an inline tag capture. If `true`, then /// `inline_case_capture` cannot be `.none`. has_tag_capture: bool, merges: *Block.Merges, ) CompileError!Air.Inst.Ref { const sema = spa.sema; const src = spa.parent_block.nodeOffset( sema.code.instructions.items(.data)[@intFromEnum(spa.switch_block_inst)].pl_node.src_node, ); if (has_tag_capture) { const tag_ref = try spa.analyzeTagCapture(child_block, capture_src, inline_case_capture); sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref); } defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst)); switch (capture) { .none => { return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges); }, .by_val, .by_ref => { const capture_ref = try spa.analyzeCapture( child_block, capture == .by_ref, prong_type == .special, capture_src, case_vals, inline_case_capture, ); if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) { // This prong should be unreachable! return .unreachable_value; } sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref); defer assert(sema.inst_map.remove(spa.switch_block_inst)); return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges); }, } } /// Analyze a switch prong which may have peers at runtime. /// Uses `analyzeBodyRuntimeBreak`. Sets up captures as needed. fn analyzeProngRuntime( spa: SwitchProngAnalysis, case_block: *Block, prong_type: enum { normal, special }, prong_body: []const Zir.Inst.Index, capture: Zir.Inst.SwitchBlock.ProngInfo.Capture, /// Must use the `switch_capture` field in `offset`. capture_src: LazySrcLoc, /// The set of all values which can reach this prong. May be undefined /// if the prong is special or contains ranges. case_vals: []const Air.Inst.Ref, /// The inline capture of this prong. If this is not an inline prong, /// this is `.none`. inline_case_capture: Air.Inst.Ref, /// Whether this prong has an inline tag capture. If `true`, then /// `inline_case_capture` cannot be `.none`. 
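/// For example, the `tag` in `inline else => |payload, tag| ...`
/// (illustrative user syntax).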
has_tag_capture: bool, ) CompileError!void { const sema = spa.sema; if (has_tag_capture) { const tag_ref = try spa.analyzeTagCapture(case_block, capture_src, inline_case_capture); sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref); } defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst)); switch (capture) { .none => { return sema.analyzeBodyRuntimeBreak(case_block, prong_body); }, .by_val, .by_ref => { const capture_ref = try spa.analyzeCapture( case_block, capture == .by_ref, prong_type == .special, capture_src, case_vals, inline_case_capture, ); if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) { // No need to analyze any further, the prong is unreachable return; } sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref); defer assert(sema.inst_map.remove(spa.switch_block_inst)); return sema.analyzeBodyRuntimeBreak(case_block, prong_body); }, } } fn analyzeTagCapture( spa: SwitchProngAnalysis, block: *Block, capture_src: LazySrcLoc, inline_case_capture: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const sema = spa.sema; const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(spa.operand); if (operand_ty.zigTypeTag(mod) != .Union) { const tag_capture_src: LazySrcLoc = .{ .base_node_inst = capture_src.base_node_inst, .offset = .{ .switch_tag_capture = capture_src.offset.switch_capture }, }; return sema.fail(block, tag_capture_src, "cannot capture tag of non-union type '{}'", .{ operand_ty.fmt(pt), }); } assert(inline_case_capture != .none); return inline_case_capture; } fn analyzeCapture( spa: SwitchProngAnalysis, block: *Block, capture_byref: bool, is_special_prong: bool, capture_src: LazySrcLoc, case_vals: []const Air.Inst.Ref, inline_case_capture: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const sema = spa.sema; const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const zir_datas = sema.code.instructions.items(.data); const switch_node_offset = zir_datas[@intFromEnum(spa.switch_block_inst)].pl_node.src_node; const operand_ty = sema.typeOf(spa.operand); const operand_ptr_ty = if (capture_byref) sema.typeOf(spa.operand_ptr) else undefined; const operand_src = block.src(.{ .node_offset_switch_operand = switch_node_offset }); if (inline_case_capture != .none) { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inline_case_capture, undefined) catch unreachable; if (operand_ty.zigTypeTag(zcu) == .Union) { const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?); const union_obj = zcu.typeToUnion(operand_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (capture_byref) { const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !operand_ptr_ty.ptrIsMutable(zcu), .is_volatile = operand_ptr_ty.isVolatilePtr(zcu), .address_space = operand_ptr_ty.ptrAddressSpace(zcu), }, }); if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| { return Air.internedToRef((try union_ptr.ptrField(field_index, pt)).toIntern()); } return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty); } else { if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |union_val| { const tag_and_val = ip.indexToKey(union_val.toIntern()).un; return Air.internedToRef(tag_and_val.val); } return block.addStructFieldVal(spa.operand, field_index, field_ty); } } else if (capture_byref) { return sema.uavRef(item_val.toIntern()); } else { return inline_case_capture; } } if 
(is_special_prong) { if (capture_byref) { return spa.operand_ptr; } switch (operand_ty.zigTypeTag(zcu)) { .ErrorSet => if (spa.else_error_ty) |ty| { return sema.bitCast(block, ty, spa.operand, operand_src, null); } else { try sema.analyzeUnreachable(block, operand_src, false); return .unreachable_value; }, else => return spa.operand, } } switch (operand_ty.zigTypeTag(zcu)) { .Union => { const union_obj = zcu.typeToUnion(operand_ty).?; const first_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable; const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?; const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]); const field_indices = try sema.arena.alloc(u32, case_vals.len); for (case_vals, field_indices) |item, *field_idx| { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable; field_idx.* = zcu.unionTagFieldIndex(union_obj, item_val).?; } // Fast path: if all the operands are the same type already, we don't need to run // peer type resolution (PTR)! This will also allow us to emit simpler code. const same_types = for (field_indices[1..]) |field_idx| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); if (!field_ty.eql(first_field_ty, zcu)) break false; } else true; const capture_ty = if (same_types) first_field_ty else capture_ty: { // We need values to run PTR on, so make a bunch of undef constants. const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (dummy_captures, field_indices) |*dummy, field_idx| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); dummy.* = try pt.undefRef(field_ty); } const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len); for (case_srcs, 0..) |*case_src, i| { case_src.* = .{ .base_node_inst = capture_src.base_node_inst, .offset = .{ .switch_case_item = .{ .switch_node_offset = switch_node_offset, .case_idx = capture_src.offset.switch_capture.case_idx, .item_idx = .{ .kind = .single, .index = @intCast(i) }, } }, }; } break :capture_ty sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return error.AnalysisFail; try sema.reparentOwnedErrorMsg(capture_src, msg, "capture group with incompatible types", .{}); return error.AnalysisFail; }, else => |e| return e, }; }; // By-reference captures have some further restrictions which make them easier to emit if (capture_byref) { const operand_ptr_info = operand_ptr_ty.ptrInfo(zcu); const capture_ptr_ty = resolve: { // By-ref captures of heterogeneous types are only allowed if all field // pointer types are peer resolvable to each other. // We need values to run PTR on, so make a bunch of undef constants. const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len); for (field_indices, dummy_captures) |field_idx, *dummy| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); const field_ptr_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = operand_ptr_info.flags.is_const, .is_volatile = operand_ptr_info.flags.is_volatile, .address_space = operand_ptr_info.flags.address_space, .alignment = union_obj.fieldAlign(ip, field_idx), }, }); dummy.* = try pt.undefRef(field_ptr_ty); } const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len); for (case_srcs, 0..)
|*case_src, i| { case_src.* = .{ .base_node_inst = capture_src.base_node_inst, .offset = .{ .switch_case_item = .{ .switch_node_offset = switch_node_offset, .case_idx = capture_src.offset.switch_capture.case_idx, .item_idx = .{ .kind = .single, .index = @intCast(i) }, } }, }; } break :resolve sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return error.AnalysisFail; try sema.errNote(capture_src, msg, "this coercion is only possible when capturing by value", .{}); try sema.reparentOwnedErrorMsg(capture_src, msg, "capture group with incompatible types", .{}); return error.AnalysisFail; }, else => |e| return e, }; }; if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| { if (op_ptr_val.isUndef(zcu)) return pt.undefRef(capture_ptr_ty); const field_ptr_val = try op_ptr_val.ptrField(first_field_index, pt); return Air.internedToRef((try pt.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern()); } try sema.requireRuntimeBlock(block, operand_src, null); return block.addStructFieldPtr(spa.operand_ptr, first_field_index, capture_ptr_ty); } if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| { if (operand_val.isUndef(zcu)) return pt.undefRef(capture_ty); const union_val = ip.indexToKey(operand_val.toIntern()).un; if (Value.fromInterned(union_val.tag).isUndef(zcu)) return pt.undefRef(capture_ty); const uncoerced = Air.internedToRef(union_val.val); return sema.coerce(block, capture_ty, uncoerced, operand_src); } try sema.requireRuntimeBlock(block, operand_src, null); if (same_types) { return block.addStructFieldVal(spa.operand, first_field_index, capture_ty); } // We may have to emit a switch block which coerces the operand to the capture type. // If we can, try to avoid that using in-memory coercions. const first_non_imc = in_mem: { for (field_indices, 0..) |field_idx, i| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) { break :in_mem i; } } // All fields are in-memory coercible to the resolved type! // Just take the first field and bitcast the result. const uncoerced = try block.addStructFieldVal(spa.operand, first_field_index, first_field_ty); return block.addBitCast(capture_ty, uncoerced); }; // By-val capture with heterogeneous types which are not all in-memory coercible to // the resolved capture type. We finally have to fall back to the ugly method. // However, let's first track which operands are in-memory coercible. There may well // be several, and we can squash all of these cases into the same switch prong using // a simple bitcast. We'll make this the 'else' prong. var in_mem_coercible = try std.DynamicBitSet.initFull(sema.arena, field_indices.len); in_mem_coercible.unset(first_non_imc); { const next = first_non_imc + 1; for (field_indices[next..], next..) 
|field_idx, i| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) { in_mem_coercible.unset(i); } } } const capture_block_inst = try block.addInstAsIndex(.{ .tag = .block, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(capture_ty.toIntern()), .payload = undefined, // updated below }, }, }); const prong_count = field_indices.len - in_mem_coercible.count(); const estimated_extra = prong_count * 6; // 2 for Case, 1 item, probably 3 insts var cases_extra = try std.ArrayList(u32).initCapacity(sema.gpa, estimated_extra); defer cases_extra.deinit(); { // Non-bitcast cases var it = in_mem_coercible.iterator(.{ .kind = .unset }); while (it.next()) |idx| { var coerce_block = block.makeSubBlock(); defer coerce_block.instructions.deinit(sema.gpa); const case_src: LazySrcLoc = .{ .base_node_inst = capture_src.base_node_inst, .offset = .{ .switch_case_item = .{ .switch_node_offset = switch_node_offset, .case_idx = capture_src.offset.switch_capture.case_idx, .item_idx = .{ .kind = .single, .index = @intCast(idx) }, } }, }; const field_idx = field_indices[idx]; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); const uncoerced = try coerce_block.addStructFieldVal(spa.operand, field_idx, field_ty); const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src); _ = try coerce_block.addBr(capture_block_inst, coerced); try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(coerce_block.instructions.items.len)); // body_len cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item cases_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items)); // body } } const else_body_len = len: { // 'else' prong uses a bitcast var coerce_block = block.makeSubBlock(); defer coerce_block.instructions.deinit(sema.gpa); const first_imc_item_idx = in_mem_coercible.findFirstSet().?; const first_imc_field_idx = field_indices[first_imc_item_idx]; const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]); const uncoerced = try coerce_block.addStructFieldVal(spa.operand, first_imc_field_idx, first_imc_field_ty); const coerced = try coerce_block.addBitCast(capture_ty, uncoerced); _ = try coerce_block.addBr(capture_block_inst, coerced); try cases_extra.appendSlice(@ptrCast(coerce_block.instructions.items)); break :len coerce_block.instructions.items.len; }; try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + cases_extra.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); const switch_br_inst: u32 = @intCast(sema.air_instructions.len); try sema.air_instructions.append(sema.gpa, .{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = spa.cond, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ .cases_len = @intCast(prong_count), .else_body_len = @intCast(else_body_len), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); // Set up block body sema.air_instructions.items(.data)[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1, }); sema.air_extra.appendAssumeCapacity(switch_br_inst); return capture_block_inst.toRef(); }, .ErrorSet => { if (capture_byref) { return sema.fail( block, capture_src, "error set cannot be 
captured by reference", .{}, ); } if (case_vals.len == 1) { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable; const item_ty = try pt.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?); return sema.bitCast(block, item_ty, spa.operand, operand_src, null); } var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, case_vals.len); for (case_vals) |err| { const err_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, err, undefined) catch unreachable; names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {}); } const error_ty = try pt.errorSetFromUnsortedNames(names.keys()); return sema.bitCast(block, error_ty, spa.operand, operand_src, null); }, else => { // In this case the capture value is just the passed-through value // of the switch condition. if (capture_byref) { return spa.operand_ptr; } else { return spa.operand; } }, } } }; fn switchCond( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { .Type, .Void, .Bool, .Int, .Float, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Pointer, .Fn, .ErrorSet, .Enum, => { if (operand_ty.isSlice(mod)) { return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } return operand; }, .Union => { try operand_ty.resolveFields(pt); const enum_ty = operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(src, "switch on union with no attached enum", .{}); errdefer msg.destroy(sema.gpa); if (operand_ty.srcLocOrNull(mod)) |union_src| { try sema.errNote(union_src, msg, "consider 'union(enum)' here", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; return sema.unionToTag(block, enum_ty, operand, src); }, .ErrorUnion, .NoReturn, .Array, .Struct, .Undefined, .Null, .Optional, .Opaque, .Vector, .Frame, .AnyFrame, => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)}), } } const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, LazySrcLoc); fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const switch_src = block.nodeOffset(inst_data.src_node); const switch_src_node_offset = inst_data.src_node; const switch_operand_src = block.src(.{ .node_offset_switch_operand = switch_src_node_offset }); const else_prong_src = block.src(.{ .node_offset_switch_special_prong = switch_src_node_offset }); const extra = sema.code.extraData(Zir.Inst.SwitchBlockErrUnion, inst_data.payload_index); const main_operand_src = block.src(.{ .node_offset_if_cond = extra.data.main_src_node_offset }); const main_src = block.src(.{ .node_offset_main_token = extra.data.main_src_node_offset }); const raw_operand_val = try sema.resolveInst(extra.data.operand); // AstGen guarantees that the instruction immediately preceding // switch_block_err_union is a dbg_stmt const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1); var header_extra_index: usize = extra.end; const scalar_cases_len = extra.data.bits.scalar_cases_len; const 
multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { const multi_cases_len = sema.code.extra[header_extra_index]; header_extra_index += 1; break :blk multi_cases_len; } else 0; const err_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_uses_err_capture) blk: { const err_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]); header_extra_index += 1; // SwitchProngAnalysis wants inst_map to have space for the error capture. // Note that the normal capture is referred to via the switch block // index, which there is already necessarily space for. try sema.inst_map.ensureSpaceForInstructions(gpa, &.{err_capture_inst}); break :blk err_capture_inst; } else undefined; var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len); defer case_vals.deinit(gpa); const NonError = struct { body: []const Zir.Inst.Index, end: usize, capture: Zir.Inst.SwitchBlock.ProngInfo.Capture, }; const non_error_case: NonError = non_error: { const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]); const extra_body_start = header_extra_index + 1; break :non_error .{ .body = sema.code.bodySlice(extra_body_start, info.body_len), .end = extra_body_start + info.body_len, .capture = info.capture, }; }; const Else = struct { body: []const Zir.Inst.Index, end: usize, is_inline: bool, has_capture: bool, }; const else_case: Else = if (!extra.data.bits.has_else) .{ .body = &.{}, .end = non_error_case.end, .is_inline = false, .has_capture = false, } else special: { const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[non_error_case.end]); const extra_body_start = non_error_case.end + 1; assert(info.capture != .by_ref); assert(!info.has_tag_capture); break :special .{ .body = sema.code.bodySlice(extra_body_start, info.body_len), .end = extra_body_start + info.body_len, .is_inline = info.is_inline, .has_capture = info.capture != .none, }; }; var seen_errors = SwitchErrorSet.init(gpa); defer seen_errors.deinit(); const operand_ty = sema.typeOf(raw_operand_val); const operand_err_set = if (extra.data.bits.payload_is_ref) operand_ty.childType(mod) else operand_ty; if (operand_err_set.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, switch_src, "expected error union type, found '{}'", .{ operand_ty.fmt(pt), }); } const operand_err_set_ty = operand_err_set.errorUnionSet(mod); const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .src_locs = .{}, .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block: Block = .{ .parent = block, .sema = sema, .namespace = block.namespace, .instructions = .{}, .label = &label, .inlining = block.inlining, .is_comptime = block.is_comptime, .comptime_reason = block.comptime_reason, .is_typeof = block.is_typeof, .c_import_buf = block.c_import_buf, .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, .runtime_index = block.runtime_index, .error_return_trace_index = block.error_return_trace_index, .want_safety = block.want_safety, .src_base_inst = block.src_base_inst, .type_name_ctx = block.type_name_ctx, }; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); const resolved_err_set = try sema.resolveInferredErrorSetTy(block, main_src, operand_err_set_ty.toIntern()); if
(Type.fromInterned(resolved_err_set).errorSetIsEmpty(mod)) { return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges); } const else_error_ty: ?Type = try validateErrSetSwitch( sema, block, &seen_errors, &case_vals, operand_err_set_ty, inst_data, scalar_cases_len, multi_cases_len, .{ .body = else_case.body, .end = else_case.end, .src = else_prong_src }, extra.data.bits.has_else, ); var spa: SwitchProngAnalysis = .{ .sema = sema, .parent_block = block, .operand = undefined, // must be set to the unwrapped error code before use .operand_ptr = .none, .cond = raw_operand_val, .else_error_ty = else_error_ty, .switch_block_inst = inst, .tag_capture_inst = undefined, }; if (try sema.resolveDefinedValue(&child_block, main_src, raw_operand_val)) |ov| { const operand_val = if (extra.data.bits.payload_is_ref) (try sema.pointerDeref(&child_block, main_src, ov, operand_ty)).? else ov; if (operand_val.errorUnionIsPayload(mod)) { return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges); } else { const err_val = Value.fromInterned(try pt.intern(.{ .err = .{ .ty = operand_err_set_ty.toIntern(), .name = operand_val.getErrorName(mod).unwrap().?, }, })); spa.operand = if (extra.data.bits.payload_is_ref) try sema.analyzeErrUnionCodePtr(block, switch_operand_src, raw_operand_val) else try sema.analyzeErrUnionCode(block, switch_operand_src, raw_operand_val); if (extra.data.bits.any_uses_err_capture) { sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand); } defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst)); return resolveSwitchComptime( sema, spa, &child_block, try sema.switchCond(block, switch_operand_src, spa.operand), err_val, operand_err_set_ty, switch_src_node_offset, .{ .body = else_case.body, .end = else_case.end, .capture = if (else_case.has_capture) .by_val else .none, .is_inline = else_case.is_inline, .has_tag_capture = false, }, case_vals, scalar_cases_len, multi_cases_len, true, false, ); } } if (scalar_cases_len + multi_cases_len == 0) { if (else_error_ty) |ty| if (ty.errorSetIsEmpty(mod)) { return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges); }; } if (child_block.is_comptime) { _ = try sema.resolveConstDefinedValue(&child_block, main_operand_src, raw_operand_val, .{ .needed_comptime_reason = "condition in comptime switch must be comptime-known", .block_comptime_reason = child_block.comptime_reason, }); unreachable; } const cond = if (extra.data.bits.payload_is_ref) blk: { try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val).elemType2(mod)); const loaded = try sema.analyzeLoad(block, main_src, raw_operand_val, main_src); break :blk try sema.analyzeIsNonErr(block, main_src, loaded); } else blk: { try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val)); break :blk try sema.analyzeIsNonErr(block, main_src, raw_operand_val); }; var sub_block = child_block.makeSubBlock(); sub_block.runtime_loop = null; sub_block.runtime_cond = main_operand_src; sub_block.runtime_index.increment(); sub_block.need_debug_scope = null; // this body is emitted regardless defer sub_block.instructions.deinit(gpa); try sema.analyzeBodyRuntimeBreak(&sub_block, non_error_case.body); const true_instructions = try sub_block.instructions.toOwnedSlice(gpa); defer gpa.free(true_instructions); spa.operand = if (extra.data.bits.payload_is_ref) try sema.analyzeErrUnionCodePtr(&sub_block, switch_operand_src, 
raw_operand_val) else try sema.analyzeErrUnionCode(&sub_block, switch_operand_src, raw_operand_val); if (extra.data.bits.any_uses_err_capture) { sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand); } defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst)); _ = try sema.analyzeSwitchRuntimeBlock( spa, &sub_block, switch_src, try sema.switchCond(block, switch_operand_src, spa.operand), operand_err_set_ty, switch_operand_src, case_vals, .{ .body = else_case.body, .end = else_case.end, .capture = if (else_case.has_capture) .by_val else .none, .is_inline = else_case.is_inline, .has_tag_capture = false, }, scalar_cases_len, multi_cases_len, false, undefined, true, switch_src_node_offset, else_prong_src, undefined, seen_errors, undefined, undefined, undefined, cond_dbg_node_index, true, ); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + true_instructions.len + sub_block.instructions.items.len); _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = cond, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(true_instructions.len), .else_body_len = @intCast(sub_block.instructions.items.len), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items)); return sema.resolveAnalyzedBlock(block, main_src, &child_block, merges, false); } fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_ref: bool) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const src_node_offset = inst_data.src_node; const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset }); const special_prong_src = block.src(.{ .node_offset_switch_special_prong = src_node_offset }); const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); const raw_operand_val: Air.Inst.Ref, const raw_operand_ptr: Air.Inst.Ref = blk: { const maybe_ptr = try sema.resolveInst(extra.data.operand); if (operand_is_ref) { const val = try sema.analyzeLoad(block, src, maybe_ptr, operand_src); break :blk .{ val, maybe_ptr }; } else { break :blk .{ maybe_ptr, undefined }; } }; const operand = try sema.switchCond(block, operand_src, raw_operand_val); // AstGen guarantees that the instruction immediately preceding // switch_block(_ref) is a dbg_stmt const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1); var header_extra_index: usize = extra.end; const scalar_cases_len = extra.data.bits.scalar_cases_len; const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { const multi_cases_len = sema.code.extra[header_extra_index]; header_extra_index += 1; break :blk multi_cases_len; } else 0; const tag_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_has_tag_capture) blk: { const tag_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]); header_extra_index += 1; // SwitchProngAnalysis wants inst_map to have space for the tag capture. // Note that the normal capture is referred to via the switch block // index, which there is already necessarily space for. 
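// For instance (hypothetical user code), when switching on a union:
//
//   switch (u) { inline else => |payload, tag| use(tag, payload), }
//
// references to `tag` in the prong body resolve through this reserved
// `tag_capture_inst` slot.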
try sema.inst_map.ensureSpaceForInstructions(gpa, &.{tag_capture_inst}); break :blk tag_capture_inst; } else undefined; var case_vals = try std.ArrayListUnmanaged(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len); defer case_vals.deinit(gpa); const special_prong = extra.data.bits.specialProng(); const special: SpecialProng = switch (special_prong) { .none => .{ .body = &.{}, .end = header_extra_index, .capture = .none, .is_inline = false, .has_tag_capture = false, }, .under, .@"else" => blk: { const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]); const extra_body_start = header_extra_index + 1; break :blk .{ .body = sema.code.bodySlice(extra_body_start, info.body_len), .end = extra_body_start + info.body_len, .capture = info.capture, .is_inline = info.is_inline, .has_tag_capture = info.has_tag_capture, }; }, }; const maybe_union_ty = sema.typeOf(raw_operand_val); const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?LazySrcLoc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); var range_set = RangeSet.init(gpa, pt); var true_count: u8 = 0; var false_count: u8 = 0; defer { range_set.deinit(); gpa.free(seen_enum_fields); seen_errors.deinit(); } var empty_enum = false; const operand_ty = sema.typeOf(operand); const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet; var else_error_ty: ?Type = null; // Validate usage of '_' prongs. if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { const msg = msg: { const msg = try sema.errMsg( src, "'_' prong only allowed when switching on non-exhaustive enums", .{}, ); errdefer msg.destroy(gpa); try sema.errNote( special_prong_src, msg, "'_' prong here", .{}, ); try sema.errNote( src, msg, "consider using 'else'", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } // Validate for duplicate items, missing else prong, and invalid range. switch (operand_ty.zigTypeTag(mod)) { .Union => unreachable, // handled in `switchCond` .Enum => { seen_enum_fields = try gpa.alloc(?LazySrcLoc, operand_ty.enumFieldCount(mod)); empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod); @memset(seen_enum_fields, null); // `range_set` is used for non-exhaustive enum values that do not correspond to any tags. var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum( block, seen_enum_fields, &range_set, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, .item_idx = .{ .kind = .single, .index = 0 }, } }), )); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; try case_vals.ensureUnusedCapacity(gpa, items.len); for (items, 0..) 
|item_ref, item_i| { case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum( block, seen_enum_fields, &range_set, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .single, .index = @intCast(item_i) }, } }), )); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } const all_tags_handled = for (seen_enum_fields) |seen_src| { if (seen_src == null) break false; } else true; if (special_prong == .@"else") { if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); } else if (!all_tags_handled) { const msg = msg: { const msg = try sema.errMsg( src, "switch must handle all possibilities", .{}, ); errdefer msg.destroy(sema.gpa); for (seen_enum_fields, 0..) |seen_src, i| { if (seen_src != null) continue; const field_name = operand_ty.enumFieldName(i, mod); try sema.addFieldErrNote( operand_ty, i, msg, "unhandled enumeration value: '{}'", .{field_name.fmt(&mod.intern_pool)}, ); } try sema.errNote( operand_ty.srcLoc(mod), msg, "enum '{}' declared here", .{operand_ty.fmt(pt)}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail( block, src, "switch on non-exhaustive enum must include 'else' or '_' prong", .{}, ); } }, .ErrorSet => else_error_ty = try validateErrSetSwitch( sema, block, &seen_errors, &case_vals, operand_ty, inst_data, scalar_cases_len, multi_cases_len, .{ .body = special.body, .end = special.end, .src = special_prong_src }, special_prong == .@"else", ), .Int, .ComptimeInt => { var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt( block, &range_set, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, .item_idx = .{ .kind = .single, .index = 0 }, } }), )); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; try case_vals.ensureUnusedCapacity(gpa, items.len); for (items, 0..) 
|item_ref, item_i| { case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt( block, &range_set, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .single, .index = @intCast(item_i) }, } }), )); } try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len); var range_i: u32 = 0; while (range_i < ranges_len) : (range_i += 1) { const item_first: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const item_last: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const vals = try sema.validateSwitchRange( block, &range_set, item_first, item_last, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .range, .index = @intCast(range_i) }, } }), ); case_vals.appendAssumeCapacity(vals[0]); case_vals.appendAssumeCapacity(vals[1]); } extra_index += info.body_len; } } check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { const min_int = try operand_ty.minInt(pt, operand_ty); const max_int = try operand_ty.maxInt(pt, operand_ty); if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) { if (special_prong == .@"else") { return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); } break :check_range; } } if (special_prong != .@"else") { return sema.fail( block, src, "switch must handle all possibilities", .{}, ); } } }, .Bool => { var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool( block, &true_count, &false_count, item_ref, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, .item_idx = .{ .kind = .single, .index = 0 }, } }), )); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; try case_vals.ensureUnusedCapacity(gpa, items.len); for (items, 0..) 
|item_ref, item_i| { case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool( block, &true_count, &false_count, item_ref, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .single, .index = @intCast(item_i) }, } }), )); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } switch (special_prong) { .@"else" => { if (true_count + false_count == 2) { return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); } }, .under, .none => { if (true_count + false_count < 2) { return sema.fail( block, src, "switch must handle all possibilities", .{}, ); } }, } }, .EnumLiteral, .Void, .Fn, .Pointer, .Type => { if (special_prong != .@"else") { return sema.fail( block, src, "else prong required when switching on type '{}'", .{operand_ty.fmt(pt)}, ); } var seen_values = ValueSrcMap{}; defer seen_values.deinit(gpa); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; extra_index += info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse( block, &seen_values, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, .item_idx = .{ .kind = .single, .index = 0 }, } }), )); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; try case_vals.ensureUnusedCapacity(gpa, items.len); for (items, 0..) 
|item_ref, item_i| { case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse( block, &seen_values, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .single, .index = @intCast(item_i) }, } }), )); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } }, .ErrorUnion, .NoReturn, .Array, .Struct, .Undefined, .Null, .Optional, .Opaque, .Vector, .Frame, .AnyFrame, .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ operand_ty.fmt(pt), }), } const spa: SwitchProngAnalysis = .{ .sema = sema, .parent_block = block, .operand = raw_operand_val, .operand_ptr = raw_operand_ptr, .cond = operand, .else_error_ty = else_error_ty, .switch_block_inst = inst, .tag_capture_inst = tag_capture_inst, }; const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .src_locs = .{}, .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block: Block = .{ .parent = block, .sema = sema, .namespace = block.namespace, .instructions = .{}, .label = &label, .inlining = block.inlining, .is_comptime = block.is_comptime, .comptime_reason = block.comptime_reason, .is_typeof = block.is_typeof, .c_import_buf = block.c_import_buf, .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, .runtime_index = block.runtime_index, .want_safety = block.want_safety, .error_return_trace_index = block.error_return_trace_index, .src_base_inst = block.src_base_inst, .type_name_ctx = block.type_name_ctx, }; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { return resolveSwitchComptime( sema, spa, &child_block, operand, operand_val, operand_ty, src_node_offset, special, case_vals, scalar_cases_len, multi_cases_len, err_set, empty_enum, ); } if (scalar_cases_len + multi_cases_len == 0 and !special.is_inline) { if (empty_enum) { return .void_value; } if (special_prong == .none) { return sema.fail(block, src, "switch must handle all possibilities", .{}); } if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src, false)) { return .unreachable_value; } if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); const ok = try block.addUnOp(.is_named_enum_value, operand); try sema.addSafetyCheck(block, src, ok, .corrupt_switch); } return spa.resolveProngComptime( &child_block, .special, special.body, special.capture, block.src(.{ .switch_capture = .{ .switch_node_offset = src_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), undefined, // case_vals may be undefined for special prongs .none, false, merges, ); } if (child_block.is_comptime) { _ = try sema.resolveConstDefinedValue(&child_block, operand_src, operand, .{ .needed_comptime_reason = "condition in comptime switch must be comptime-known", .block_comptime_reason = child_block.comptime_reason, }); unreachable; } _ = try sema.analyzeSwitchRuntimeBlock( spa, &child_block, src, operand, operand_ty, operand_src, case_vals, special, 
        scalar_cases_len,
        multi_cases_len,
        union_originally,
        maybe_union_ty,
        err_set,
        src_node_offset,
        special_prong_src,
        seen_enum_fields,
        seen_errors,
        range_set,
        true_count,
        false_count,
        cond_dbg_node_index,
        false,
    );
    return sema.resolveAnalyzedBlock(block, src, &child_block, merges, false);
}

const SpecialProng = struct {
    body: []const Zir.Inst.Index,
    end: usize,
    capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    is_inline: bool,
    has_tag_capture: bool,
};

fn analyzeSwitchRuntimeBlock(
    sema: *Sema,
    spa: SwitchProngAnalysis,
    child_block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
    operand_ty: Type,
    operand_src: LazySrcLoc,
    case_vals: std.ArrayListUnmanaged(Air.Inst.Ref),
    special: SpecialProng,
    scalar_cases_len: usize,
    multi_cases_len: usize,
    union_originally: bool,
    maybe_union_ty: Type,
    err_set: bool,
    switch_node_offset: i32,
    special_prong_src: LazySrcLoc,
    seen_enum_fields: []?LazySrcLoc,
    seen_errors: SwitchErrorSet,
    range_set: RangeSet,
    true_count: u8,
    false_count: u8,
    cond_dbg_node_index: Zir.Inst.Index,
    allow_err_code_unwrap: bool,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const block = child_block.parent.?;

    const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
        @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
    var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
    defer cases_extra.deinit(gpa);

    var case_block = child_block.makeSubBlock();
    case_block.runtime_loop = null;
    case_block.runtime_cond = operand_src;
    case_block.runtime_index.increment();
    case_block.need_debug_scope = null; // this body is emitted regardless
    defer case_block.instructions.deinit(gpa);

    var extra_index: usize = special.end;

    var scalar_i: usize = 0;
    while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
        extra_index += 1;
        const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
        extra_index += 1;
        const body = sema.code.bodySlice(extra_index, info.body_len);
        extra_index += info.body_len;

        case_block.instructions.shrinkRetainingCapacity(0);
        case_block.error_return_trace_index = child_block.error_return_trace_index;

        const item = case_vals.items[scalar_i];
        // `item` is already guaranteed to be comptime-known.
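// Why `analyze_body` can be false below, as a sketch (hypothetical user code):
//
//     const U = union(enum) { a: u32, b: noreturn };
//     switch (u) {
//         .a => |x| use(x),
//         .b => {}, // field type is `noreturn`: this prong can never run
//     }
//
// When the operand was originally a union, a prong whose field type is
// `noreturn` is statically unreachable, so its body is skipped and an
// `unreach` terminator is emitted instead.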
const analyze_body = if (union_originally) blk: { const unresolved_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable; const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?; break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) { // nothing to do here } else if (analyze_body) { try spa.analyzeProngRuntime( &case_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, } }), &.{item}, if (info.is_inline) item else .none, info.has_tag_capture, ); } else { _ = try case_block.addNoOp(.unreach); } try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } var is_first = true; var prev_cond_br: Air.Inst.Index = undefined; var first_else_body: []const Air.Inst.Index = &.{}; defer gpa.free(first_else_body); var prev_then_body: []const Air.Inst.Index = &.{}; defer gpa.free(prev_then_body); var cases_len = scalar_cases_len; var case_val_idx: usize = scalar_cases_len; var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1 + items_len; const items = case_vals.items[case_val_idx..][0..items_len]; case_val_idx += items_len; case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; // Generate all possible cases as scalar prongs. if (info.is_inline) { const body_start = extra_index + 2 * ranges_len; const body = sema.code.bodySlice(body_start, info.body_len); var emit_bb = false; var range_i: u32 = 0; while (range_i < ranges_len) : (range_i += 1) { const range_items = case_vals.items[case_val_idx..][0..2]; extra_index += 2; case_val_idx += 2; const item_first_ref = range_items[0]; const item_last_ref = range_items[1]; var item = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_first_ref, undefined) catch unreachable; const item_last = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_last_ref, undefined) catch unreachable; while (item.compareScalar(.lte, item_last, operand_ty, pt)) : ({ // Previous validation has resolved any possible lazy values. 
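// (This is the continuation expression of the unrolling loop.) Sketch of what
// is being unrolled (hypothetical user code): an inline range prong such as
//
//     switch (x) {
//         inline 1...3 => |v| f(v), // `v` is comptime-known per expansion
//     }
//
// expands into one scalar case per value in the range; the statement below
// steps `item` to the next integer, and overflow is impossible because the
// range end was already validated to fit the operand type.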
item = sema.intAddScalar(item, try pt.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) { error.Overflow => unreachable, else => |e| return e, }; }) { cases_len += 1; const item_ref = Air.internedToRef(item.toIntern()); case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; if (emit_bb) try sema.emitBackwardBranch(block, block.src(.{ .switch_case_item = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .range, .index = @intCast(range_i) }, } })); emit_bb = true; try spa.analyzeProngRuntime( &case_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, } }), undefined, // case_vals may be undefined for ranges item_ref, info.has_tag_capture, ); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); if (item.compareScalar(.eq, item_last, operand_ty, pt)) break; } } for (items, 0..) |item, item_i| { cases_len += 1; case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?; break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) try sema.emitBackwardBranch(block, block.src(.{ .switch_case_item = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .single, .index = @intCast(item_i) }, } })); emit_bb = true; if (analyze_body) { try spa.analyzeProngRuntime( &case_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, } }), &.{item}, item, info.has_tag_capture, ); } else { _ = try case_block.addNoOp(.unreach); } try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } extra_index += info.body_len; continue; } var any_ok: Air.Inst.Ref = .none; // If there are any ranges, we have to put all the items into the // else prong. Otherwise, we can take advantage of multiple items // mapping to the same body. 
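// For example (hypothetical user code), the prong
//
//     1, 5...10 => body,
//
// cannot be encoded as plain `switch_br` items because of the range, so it is
// lowered as `(x == 1) or (x >= 5 and x <= 10)` feeding a `cond_br`, with the
// remaining cases in the else branch; a rangeless prong like `1, 2 => body`
// stays a true multi-item `switch_br` case. That is the split below.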
if (ranges_len == 0) { cases_len += 1; const analyze_body = if (union_originally) for (items) |item| { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?; if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else true; const body = sema.code.bodySlice(extra_index, info.body_len); extra_index += info.body_len; if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) { // nothing to do here } else if (analyze_body) { try spa.analyzeProngRuntime( &case_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, } }), items, .none, false, ); } else { _ = try case_block.addNoOp(.unreach); } try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(@intCast(items.len)); cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); for (items) |item| { cases_extra.appendAssumeCapacity(@intFromEnum(item)); } cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } else { for (items) |item| { const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, item); if (any_ok != .none) { any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); } else { any_ok = cmp_ok; } } var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { const range_items = case_vals.items[case_val_idx..][0..2]; extra_index += 2; case_val_idx += 2; const item_first = range_items[0]; const item_last = range_items[1]; // operand >= first and operand <= last const range_first_ok = try case_block.addBinOp( if (case_block.float_mode == .optimized) .cmp_gte_optimized else .cmp_gte, operand, item_first, ); const range_last_ok = try case_block.addBinOp( if (case_block.float_mode == .optimized) .cmp_lte_optimized else .cmp_lte, operand, item_last, ); const range_ok = try case_block.addBinOp( .bool_and, range_first_ok, range_last_ok, ); if (any_ok != .none) { any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); } else { any_ok = range_ok; } } const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = any_ok, .payload = undefined, }, } }); var cond_body = try case_block.instructions.toOwnedSlice(gpa); defer gpa.free(cond_body); case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; const body = sema.code.bodySlice(extra_index, info.body_len); extra_index += info.body_len; if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) { // nothing to do here } else { try spa.analyzeProngRuntime( &case_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, } }), items, .none, false, ); } if (is_first) { is_first = false; first_else_body = cond_body; cond_body = &.{}; } else { try sema.air_extra.ensureUnusedCapacity( gpa, @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len, ); sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(prev_then_body.len), 
.else_body_len = @intCast(cond_body.len), }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cond_body)); } gpa.free(prev_then_body); prev_then_body = try case_block.instructions.toOwnedSlice(gpa); prev_cond_br = new_cond_br; } } var final_else_body: []const Air.Inst.Index = &.{}; if (special.body.len != 0 or !is_first or case_block.wantSafety()) { var emit_bb = false; if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) { .Enum => { if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ operand_ty.fmt(pt), }); } for (seen_enum_fields, 0..) |f, i| { if (f != null) continue; cases_len += 1; const item_val = try pt.enumValueFieldIndex(operand_ty, @intCast(i)); const item_ref = Air.internedToRef(item_val.toIntern()); case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; const analyze_body = if (union_originally) blk: { const field_ty = maybe_union_ty.unionFieldType(item_val, mod).?; break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src); emit_bb = true; if (analyze_body) { try spa.analyzeProngRuntime( &case_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), &.{item_ref}, item_ref, special.has_tag_capture, ); } else { _ = try case_block.addNoOp(.unreach); } try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } }, .ErrorSet => { if (operand_ty.isAnyError(mod)) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ operand_ty.fmt(pt), }); } const error_names = operand_ty.errorSetNames(mod); for (0..error_names.len) |name_index| { const error_name = error_names.get(ip)[name_index]; if (seen_errors.contains(error_name)) continue; cases_len += 1; const item_val = try pt.intern(.{ .err = .{ .ty = operand_ty.toIntern(), .name = error_name, } }); const item_ref = Air.internedToRef(item_val); case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src); emit_bb = true; try spa.analyzeProngRuntime( &case_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), &.{item_ref}, item_ref, special.has_tag_capture, ); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } }, .Int => { var it = try RangeSetUnhandledIterator.init(sema, operand_ty, range_set); while (try it.next()) |cur| { cases_len += 1; const item_ref = Air.internedToRef(cur); 
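// Each `cur` is a value of the operand type not covered by any explicit
// prong; `inline else` materializes one scalar case per such value. E.g.
// (hypothetical user code) for a `u2` operand with prongs `0 => ...` and
// `inline else => ...`, this loop emits the cases 1, 2, and 3.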
case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src); emit_bb = true; try spa.analyzeProngRuntime( &case_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), &.{item_ref}, item_ref, special.has_tag_capture, ); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } }, .Bool => { if (true_count == 0) { cases_len += 1; case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src); emit_bb = true; try spa.analyzeProngRuntime( &case_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), &.{.bool_true}, .bool_true, special.has_tag_capture, ); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } if (false_count == 0) { cases_len += 1; case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src); emit_bb = true; try spa.analyzeProngRuntime( &case_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), &.{.bool_false}, .bool_false, special.has_tag_capture, ); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } }, else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ operand_ty.fmt(pt), }), }; case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { try sema.zirDbgStmt(&case_block, cond_dbg_node_index); const ok = try case_block.addUnOp(.is_named_enum_value, operand); try sema.addSafetyCheck(&case_block, src, ok, .corrupt_switch); } const analyze_body = if (union_originally and !special.is_inline) for (seen_enum_fields, 0..) 
|seen_field, index| { if (seen_field != null) continue; const union_obj = mod.typeToUnion(maybe_union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]); if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else true; if (special.body.len != 0 and err_set and try sema.maybeErrorUnwrap(&case_block, special.body, operand, operand_src, allow_err_code_unwrap)) { // nothing to do here } else if (special.body.len != 0 and analyze_body and !special.is_inline) { try spa.analyzeProngRuntime( &case_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), undefined, // case_vals may be undefined for special prongs .none, false, ); } else { // We still need a terminator in this block, but we have proven // that it is unreachable. if (case_block.wantSafety()) { try sema.zirDbgStmt(&case_block, cond_dbg_node_index); try sema.safetyPanic(&case_block, src, .corrupt_switch); } else { _ = try case_block.addNoOp(.unreach); } } if (is_first) { final_else_body = case_block.instructions.items; } else { try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len + @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len); sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(prev_then_body.len), .else_body_len = @intCast(case_block.instructions.items.len), }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); final_else_body = first_else_body; } } try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + cases_extra.items.len + final_else_body.len); const payload_index = sema.addExtraAssumeCapacity(Air.SwitchBr{ .cases_len = @intCast(cases_len), .else_body_len = @intCast(final_else_body.len), }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cases_extra.items)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(final_else_body)); return try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = payload_index, } }, }); } fn resolveSwitchComptime( sema: *Sema, spa: SwitchProngAnalysis, child_block: *Block, cond_operand: Air.Inst.Ref, operand_val: Value, operand_ty: Type, switch_node_offset: i32, special: SpecialProng, case_vals: std.ArrayListUnmanaged(Air.Inst.Ref), scalar_cases_len: u32, multi_cases_len: u32, err_set: bool, empty_enum: bool, ) CompileError!Air.Inst.Ref { const merges = &child_block.label.?.merges; const resolved_operand_val = try sema.resolveLazyValue(operand_val); var extra_index: usize = special.end; { var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; const body = sema.code.bodySlice(extra_index, info.body_len); extra_index += info.body_len; const item = case_vals.items[scalar_i]; const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable; if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) { if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand); return spa.resolveProngComptime( child_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, 
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, } }), &.{item}, if (info.is_inline) cond_operand else .none, info.has_tag_capture, merges, ); } } } { var multi_i: usize = 0; var case_val_idx: usize = scalar_cases_len; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1 + items_len; const body = sema.code.bodySlice(extra_index + 2 * ranges_len, info.body_len); const items = case_vals.items[case_val_idx..][0..items_len]; case_val_idx += items_len; for (items) |item| { // Validation above ensured these will succeed. const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable; if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) { if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand); return spa.resolveProngComptime( child_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, } }), items, if (info.is_inline) cond_operand else .none, info.has_tag_capture, merges, ); } } var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { const range_items = case_vals.items[case_val_idx..][0..2]; extra_index += 2; case_val_idx += 2; // Validation above ensured these will succeed. const first_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, range_items[0], undefined) catch unreachable; const last_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, range_items[1], undefined) catch unreachable; if ((try sema.compareAll(resolved_operand_val, .gte, first_val, operand_ty)) and (try sema.compareAll(resolved_operand_val, .lte, last_val, operand_ty))) { if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand); return spa.resolveProngComptime( child_block, .normal, body, info.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, } }), undefined, // case_vals may be undefined for ranges if (info.is_inline) cond_operand else .none, info.has_tag_capture, merges, ); } } extra_index += info.body_len; } } if (err_set) try sema.maybeErrorUnwrapComptime(child_block, special.body, cond_operand); if (empty_enum) { return .void_value; } return spa.resolveProngComptime( child_block, .special, special.body, special.capture, child_block.src(.{ .switch_capture = .{ .switch_node_offset = switch_node_offset, .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, } }), undefined, // case_vals may be undefined for special prongs if (special.is_inline) cond_operand else .none, special.has_tag_capture, merges, ); } const RangeSetUnhandledIterator = struct { pt: Zcu.PerThread, cur: ?InternPool.Index, max: InternPool.Index, range_i: usize, ranges: []const RangeSet.Range, limbs: []math.big.Limb, const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128); fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const pt = sema.pt; const int_type = pt.zcu.intern_pool.indexToKey(ty.toIntern()).int_type; const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits); return .{ .pt = pt, .cur = (try ty.minInt(pt, ty)).toIntern(), .max = (try ty.maxInt(pt, ty)).toIntern(), .range_i = 
0, .ranges = range_set.ranges.items, .limbs = if (needed_limbs > preallocated_limbs) try sema.arena.alloc(math.big.Limb, needed_limbs) else &.{}, }; } fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index { if (val == it.max) return null; const int = it.pt.zcu.intern_pool.indexToKey(val).int; switch (int.storage) { inline .u64, .i64 => |val_int| { const next_int = @addWithOverflow(val_int, 1); if (next_int[1] == 0) return (try it.pt.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern(); }, .big_int => {}, .lazy_align, .lazy_size => unreachable, } var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; const val_bigint = int.storage.toBigInt(&val_space); var result_limbs: [preallocated_limbs]math.big.Limb = undefined; var result_bigint = math.big.int.Mutable.init( if (it.limbs.len > 0) it.limbs else &result_limbs, 0, ); result_bigint.addScalar(val_bigint, 1); return (try it.pt.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern(); } fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index { var cur = it.cur orelse return null; while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) { defer it.range_i += 1; cur = (try it.addOne(it.ranges[it.range_i].last)) orelse { it.cur = null; return null; }; } it.cur = try it.addOne(cur); return cur; } }; const ResolvedSwitchItem = struct { ref: Air.Inst.Ref, val: InternPool.Index, }; fn resolveSwitchItemVal( sema: *Sema, block: *Block, item_ref: Zir.Inst.Ref, /// Coerce `item_ref` to this type. coerce_ty: Type, item_src: LazySrcLoc, ) CompileError!ResolvedSwitchItem { const uncoerced_item = try sema.resolveInst(item_ref); // Constructing a LazySrcLoc is costly because we only have the switch AST node. // Only if we know for sure we need to report a compile error do we resolve the // full source locations. 
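// The steps below: coerce the item to the switched-on type, require it to be
// a defined comptime value, then fully resolve any lazy value (e.g. one
// produced by `@sizeOf` of a not-yet-resolved type) so that later equality
// and range comparisons operate on concrete integers.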
const item = try sema.coerce(block, coerce_ty, uncoerced_item, item_src); const maybe_lazy = try sema.resolveConstDefinedValue(block, item_src, item, .{ .needed_comptime_reason = "switch prong values must be comptime-known", }); const val = try sema.resolveLazyValue(maybe_lazy); const new_item = if (val.toIntern() != maybe_lazy.toIntern()) blk: { break :blk Air.internedToRef(val.toIntern()); } else item; return .{ .ref = new_item, .val = val.toIntern() }; } fn validateErrSetSwitch( sema: *Sema, block: *Block, seen_errors: *SwitchErrorSet, case_vals: *std.ArrayListUnmanaged(Air.Inst.Ref), operand_ty: Type, inst_data: std.meta.FieldType(Zir.Inst.Data, .pl_node), scalar_cases_len: u32, multi_cases_len: u32, else_case: struct { body: []const Zir.Inst.Index, end: usize, src: LazySrcLoc }, has_else: bool, ) CompileError!?Type { const gpa = sema.gpa; const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const src_node_offset = inst_data.src_node; const src = block.nodeOffset(src_node_offset); var extra_index: usize = else_case.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1 + info.body_len; case_vals.appendAssumeCapacity(try sema.validateSwitchItemError( block, seen_errors, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, .item_idx = .{ .kind = .single, .index = 0 }, } }), )); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + info.body_len; try case_vals.ensureUnusedCapacity(gpa, items.len); for (items, 0..) 
|item_ref, item_i| { case_vals.appendAssumeCapacity(try sema.validateSwitchItemError( block, seen_errors, item_ref, operand_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, .item_idx = .{ .kind = .single, .index = @intCast(item_i) }, } }), )); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) { .anyerror_type => { if (!has_else) { return sema.fail( block, src, "else prong required when switching on type 'anyerror'", .{}, ); } return Type.anyerror; }, else => |err_set_ty_index| else_validation: { const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names; var maybe_msg: ?*Module.ErrorMsg = null; errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); for (error_names.get(ip)) |error_name| { if (!seen_errors.contains(error_name) and !has_else) { const msg = maybe_msg orelse blk: { maybe_msg = try sema.errMsg( src, "switch must handle all possibilities", .{}, ); break :blk maybe_msg.?; }; try sema.errNote( src, msg, "unhandled error value: 'error.{}'", .{error_name.fmt(ip)}, ); } } if (maybe_msg) |msg| { maybe_msg = null; try sema.addDeclaredHereNote(msg, operand_ty); return sema.failWithOwnedErrorMsg(block, msg); } if (has_else and seen_errors.count() == error_names.len) { // In order to enable common patterns for generic code allow simple else bodies // else => unreachable, // else => return, // else => |e| return e, // even if all the possible errors were already handled. const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); for (else_case.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) { .dbg_stmt, .dbg_var_val, .ret_type, .as_node, .ret_node, .@"unreachable", .@"defer", .defer_err_code, .err_union_code, .ret_err_value_code, .save_err_ret_index, .restore_err_ret_index_unconditional, .restore_err_ret_index_fn_entry, .is_non_err, .ret_is_non_err, .condbr, => {}, .extended => switch (datas[@intFromEnum(else_inst)].extended.opcode) { .restore_err_ret_index => {}, else => break, }, else => break, } else break :else_validation; return sema.fail( block, else_case.src, "unreachable else prong; all cases already handled", .{}, ); } var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, error_names.len); for (error_names.get(ip)) |error_name| { if (seen_errors.contains(error_name)) continue; names.putAssumeCapacityNoClobber(error_name, {}); } // No need to keep the hash map metadata correct; here we // extract the (sorted) keys only. 
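// The keys collected above are exactly the error names still observable in
// the else prong. E.g. (hypothetical user code), given
//
//     switch (err) { // err: error{A, B, C}
//         error.A => {},
//         else => |e| return e, // `e` is narrowed to error{B,C}
//     }
//
// the type built below is the error set {B, C}.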
return try pt.errorSetFromUnsortedNames(names.keys()); }, } return null; } fn validateSwitchRange( sema: *Sema, block: *Block, range_set: *RangeSet, first_ref: Zir.Inst.Ref, last_ref: Zir.Inst.Ref, operand_ty: Type, item_src: LazySrcLoc, ) CompileError![2]Air.Inst.Ref { const first_src: LazySrcLoc = .{ .base_node_inst = item_src.base_node_inst, .offset = .{ .switch_case_item_range_first = item_src.offset.switch_case_item }, }; const last_src: LazySrcLoc = .{ .base_node_inst = item_src.base_node_inst, .offset = .{ .switch_case_item_range_last = item_src.offset.switch_case_item }, }; const first = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, first_src); const last = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, last_src); if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, sema.pt)) { return sema.fail(block, item_src, "range start value is greater than the end value", .{}); } const maybe_prev_src = try range_set.add(first.val, last.val, item_src); try sema.validateSwitchDupe(block, maybe_prev_src, item_src); return .{ first.ref, last.ref }; } fn validateSwitchItemInt( sema: *Sema, block: *Block, range_set: *RangeSet, item_ref: Zir.Inst.Ref, operand_ty: Type, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src); const maybe_prev_src = try range_set.add(item.val, item.val, item_src); try sema.validateSwitchDupe(block, maybe_prev_src, item_src); return item.ref; } fn validateSwitchItemEnum( sema: *Sema, block: *Block, seen_fields: []?LazySrcLoc, range_set: *RangeSet, item_ref: Zir.Inst.Ref, operand_ty: Type, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const ip = &sema.pt.zcu.intern_pool; const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src); const int = ip.indexToKey(item.val).enum_tag.int; const field_index = ip.loadEnumType(ip.typeOf(item.val)).tagValueIndex(ip, int) orelse { const maybe_prev_src = try range_set.add(int, int, item_src); try sema.validateSwitchDupe(block, maybe_prev_src, item_src); return item.ref; }; const maybe_prev_src = seen_fields[field_index]; seen_fields[field_index] = item_src; try sema.validateSwitchDupe(block, maybe_prev_src, item_src); return item.ref; } fn validateSwitchItemError( sema: *Sema, block: *Block, seen_errors: *SwitchErrorSet, item_ref: Zir.Inst.Ref, operand_ty: Type, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src); const error_name = sema.pt.zcu.intern_pool.indexToKey(item.val).err.name; const maybe_prev_src = if (try seen_errors.fetchPut(error_name, item_src)) |prev| prev.value else null; try sema.validateSwitchDupe(block, maybe_prev_src, item_src); return item.ref; } fn validateSwitchDupe( sema: *Sema, block: *Block, maybe_prev_src: ?LazySrcLoc, item_src: LazySrcLoc, ) CompileError!void { const prev_item_src = maybe_prev_src orelse return; return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg( item_src, "duplicate switch value", .{}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( prev_item_src, msg, "previous value here", .{}, ); break :msg msg; }); } fn validateSwitchItemBool( sema: *Sema, block: *Block, true_count: *u8, false_count: *u8, item_ref: Zir.Inst.Ref, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const item = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, item_src); if (Value.fromInterned(item.val).toBool()) { true_count.* += 
1; } else { false_count.* += 1; } if (true_count.* > 1 or false_count.* > 1) { return sema.fail(block, item_src, "duplicate switch value", .{}); } return item.ref; } const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, LazySrcLoc); fn validateSwitchItemSparse( sema: *Sema, block: *Block, seen_values: *ValueSrcMap, item_ref: Zir.Inst.Ref, operand_ty: Type, item_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src); const kv = try seen_values.fetchPut(sema.gpa, item.val, item_src) orelse return item.ref; try sema.validateSwitchDupe(block, kv.value, item_src); unreachable; } fn validateSwitchNoRange( sema: *Sema, block: *Block, ranges_len: u32, operand_ty: Type, src_node_offset: i32, ) CompileError!void { if (ranges_len == 0) return; const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset }); const range_src = block.src(.{ .node_offset_switch_range = src_node_offset }); const msg = msg: { const msg = try sema.errMsg( operand_src, "ranges not allowed when switching on type '{}'", .{operand_ty.fmt(sema.pt)}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( range_src, msg, "range here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn maybeErrorUnwrap( sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref, operand_src: LazySrcLoc, allow_err_code_inst: bool, ) !bool { const pt = sema.pt; const mod = pt.zcu; if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false; const tags = sema.code.instructions.items(.tag); for (body) |inst| { switch (tags[@intFromEnum(inst)]) { .@"unreachable" => if (!block.wantSafety()) return false, .err_union_code => if (!allow_err_code_inst) return false, .save_err_ret_index, .dbg_stmt, .str, .as_node, .panic, .field_val, => {}, else => return false, } } for (body) |inst| { const air_inst = switch (tags[@intFromEnum(inst)]) { .err_union_code => continue, .dbg_stmt => { try sema.zirDbgStmt(block, inst); continue; }, .save_err_ret_index => { try sema.zirSaveErrRetIndex(block, inst); continue; }, .str => try sema.zirStr(inst), .as_node => try sema.zirAsNode(block, inst), .field_val => try sema.zirFieldVal(block, inst), .@"unreachable" => { if (!mod.comp.formatted_panics) { try sema.safetyPanic(block, operand_src, .unwrap_error); return true; } const panic_fn = try pt.getBuiltin("panicUnwrapError"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [2]Air.Inst.Ref = .{ err_return_trace, operand }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); return true; }, .panic => { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const msg_inst = try sema.resolveInst(inst_data.operand); const panic_fn = try pt.getBuiltin("panic"); const err_return_trace = try sema.getErrorReturnTrace(block); const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value }; try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check"); return true; }, else => unreachable, }; if (sema.typeOf(air_inst).isNoReturn(mod)) return true; sema.inst_map.putAssumeCapacity(inst, air_inst); } unreachable; } fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void { const pt = sema.pt; const mod = pt.zcu; const index = cond.toIndex() orelse return; if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) return; const 
err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); if (operand_ty.zigTypeTag(mod) == .ErrorSet) { try sema.maybeErrorUnwrapComptime(block, body, err_operand); return; } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { if (!operand_ty.isError(mod)) return; if (val.getErrorName(mod) == .none) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } } fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !void { const tags = sema.code.instructions.items(.tag); const inst = for (body) |inst| { switch (tags[@intFromEnum(inst)]) { .dbg_stmt, .save_err_ret_index, => {}, .@"unreachable" => break inst, else => return, } } else return; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"unreachable"; const src = block.nodeOffset(inst_data.src_node); if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getErrorName(sema.pt.zcu).unwrap()) |name| { return sema.failWithComptimeErrorRetTrace(block, src, name); } } } fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const name_src = block.builtinCallArgSrc(inst_data.src_node, 1); const ty = try sema.resolveType(block, ty_src, extra.lhs); const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{ .needed_comptime_reason = "field name must be comptime-known", }); try ty.resolveFields(pt); const ip = &mod.intern_pool; const has_field = hf: { switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => { if (field_name.eqlSlice("ptr", ip)) break :hf true; if (field_name.eqlSlice("len", ip)) break :hf true; break :hf false; }, else => {}, }, .anon_struct_type => |anon_struct| { if (anon_struct.names.len != 0) { break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names.get(ip), field_name) != null; } else { const field_index = field_name.toUnsigned(ip) orelse break :hf false; break :hf field_index < ty.structFieldCount(mod); } }, .struct_type => { break :hf ip.loadStructType(ty.toIntern()).nameIndex(ip, field_name) != null; }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); break :hf union_type.loadTagType(ip).nameIndex(ip, field_name) != null; }, .enum_type => { break :hf ip.loadEnumType(ty.toIntern()).nameIndex(ip, field_name) != null; }, .array_type => break :hf field_name.eqlSlice("len", ip), else => {}, } return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ ty.fmt(pt), }); }; return if (has_field) .bool_true else .bool_false; } fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const lhs_src = block.builtinCallArgSrc(inst_data.src_node, 0); const rhs_src = block.builtinCallArgSrc(inst_data.src_node, 1); const container_type = try sema.resolveType(block, lhs_src, 
extra.lhs);
    const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, .{
        .needed_comptime_reason = "decl name must be comptime-known",
    });
    try sema.checkNamespaceType(block, lhs_src, container_type);

    const namespace = container_type.getNamespace(mod).unwrap() orelse return .bool_false;
    if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| {
        if (lookup.accessible) {
            return .bool_true;
        }
    }
    return .bool_false;
}

fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const zcu = pt.zcu;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok;
    const operand_src = block.tokenOffset(inst_data.src_tok);
    const operand = inst_data.get(sema.code);

    const result = pt.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) {
        error.ImportOutsideModulePath => {
            return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand});
        },
        error.ModuleNotFound => {
            return sema.fail(block, operand_src, "no module named '{s}' available within module {s}", .{
                operand, block.getFileScope(zcu).mod.fully_qualified_name,
            });
        },
        else => {
            // TODO: these errors are file system errors; make sure an update() will
            // retry this and not cache the file system error, which may be transient.
            return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
        },
    };
    // TODO: register some kind of dependency on the file.
    // That way, if this returns `error.AnalysisFail`, we have the dependency banked,
    // ready to trigger re-analysis later.
    try pt.ensureFileAnalyzed(result.file_index);
    const ty = zcu.fileRootType(result.file_index);
    try sema.declareDependency(.{ .interned = ty });
    try sema.addTypeReferenceEntry(operand_src, ty);
    return Air.internedToRef(ty);
}

fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pt = sema.pt;
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
    const name = try sema.resolveConstString(block, operand_src, inst_data.operand, .{
        .needed_comptime_reason = "file path name must be comptime-known",
    });

    if (name.len == 0) {
        return sema.fail(block, operand_src, "file path name cannot be empty", .{});
    }

    const val = pt.embedFile(block.getFileScope(pt.zcu), name, operand_src) catch |err| switch (err) {
        error.ImportOutsideModulePath => {
            return sema.fail(block, operand_src, "embed of file outside module path: '{s}'", .{name});
        },
        else => {
            // TODO: these errors are file system errors; make sure an update() will
            // retry this and not cache the file system error, which may be transient.
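            // For reference, this analyzes uses such as the following (an
            // illustrative sketch, not code from this repository):
            //
            //     const bytes = @embedFile("assets/logo.bin");
            //
            // A path that escapes the module root is reported via the
            // error.ImportOutsideModulePath prong above; any other file
            // system error falls through to the generic message below.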
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) }); }, }; return Air.internedToRef(val); } fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const name = try mod.intern_pool.getOrPutString( sema.gpa, pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); _ = try pt.getErrorValue(name); const error_set_type = try pt.singleErrorSetType(name); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = name, } }))); } fn zirShl( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const scalar_ty = lhs_ty.scalarType(mod); const scalar_rhs_ty = rhs_ty.scalarType(mod); // TODO coerce rhs if air_tag is not shl_sat const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty); const maybe_lhs_val = try sema.resolveValueIntable(lhs); const maybe_rhs_val = try sema.resolveValueIntable(rhs); if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(sema.typeOf(lhs)); } // If rhs is 0, return lhs without doing any calculations. 
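        // For example (illustrative): in `fn f(x: u32) u32 { return x << 0; }`
        // the shift folds to `x` here and no AIR shift instruction is emitted.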
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(pt, i); if (rhs_elem.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValueSema(pt, sema), i, scalar_ty.fmt(pt), }); } } } else if (rhs_val.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ rhs_val.fmtValueSema(pt, sema), scalar_ty.fmt(pt), }); } } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(pt, i); if (rhs_elem.compareHetero(.lt, try pt.intValue(scalar_rhs_ty, 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValueSema(pt, sema), i, }); } } } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValueSema(pt, sema), }); } } const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { if (lhs_val.isUndef(mod)) return pt.undefRef(lhs_ty); const rhs_val = maybe_rhs_val orelse { if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); } break :rs rhs_src; }; const val = if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) try lhs_val.shl(rhs_val, lhs_ty, sema.arena, pt) else switch (air_tag) { .shl_exact => val: { const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, pt); if (shifted.overflow_bit.compareAllWithZero(.eq, pt)) { break :val shifted.wrapped_result; } return sema.fail(block, src, "operation caused overflow", .{}); }, .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, pt), .shl => try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, pt), else => unreachable, }; return Air.internedToRef(val.toIntern()); } else lhs_src; const new_rhs = if (air_tag == .shl_sat) rhs: { // Limit the RHS type for saturating shl to be an integer as small as the LHS. 
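        // Sketch of the effect, with illustrative types: for `lhs: u8` and
        // `rhs: u64`, the shift amount is narrowed roughly as
        //
        //     const narrowed: u8 = @intCast(@min(rhs, std.math.maxInt(u8)));
        //
        // which is lossless for `<<|`, since any shift amount >= 8 already
        // saturates a u8.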
if (rhs_is_comptime_int or scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits) { const max_int = Air.internedToRef((try lhs_ty.maxInt(pt, lhs_ty)).toIntern()); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); } else { break :rhs rhs; } } else rhs; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try pt.intValue(scalar_rhs_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern()); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, .data = .{ .reduce = .{ .operand = lt, .operation = .And, } }, }); } else ok: { const bit_count_inst = Air.internedToRef(bit_count_val.toIntern()); break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst); }; try sema.addSafetyCheck(block, src, ok, .shift_rhs_too_big); } if (air_tag == .shl_exact) { const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(lhs_ty); const op_ov = try block.addInst(.{ .tag = .shl_with_overflow, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()), .payload = try sema.addExtra(Air.Bin{ .lhs = lhs, .rhs = rhs, }), } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = ov_bit, .operation = .Or, } }, }) else ov_bit; const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, src, no_ov, .shl_overflow); return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty); } } return block.addBinOp(air_tag, lhs, new_rhs); } fn zirShr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const scalar_ty = lhs_ty.scalarType(mod); const maybe_lhs_val = try sema.resolveValueIntable(lhs); const maybe_rhs_val = try sema.resolveValueIntable(rhs); const runtime_src = if (maybe_rhs_val) |rhs_val| rs: { if (rhs_val.isUndef(mod)) { return pt.undefRef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. 
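        // As in zirShl above, `x >> 0` is returned as `x` directly. For
        // `@shrExact` the comptime path further below also checks that no set
        // bits are shifted out, e.g. (illustrative) `@shrExact(@as(u8, 6), 1)`
        // folds to 3 while `@shrExact(@as(u8, 7), 1)` is a compile error.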
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(pt, i); if (rhs_elem.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValueSema(pt, sema), i, scalar_ty.fmt(pt), }); } } } else if (rhs_val.compareHetero(.gte, bit_value, pt)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ rhs_val.fmtValueSema(pt, sema), scalar_ty.fmt(pt), }); } } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(pt, i); if (rhs_elem.compareHetero(.lt, try pt.intValue(rhs_ty.childType(mod), 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValueSema(pt, sema), i, }); } } } else if (rhs_val.compareHetero(.lt, try pt.intValue(rhs_ty, 0), pt)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValueSema(pt, sema), }); } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(lhs_ty); } if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, pt); if (!(try truncated.compareAllWithZeroSema(.eq, pt))) { return sema.fail(block, src, "exact shift shifted out 1 bits", .{}); } } const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else { break :rs lhs_src; } } else rhs_src; if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); } try sema.requireRuntimeBlock(block, src, runtime_src); const result = try block.addBinOp(air_tag, lhs, rhs); if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try pt.intValue(rhs_ty.scalarType(mod), bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern()); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, .data = .{ .reduce = .{ .operand = lt, .operation = .And, } }, }); } else ok: { const bit_count_inst = Air.internedToRef(bit_count_val.toIntern()); break :ok try block.addBinOp(.cmp_lt, rhs, bit_count_inst); }; try sema.addSafetyCheck(block, src, ok, .shift_rhs_too_big); } if (air_tag == .shr_exact) { const back = try block.addBinOp(.shl, result, rhs); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try block.addCmpVector(lhs, back, .eq); break :ok try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = eql, .operation = .And, } }, }); } else try block.addBinOp(.cmp_eq, lhs, back); try sema.addSafetyCheck(block, src, ok, .shr_overflow); } } return result; } fn zirBitwise( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; 
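    // Analyzes `&`, `|`, and `^`. Operands are first coerced to their peer
    // type; e.g. (illustrative) `@as(u8, 0b1100) & @as(u16, 0b1010)` resolves
    // to u16 and comptime-folds below to 0b1000.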
const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); const scalar_type = resolved_type.scalarType(mod); const scalar_tag = scalar_type.zigTypeTag(mod); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) }); } const runtime_src = runtime: { // TODO: ask the linker what kind of relocations are available, and // in some cases emit a Value that means "this decl's address AND'd with this operand". if (try sema.resolveValueIntable(casted_lhs)) |lhs_val| { if (try sema.resolveValueIntable(casted_rhs)) |rhs_val| { const result_val = switch (air_tag) { .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, pt), .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, pt), .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, pt), else => unreachable, }; return Air.internedToRef(result_val.toIntern()); } else { break :runtime rhs_src; } } else { break :runtime lhs_src; } }; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); const operand = try sema.resolveInst(inst_data.operand); const operand_type = sema.typeOf(operand); const scalar_type = operand_type.scalarType(mod); if (scalar_type.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ operand_type.fmt(pt), }); } if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) { return pt.undefRef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(pt, i); elem.* = (try elem_val.bitwiseNot(scalar_type, sema.arena, pt)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = operand_type.toIntern(), .storage = .{ .elems = elems }, } }))); } else { const result_val = try val.bitwiseNot(operand_type, sema.arena, pt); return Air.internedToRef(result_val.toIntern()); } } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.not, operand_type, operand); } fn analyzeTupleCat( sema: *Sema, block: *Block, src_node: i32, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const src = block.nodeOffset(src_node); const lhs_len = lhs_ty.structFieldCount(mod); const rhs_len = rhs_ty.structFieldCount(mod); const dest_fields = lhs_len + rhs_len; if (dest_fields == 0) { return Air.internedToRef(Value.empty_struct.toIntern()); } if (lhs_len == 0) { return rhs; } if (rhs_len == 0) { return lhs; } const final_len = try sema.usizeCast(block, src, dest_fields); const types = try sema.arena.alloc(InternPool.Index, final_len); const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { types[i] = lhs_ty.structFieldType(i, mod).toIntern(); const default_val = lhs_ty.structFieldDefaultValue(i, mod); values[i] = default_val.toIntern(); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = src_node, .elem_index = i, } }); if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i] = .none; } } i = 0; while (i < rhs_len) : (i += 1) { types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern(); const default_val = rhs_ty.structFieldDefaultValue(i, mod); values[i + lhs_len] = default_val.toIntern(); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = src_node, .elem_index = i, } }); if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i + lhs_len] = .none; } } break :rs runtime_src; }; const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{ .types = types, .values = values, .names = &.{}, }); const runtime_src = opt_runtime_src orelse { const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); return Air.internedToRef(tuple_val); }; try sema.requireRuntimeBlock(block, src, runtime_src); const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len); var i: u32 = 0; while (i < lhs_len) : (i += 1) { const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = src_node, .elem_index = i, } }); element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, lhs, i, lhs_ty); } i = 0; while (i < rhs_len) : (i += 1) { const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = src_node, .elem_index = i, } }); element_refs[i + lhs_len] = try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty); } return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); } fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try 
sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const src = block.nodeOffset(inst_data.src_node); const lhs_is_tuple = lhs_ty.isTuple(mod); const rhs_is_tuple = rhs_ty.isTuple(mod); if (lhs_is_tuple and rhs_is_tuple) { return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs); } const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: { if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined); return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)}); }; const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse { assert(!rhs_is_tuple); return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(pt)}); }; const resolved_elem_ty = t: { var trash_block = block.makeSubBlock(); trash_block.is_comptime = false; defer trash_block.instructions.deinit(sema.gpa); const instructions = [_]Air.Inst.Ref{ try trash_block.addBitCast(lhs_info.elem_type, .void_value), try trash_block.addBitCast(rhs_info.elem_type, .void_value), }; break :t try sema.resolvePeerTypes(block, src, &instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); }; // When there is a sentinel mismatch, no sentinel on the result. // Otherwise, use the sentinel value provided by either operand, // coercing it to the peer-resolved element type. const res_sent_val: ?Value = s: { if (lhs_info.sentinel) |lhs_sent_val| { const lhs_sent = Air.internedToRef(lhs_sent_val.toIntern()); if (rhs_info.sentinel) |rhs_sent_val| { const rhs_sent = Air.internedToRef(rhs_sent_val.toIntern()); const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src); const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src); const lhs_sent_casted_val = (try sema.resolveDefinedValue(block, lhs_src, lhs_sent_casted)).?; const rhs_sent_casted_val = (try sema.resolveDefinedValue(block, rhs_src, rhs_sent_casted)).?; if (try sema.valuesEqual(lhs_sent_casted_val, rhs_sent_casted_val, resolved_elem_ty)) { break :s lhs_sent_casted_val; } else { break :s null; } } else { const lhs_sent_casted = try sema.coerce(block, resolved_elem_ty, lhs_sent, lhs_src); const lhs_sent_casted_val = (try sema.resolveDefinedValue(block, lhs_src, lhs_sent_casted)).?; break :s lhs_sent_casted_val; } } else { if (rhs_info.sentinel) |rhs_sent_val| { const rhs_sent = Air.internedToRef(rhs_sent_val.toIntern()); const rhs_sent_casted = try sema.coerce(block, resolved_elem_ty, rhs_sent, rhs_src); const rhs_sent_casted_val = (try sema.resolveDefinedValue(block, rhs_src, rhs_sent_casted)).?; break :s rhs_sent_casted_val; } else { break :s null; } } }; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); const rhs_len = try sema.usizeCast(block, rhs_src, rhs_info.len); const result_len = std.math.add(usize, lhs_len, rhs_len) catch |err| switch (err) { error.Overflow => return sema.fail( block, src, "concatenating arrays of length {d} and {d} produces an array too large for this compiler implementation to handle", .{ lhs_len, rhs_len }, ), }; const result_ty = try pt.arrayType(.{ .len = result_len, .sentinel = if (res_sent_val) |v| v.toIntern() else .none, .child = resolved_elem_ty.toIntern(), }); const ptr_addrspace = p: { if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p 
lhs_ty.ptrAddressSpace(mod); if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod); break :p null; }; const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) { .Array, .Struct => try sema.resolveValue(lhs), .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs), else => unreachable, }) |lhs_val| rs: { if (switch (rhs_ty.zigTypeTag(mod)) { .Array, .Struct => try sema.resolveValue(rhs), .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs), else => unreachable, }) |rhs_val| { const lhs_sub_val = if (lhs_ty.isSinglePointer(mod)) try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty) orelse break :rs lhs_src else if (lhs_ty.isSlice(mod)) try sema.maybeDerefSliceAsArray(block, lhs_src, lhs_val) orelse break :rs lhs_src else lhs_val; const rhs_sub_val = if (rhs_ty.isSinglePointer(mod)) try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty) orelse break :rs rhs_src else if (rhs_ty.isSlice(mod)) try sema.maybeDerefSliceAsArray(block, rhs_src, rhs_val) orelse break :rs rhs_src else rhs_val; const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: u32 = 0; while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(pt, lhs_elem_i) else elem_default_val; const elem_val_inst = Air.internedToRef(elem_val.toIntern()); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = elem_i, } }); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, operand_src); const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined); element_vals[elem_i] = coerced_elem_val.toIntern(); } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(pt, rhs_elem_i) else elem_default_val; const elem_val_inst = Air.internedToRef(elem_val.toIntern()); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = @intCast(rhs_elem_i), } }); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, operand_src); const coerced_elem_val = try sema.resolveConstValue(block, operand_src, coerced_elem_val_inst, undefined); element_vals[elem_i] = coerced_elem_val.toIntern(); } return sema.addConstantMaybeRef(try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }), ptr_addrspace != null); } else break :rs rhs_src; } else lhs_src; try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_addrspace) |ptr_as| { const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = resolved_elem_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); var elem_i: u32 = 0; while (elem_i < lhs_len) : (elem_i += 1) { const elem_index = try pt.intRef(Type.usize, elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, 
.elem_index = elem_i, } }); const init = try sema.elemVal(block, operand_src, lhs, elem_index, src, true); try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store); } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_index = try pt.intRef(Type.usize, elem_i); const rhs_index = try pt.intRef(Type.usize, rhs_elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = @intCast(rhs_elem_i), } }); const init = try sema.elemVal(block, operand_src, rhs, rhs_index, src, true); try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store); } if (res_sent_val) |sent_val| { const elem_index = try pt.intRef(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); } return alloc; } const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len); { var elem_i: u32 = 0; while (elem_i < lhs_len) : (elem_i += 1) { const index = try pt.intRef(Type.usize, elem_i); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = elem_i, } }); const init = try sema.elemVal(block, operand_src, lhs, index, src, true); element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, operand_src); } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const index = try pt.intRef(Type.usize, rhs_elem_i); const operand_src = block.src(.{ .array_cat_rhs = .{ .array_cat_offset = inst_data.src_node, .elem_index = @intCast(rhs_elem_i), } }); const init = try sema.elemVal(block, operand_src, rhs, index, src, true); element_refs[elem_i] = try sema.coerce(block, resolved_elem_ty, init, operand_src); } } return block.addAggregateInit(result_ty, element_refs); } fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { .Array => return operand_ty.arrayInfo(mod), .Pointer => { const ptr_info = operand_ty.ptrInfo(mod); switch (ptr_info.flags.size) { .Slice => { const val = try sema.resolveConstDefinedValue(block, src, operand, .{ .needed_comptime_reason = "slice value being concatenated must be comptime-known", }); return Type.ArrayInfo{ .elem_type = Type.fromInterned(ptr_info.child), .sentinel = switch (ptr_info.sentinel) { .none => null, else => Value.fromInterned(ptr_info.sentinel), }, .len = try val.sliceLen(pt), }; }, .One => { if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) { return Type.fromInterned(ptr_info.child).arrayInfo(mod); } }, .C, .Many => {}, } }, .Struct => { if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) { assert(!peer_ty.isTuple(mod)); return .{ .elem_type = peer_ty.elemType2(mod), .sentinel = null, .len = operand_ty.arrayLen(mod), }; } }, else => {}, } return null; } fn analyzeTupleMul( sema: *Sema, block: *Block, src_node: i32, operand: Air.Inst.Ref, factor: usize, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); const src = block.nodeOffset(src_node); const len_src = block.src(.{ .node_offset_bin_rhs = src_node }); const tuple_len = 
operand_ty.structFieldCount(mod); const final_len = std.math.mul(usize, tuple_len, factor) catch return sema.fail(block, len_src, "operation results in overflow", .{}); if (final_len == 0) { return Air.internedToRef(Value.empty_struct.toIntern()); } const types = try sema.arena.alloc(InternPool.Index, final_len); const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (0..tuple_len) |i| { types[i] = operand_ty.structFieldType(i, mod).toIntern(); values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern(); const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = src_node, .elem_index = @intCast(i), } }); if (values[i] == .unreachable_value) { runtime_src = operand_src; values[i] = .none; // TODO don't treat unreachable_value as special } } for (0..factor) |i| { mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]); mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]); } break :rs runtime_src; }; const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, pt.tid, .{ .types = types, .values = values, .names = &.{}, }); const runtime_src = opt_runtime_src orelse { const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); return Air.internedToRef(tuple_val); }; try sema.requireRuntimeBlock(block, src, runtime_src); const element_refs = try sema.arena.alloc(Air.Inst.Ref, final_len); var i: u32 = 0; while (i < tuple_len) : (i += 1) { const operand_src = block.src(.{ .array_cat_lhs = .{ .array_cat_offset = src_node, .elem_index = i, } }); element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(i), operand_ty); } i = 1; while (i < factor) : (i += 1) { @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]); } return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ArrayMul, inst_data.payload_index).data; const uncoerced_lhs = try sema.resolveInst(extra.lhs); const uncoerced_lhs_ty = sema.typeOf(uncoerced_lhs); const src: LazySrcLoc = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const operator_src = block.src(.{ .node_offset_main_token = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs, const lhs_ty = coerced_lhs: { // If we have a result type, we might be able to do this more efficiently // by coercing the LHS first. Specifically, if we want an array or vector // and have a tuple, coerce the tuple immediately. 
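    // For example (illustrative): in `const a: [4]u8 = .{ 1, 2 } ** 2;` the
    // tuple `.{ 1, 2 }` is coerced to `[2]u8` up front, so the repetition is
    // analyzed as an array operation rather than field-by-field on a tuple.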
no_coerce: { if (extra.res_ty == .none) break :no_coerce; const res_ty_inst = try sema.resolveInst(extra.res_ty); const res_ty = try sema.analyzeAsType(block, src, res_ty_inst); if (res_ty.isGenericPoison()) break :no_coerce; if (!uncoerced_lhs_ty.isTuple(mod)) break :no_coerce; const lhs_len = uncoerced_lhs_ty.structFieldCount(mod); const lhs_dest_ty = switch (res_ty.zigTypeTag(mod)) { else => break :no_coerce, .Array => try pt.arrayType(.{ .child = res_ty.childType(mod).toIntern(), .len = lhs_len, .sentinel = if (res_ty.sentinel(mod)) |s| s.toIntern() else .none, }), .Vector => try pt.vectorType(.{ .child = res_ty.childType(mod).toIntern(), .len = lhs_len, }), }; // Attempt to coerce to this type, but don't emit an error if it fails. Instead, // just exit out of this path and let the usual error happen later, so that error // messages are consistent. const coerced = sema.coerceExtra(block, lhs_dest_ty, uncoerced_lhs, lhs_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => break :no_coerce, else => |e| return e, }; break :coerced_lhs .{ coerced, lhs_dest_ty }; } break :coerced_lhs .{ uncoerced_lhs, uncoerced_lhs_ty }; }; if (lhs_ty.isTuple(mod)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .needed_comptime_reason = "array multiplication factor must be comptime-known", }); const factor_casted = try sema.usizeCast(block, rhs_src, factor); return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted); } // Analyze the lhs first, to catch the case that someone tried to do exponentiation const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); switch (lhs_ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => { try sema.errNote(operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{}); }, else => {}, } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .needed_comptime_reason = "array multiplication factor must be comptime-known", }); const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); const result_len = try sema.usizeCast(block, src, result_len_u64); const result_ty = try pt.arrayType(.{ .len = result_len, .sentinel = if (lhs_info.sentinel) |s| s.toIntern() else .none, .child = lhs_info.elem_type.toIntern(), }); const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| ct: { const lhs_sub_val = if (lhs_ty.isSinglePointer(mod)) try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty) orelse break :ct else if (lhs_ty.isSlice(mod)) try sema.maybeDerefSliceAsArray(block, lhs_src, lhs_val) orelse break :ct else lhs_val; const val = v: { // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. 
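        // For example, `[1]u8{0} ** 4096` is interned as a single
        // `repeated_elem` rather than as 4096 separate element values.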
if (lhs_len == 1 and lhs_info.sentinel == null) { const elem_val = try lhs_sub_val.elemValue(pt, 0); break :v try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .repeated_elem = elem_val.toIntern() }, } }); } const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: usize = 0; while (elem_i < result_len) { var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { const elem_val = try lhs_sub_val.elemValue(pt, lhs_i); element_vals[elem_i] = elem_val.toIntern(); elem_i += 1; } } break :v try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); }; return sema.addConstantMaybeRef(val, ptr_addrspace != null); } try sema.requireRuntimeBlock(block, src, lhs_src); // Grab all the LHS values ahead of time, rather than repeatedly emitting instructions // to get the same elem values. const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len); for (lhs_vals, 0..) |*lhs_val, idx| { const idx_ref = try pt.intRef(Type.usize, idx); lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false); } if (ptr_addrspace) |ptr_as| { const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = ptr_as }, }); const alloc = try block.addTy(.alloc, alloc_ty); const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = lhs_info.elem_type.toIntern(), .flags = .{ .address_space = ptr_as }, }); var elem_i: usize = 0; while (elem_i < result_len) { for (lhs_vals) |lhs_val| { const elem_index = try pt.intRef(Type.usize, elem_i); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store); elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { const elem_index = try pt.intRef(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); const init = Air.internedToRef(sent_val.toIntern()); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); } return alloc; } const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len); for (0..try sema.usizeCast(block, rhs_src, factor)) |i| { @memcpy(element_refs[i * lhs_len ..][0..lhs_len], lhs_vals); } return block.addAggregateInit(result_ty, element_refs); } fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = src; const rhs_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); const rhs = try sema.resolveInst(inst_data.operand); const rhs_ty = sema.typeOf(rhs); const rhs_scalar_ty = rhs_ty.scalarType(mod); if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => false, else => true, }) { return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}); } if (rhs_scalar_ty.isAnyFloat()) { // We handle float negation here to ensure negative zero is represented in the bits. 
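    // (A `0.0 - x` lowering would lose the sign: IEEE 754 gives
    // `0.0 - 0.0 == +0.0`, whereas `-x` for `x == 0.0` must yield `-0.0`.)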
if (try sema.resolveValue(rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return pt.undefRef(rhs_ty); return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, pt)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(if (block.float_mode == .optimized) .neg_optimized else .neg, rhs); } const lhs = Air.internedToRef((try sema.splat(rhs_ty, try pt.intValue(rhs_scalar_ty, 0))).toIntern()); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const lhs_src = src; const rhs_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); const rhs = try sema.resolveInst(inst_data.operand); const rhs_ty = sema.typeOf(rhs); const rhs_scalar_ty = rhs_ty.scalarType(mod); switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => {}, else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}), } const lhs = Air.internedToRef((try sema.splat(rhs_ty, try pt.intValue(rhs_scalar_ty, 0))).toIntern()); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } fn zirArithmetic( sema: *Sema, block: *Block, inst: Zir.Inst.Index, zir_tag: Zir.Inst.Tag, safety: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, src, lhs_src, rhs_src, safety); } fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = 
scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
    {
        // If it makes a difference whether we coerce to ints or floats before doing the division, error.
        // If lhs % rhs is 0, it doesn't matter.
        const lhs_val = maybe_lhs_val orelse unreachable;
        const rhs_val = maybe_rhs_val orelse unreachable;
        const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt) catch unreachable;
        if (!rem.compareAllWithZero(.eq, pt)) {
            return sema.fail(
                block,
                src,
                "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
                .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt), rem.fmtValueSema(pt, sema) },
            );
        }
    }

    // TODO: emit compile error when .div is used on integers and there would be an
    // ambiguous result between div_floor and div_trunc.

    // For integers:
    // If the lhs is zero, then zero is returned regardless of rhs.
    // If the rhs is zero, compile error for division by zero.
    // If the rhs is undefined, compile error because there is a possible
    // value (zero) for which the division would be illegal behavior.
    // If the lhs is undefined:
    //   * if lhs type is signed:
    //     * if rhs is comptime-known and not -1, result is undefined
    //     * if rhs is -1 or runtime-known, compile error because there is a
    //       possible value (-min_int / -1) for which division would be
    //       illegal behavior.
    //   * if lhs type is unsigned, undef is returned regardless of rhs.
    //
    // For floats:
    // If the rhs is zero:
    //   * comptime_float: compile error for division by zero.
    //   * other float type:
    //     * if the lhs is zero: QNaN
    //     * otherwise: +Inf or -Inf depending on lhs sign
    // If the rhs is undefined:
    //   * comptime_float: compile error because there is a possible
    //     value (zero) for which the division would be illegal behavior.
    //   * other float type: result is undefined
    // If the lhs is undefined, result is undefined.
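    // Worked example of the integer rules above (illustrative): at comptime,
    // `1 / 0` is a compile error (division by zero), and dividing a signed
    // undefined lhs by a runtime-known divisor is rejected because the
    // divisor could be -1, for which `minInt / -1` would overflow.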
switch (scalar_tag) { .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly } }, else => {}, } const runtime_src = rs: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) { return pt.undefRef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { var overflow_idx: ?usize = null; const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return Air.internedToRef(res.toIntern()); } else { return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else { break :rs rhs_src; } } else { break :rs lhs_src; } }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int); try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); } const air_tag = if (is_int) blk: { if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) { return sema.fail( block, src, "division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt) }, ); } break :blk Air.Inst.Tag.div_trunc; } else switch (block.float_mode) { .optimized => Air.Inst.Tag.div_float_optimized, .strict => Air.Inst.Tag.div_float, }; return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = 
&[_]?LazySrcLoc{ lhs_src, rhs_src },
    });
    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

    const lhs_scalar_ty = lhs_ty.scalarType(mod);
    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

    try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);

    const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
    const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);

    const runtime_src = rs: {
        // For integers:
        // If the lhs is zero, then zero is returned regardless of rhs.
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, compile error because there is a possible
        // value for which the division would result in a remainder.
        // TODO: emit a runtime safety check for when there is a remainder
        // TODO: emit a runtime safety check for division by zero
        //
        // For floats:
        // If the rhs is zero, compile error for division by zero.
        // If the rhs is undefined, compile error because there is a possible
        // value (zero) for which the division would be illegal behavior.
        // If the lhs is undefined, compile error because there is a possible
        // value for which the division would result in a remainder.
        if (maybe_lhs_val) |lhs_val| {
            if (lhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, lhs_src);
            } else {
                if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0),
                        else => unreachable,
                    };
                    const zero_val = try sema.splat(resolved_type, scalar_zero);
                    return Air.internedToRef(zero_val.toIntern());
                }
            }
        }
        if (maybe_rhs_val) |rhs_val| {
            if (rhs_val.isUndef(mod)) {
                return sema.failWithUseOfUndef(block, rhs_src);
            }
            if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) {
                return sema.failWithDivideByZero(block, rhs_src);
            }
            // TODO: if the RHS is one, return the LHS directly
        }
        if (maybe_lhs_val) |lhs_val| {
            if (maybe_rhs_val) |rhs_val| {
                if (is_int) {
                    const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt);
                    if (!(modulus_val.compareAllWithZero(.eq, pt))) {
                        return sema.fail(block, src, "exact division produced remainder", .{});
                    }
                    var overflow_idx: ?usize = null;
                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt);
                    if (overflow_idx) |vec_idx| {
                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                    }
                    return Air.internedToRef(res.toIntern());
                } else {
                    const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt);
                    if (!(modulus_val.compareAllWithZero(.eq, pt))) {
                        return sema.fail(block, src, "exact division produced remainder", .{});
                    }
                    return Air.internedToRef((try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, pt)).toIntern());
                }
            } else break :rs rhs_src;
        } else break :rs lhs_src;
    };

    try sema.requireRuntimeBlock(block, src, runtime_src);
    // Depending on whether safety is enabled, we will have a slightly different strategy
    // here. The `div_exact` AIR instruction causes undefined behavior if a remainder
    // is produced, so in the safety check case, it cannot be used. Instead we do a
    // div_trunc and check for remainder.
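    // A sketch of the guarded lowering below, in source-level terms:
    //
    //     const q = @divTrunc(a, b);
    //     if (@rem(a, b) != 0) @panic("exact division produced remainder");
    //     // ...use q...
    //
    // For vectors, the per-lane comparison is reduced with `.And` first.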
if (block.wantSafety()) { try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int); try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); const result = try block.addBinOp(.div_trunc, casted_lhs, casted_rhs); const ok = if (!is_int) ok: { const floored = try block.addUnOp(.floor, result); if (resolved_type.zigTypeTag(mod) == .Vector) { const eql = try block.addCmpVector(result, floored, .eq); break :ok try block.addInst(.{ .tag = switch (block.float_mode) { .strict => .reduce, .optimized => .reduce_optimized, }, .data = .{ .reduce = .{ .operand = eql, .operation = .And, } }, }); } else { const is_in_range = try block.addBinOp(switch (block.float_mode) { .strict => .cmp_eq, .optimized => .cmp_eq_optimized, }, result, floored); break :ok is_in_range; } } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; if (resolved_type.zigTypeTag(mod) == .Vector) { const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = Air.internedToRef(zero_val.toIntern()); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ .tag = .reduce, .data = .{ .reduce = .{ .operand = eql, .operation = .And, } }, }); } else { const zero = Air.internedToRef(scalar_zero.toIntern()); const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero); break :ok is_in_range; } }; try sema.addSafetyCheck(block, src, ok, .exact_division_remainder); return result; } return block.addBinOp(airTag(block, is_int, .div_exact, .div_exact_optimized), casted_lhs, casted_rhs); } fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor); const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs); const maybe_rhs_val = try 
sema.resolveValueIntable(casted_rhs); const runtime_src = rs: { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } // TODO: if the RHS is one, return the LHS directly } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) { return pt.undefRef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return Air.internedToRef((try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else { return Air.internedToRef((try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs rhs_src; } else break :rs lhs_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int); try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); } return block.addBinOp(airTag(block, is_int, .div_floor, .div_floor_optimized), casted_lhs, casted_rhs); } fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); 
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc); const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs); const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs); const runtime_src = rs: { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
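// For example: `@divTrunc(-5, 3) == -1` (rounds toward zero), whereas
// `@divFloor(-5, 3) == -2` (rounds toward negative infinity). The two only
// differ when exactly one operand is negative and the division is inexact.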
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try pt.intValue(resolved_type, -1), resolved_type)) { return pt.undefRef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { var overflow_idx: ?usize = null; const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return Air.internedToRef(res.toIntern()); } else { return Air.internedToRef((try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs rhs_src; } else break :rs lhs_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { try sema.addDivIntOverflowSafety(block, src, resolved_type, lhs_scalar_ty, maybe_lhs_val, maybe_rhs_val, casted_lhs, casted_rhs, is_int); try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); } return block.addBinOp(airTag(block, is_int, .div_trunc, .div_trunc_optimized), casted_lhs, casted_rhs); } fn addDivIntOverflowSafety( sema: *Sema, block: *Block, src: LazySrcLoc, resolved_type: Type, lhs_scalar_ty: Type, maybe_lhs_val: ?Value, maybe_rhs_val: ?Value, casted_lhs: Air.Inst.Ref, casted_rhs: Air.Inst.Ref, is_int: bool, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; if (!is_int) return; // If the LHS is unsigned, it cannot cause overflow. if (!lhs_scalar_ty.isSignedInt(mod)) return; // If the LHS is widened to a larger integer type, no overflow is possible. if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) { return; } const min_int = try resolved_type.minInt(pt, resolved_type); const neg_one_scalar = try pt.intValue(lhs_scalar_ty, -1); const neg_one = try sema.splat(resolved_type, neg_one_scalar); // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. if (maybe_lhs_val) |lhs_val| { if (try lhs_val.compareAll(.neq, min_int, resolved_type, pt)) return; } // If the RHS is comptime-known to not be equal to -1, no overflow is possible. 
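// For example, with `i8` operands the only overflowing division is
// `@divTrunc(@as(i8, -128), -1)`: the mathematical result, 128, is not
// representable in `i8`.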
if (maybe_rhs_val) |rhs_val| { if (try rhs_val.compareAll(.neq, neg_one, resolved_type, pt)) return; } var ok: Air.Inst.Ref = .none; if (resolved_type.zigTypeTag(mod) == .Vector) { if (maybe_lhs_val == null) { const min_int_ref = Air.internedToRef(min_int.toIntern()); ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq); } if (maybe_rhs_val == null) { const neg_one_ref = Air.internedToRef(neg_one.toIntern()); const rhs_ok = try block.addCmpVector(casted_rhs, neg_one_ref, .neq); if (ok == .none) { ok = rhs_ok; } else { ok = try block.addBinOp(.bool_or, ok, rhs_ok); } } assert(ok != .none); ok = try block.addInst(.{ .tag = .reduce, .data = .{ .reduce = .{ .operand = ok, .operation = .And, } }, }); } else { if (maybe_lhs_val == null) { const min_int_ref = Air.internedToRef(min_int.toIntern()); ok = try block.addBinOp(.cmp_neq, casted_lhs, min_int_ref); } if (maybe_rhs_val == null) { const neg_one_ref = Air.internedToRef(neg_one.toIntern()); const rhs_ok = try block.addBinOp(.cmp_neq, casted_rhs, neg_one_ref); if (ok == .none) { ok = rhs_ok; } else { ok = try block.addBinOp(.bool_or, ok, rhs_ok); } } assert(ok != .none); } try sema.addSafetyCheck(block, src, ok, .integer_overflow); } fn addDivByZeroSafety( sema: *Sema, block: *Block, src: LazySrcLoc, resolved_type: Type, maybe_rhs_val: ?Value, casted_rhs: Air.Inst.Ref, is_int: bool, ) CompileError!void { // Strict IEEE floats have well-defined division by zero. if (!is_int and block.float_mode == .strict) return; // If rhs was comptime-known to be zero a compile error would have been // emitted above. if (maybe_rhs_val != null) return; const pt = sema.pt; const mod = pt.zcu; const scalar_zero = if (is_int) try pt.intValue(resolved_type.scalarType(mod), 0) else try pt.floatValue(resolved_type.scalarType(mod), 0.0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = Air.internedToRef(zero_val.toIntern()); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ .tag = if (is_int) .reduce else .reduce_optimized, .data = .{ .reduce = .{ .operand = ok, .operation = .And, } }, }); } else ok: { const zero = Air.internedToRef(scalar_zero.toIntern()); break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero); }; try sema.addSafetyCheck(block, src, ok, .divide_by_zero); } fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst.Tag) Air.Inst.Tag { if (is_int) return normal; return switch (block.float_mode) { .strict => normal, .optimized => optimized, }; } fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); 
const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const lhs_scalar_ty = lhs_ty.scalarType(mod); const rhs_scalar_ty = rhs_ty.scalarType(mod); const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem); const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs); const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs); const runtime_src = rs: { // For integers: // Either operand being undef is a compile error because there exists // a possible value (TODO what is it?) that would invoke illegal behavior. // TODO: can lhs undef be handled better? // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. // // For either one: if the result would be different between @mod and @rem, // then emit a compile error saying you have to pick one. if (is_int) { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; const zero_val = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = resolved_type.toIntern(), .storage = .{ .repeated_elem = scalar_zero.toIntern() }, } })) else scalar_zero; return Air.internedToRef(zero_val.toIntern()); } } else if (lhs_scalar_ty.isSignedInt(mod)) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.gte, pt))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val); // If this answer could possibly be different by doing `intMod`, // we must emit a compile error. Otherwise, it's OK. 
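// For example: `@rem(-5, 3) == -2` but `@mod(-5, 3) == 1`, so a `%` with a
// comptime-known negative lhs and a nonzero remainder is ambiguous and must be
// rejected; `@rem(6, 3)` and `@mod(6, 3)` agree (both 0), so that case is OK.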
if (!(try lhs_val.compareAllWithZeroSema(.gte, pt)) and !(try rem_result.compareAllWithZeroSema(.eq, pt))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return Air.internedToRef(rem_result.toIntern()); } break :rs lhs_src; } else if (rhs_scalar_ty.isSignedInt(mod)) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } else { break :rs rhs_src; } } // float operands if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.gte, pt))) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, pt))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } } else { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); } const air_tag = airTag(block, is_int, .rem, .rem_optimized); return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn intRem( sema: *Sema, ty: Type, lhs: Value, rhs: Value, ) CompileError!Value { const pt = sema.pt; const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); scalar.* = (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })); } return sema.intRemScalar(lhs, rhs, ty); } fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value { const pt = sema.pt; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs_q = try sema.arena.alloc( math.big.Limb, lhs_bigint.limbs.len, ); const limbs_r = try sema.arena.alloc( math.big.Limb, // TODO: consider reworking Sema to re-use Values rather than // always producing new Value objects. 
rhs_bigint.limbs.len, ); const limbs_buffer = try sema.arena.alloc( math.big.Limb, math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); return pt.intValue_big(scalar_ty, result_r.toConst()); } fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod); const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs); const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs); const runtime_src = rs: { // For integers: // Either operand being undef is a compile error because there exists // a possible value (TODO what is it?) that would invoke illegal behavior. // TODO: can lhs zero be handled better? // TODO: can lhs undef be handled better? // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
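// For example: `@mod(@as(f32, -1.0), 3.0) == 2.0`; the result of `@mod`
// takes the sign of the divisor (rhs).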
if (is_int) { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { return Air.internedToRef((try lhs_val.intMod(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } break :rs lhs_src; } else { break :rs rhs_src; } } // float operands if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { return Air.internedToRef((try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else break :rs rhs_src; } else break :rs lhs_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); } const air_tag = airTag(block, is_int, .mod, .mod_optimized); return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrIntArithmetic(block, src, lhs_ty); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem); const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs); const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs); const runtime_src = rs: { // For integers: // Either operand being undef is a compile error because there exists // a possible value (TODO what is it?) that would invoke illegal behavior. // TODO: can lhs zero be handled better? // TODO: can lhs undef be handled better? // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
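// For example: `@rem(@as(f32, -1.0), 3.0) == -1.0`; the result of `@rem`
// takes the sign of the dividend (lhs).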
if (is_int) { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { return Air.internedToRef((try sema.intRem(resolved_type, lhs_val, rhs_val)).toIntern()); } break :rs lhs_src; } else { break :rs rhs_src; } } // float operands if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroSema(.neq, pt))) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else break :rs rhs_src; } else break :rs lhs_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { try sema.addDivByZeroSafety(block, src, resolved_type, maybe_rhs_val, casted_rhs, is_int); } const air_tag = airTag(block, is_int, .rem, .rem_optimized); return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirOverflowArithmetic( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, zir_tag: Zir.Inst.Extended, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = block.nodeOffset(extra.node); const lhs_src = block.builtinCallArgSrc(extra.node, 0); const rhs_src = block.builtinCallArgSrc(extra.node, 1); const uncasted_lhs = try sema.resolveInst(extra.lhs); const uncasted_rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const instructions = &[_]Air.Inst.Ref{ uncasted_lhs, uncasted_rhs }; const dest_ty = if (zir_tag == .shl_with_overflow) lhs_ty else try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const rhs_dest_ty = if (zir_tag == .shl_with_overflow) try sema.log2IntType(block, lhs_ty, src) else dest_ty; const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src); const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src); if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) { return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(pt)}); } const maybe_lhs_val = try sema.resolveValue(lhs); const maybe_rhs_val = try sema.resolveValue(rhs); const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty); const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).anon_struct_type.types.get(ip)[1]); var result: struct { inst: Air.Inst.Ref = .none, wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { const zero_bit = try pt.intValue(Type.u1, 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, an overflow bit of 0 is returned and the other is stored // to the result, even if it is undefined. // Otherwise, if either of the arguments is undefined, undefined is returned.
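// For example, `@addWithOverflow(@as(u8, 255), 1)` yields `.{ 0, 1 }`: the
// wrapped sum plus a `u1` overflow bit, i.e. the tuple type built by
// `overflowArithmeticTupleType`.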
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } const result = try sema.intAddWithOverflow(lhs_val, rhs_val, dest_ty); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } }, .sub_with_overflow => { // If the rhs is zero, then the result is lhs and no overflow occurred. // Otherwise, if either operand is undefined, both results are undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } const result = try sema.intSubWithOverflow(lhs_val, rhs_val, dest_ty); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } }, .mul_with_overflow => { // If either of the arguments is zero, the result is zero and no overflow occurred. // If either of the arguments is one, the result is the other and no overflow occurred. // Otherwise, if either of the arguments is undefined, both results are undefined. const scalar_one = try pt.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod)) { if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } const result = try lhs_val.intMulWithOverflow(rhs_val, dest_ty, sema.arena, pt); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } }, .shl_with_overflow => { // If lhs is zero, the result is zero and no overflow occurred. // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred. // Otherwise, if either of the arguments is undefined, both results are undefined.
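// For example, `@shlWithOverflow(@as(u8, 0b1000_0000), 1)` yields `.{ 0, 1 }`:
// the high bit is shifted out, so the wrapped result is 0 with overflow set.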
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) { break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, pt); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } }, else => unreachable, } const air_tag: Air.Inst.Tag = switch (zir_tag) { .add_with_overflow => .add_with_overflow, .mul_with_overflow => .mul_with_overflow, .sub_with_overflow => .sub_with_overflow, .shl_with_overflow => .shl_with_overflow, else => unreachable, }; const runtime_src = if (maybe_lhs_val == null) lhs_src else rhs_src; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addInst(.{ .tag = air_tag, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(tuple_ty.toIntern()), .payload = try block.sema.addExtra(Air.Bin{ .lhs = lhs, .rhs = rhs, }), } }, }); }; if (result.inst != .none) { if (try sema.resolveValue(result.inst)) |some| { result.wrapped = some; result.inst = .none; } } if (result.inst == .none) { return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = tuple_ty.toIntern(), .storage = .{ .elems = &.{ result.wrapped.toIntern(), result.overflow_bit.toIntern(), } }, } }))); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); element_refs[0] = result.inst; element_refs[1] = Air.internedToRef(result.overflow_bit.toIntern()); return block.addAggregateInit(tuple_ty, element_refs); } fn splat(sema: *Sema, ty: Type, val: Value) !Value { const pt = sema.pt; const mod = pt.zcu; if (ty.zigTypeTag(mod) != .Vector) return val; const repeated = try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .repeated_elem = val.toIntern() }, } }); return Value.fromInterned(repeated); } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .u1_type, }) else Type.u1; const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; const tuple_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{ .types = &types, .values = &values, .names = &.{}, }); return Type.fromInterned(tuple_ty); } fn analyzeArithmetic( sema: *Sema, block: *Block, /// TODO performance investigation: make this comptime? 
zir_tag: Zir.Inst.Tag, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, want_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); if (lhs_zig_ty_tag == .Pointer) { if (rhs_zig_ty_tag == .Pointer) { if (lhs_ty.ptrSize(mod) != .Slice and rhs_ty.ptrSize(mod) != .Slice) { if (zir_tag != .sub) { return sema.failWithInvalidPtrArithmetic(block, src, "pointer-pointer", "subtraction"); } if (!lhs_ty.elemType2(mod).eql(rhs_ty.elemType2(mod), mod)) { return sema.fail(block, src, "incompatible pointer arithmetic operands '{}' and '{}'", .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt), }); } const elem_size = lhs_ty.elemType2(mod).abiSize(pt); if (elem_size == 0) { return sema.fail(block, src, "pointer arithmetic requires element type '{}' to have runtime bits", .{ lhs_ty.elemType2(mod).fmt(pt), }); } const runtime_src = runtime_src: { if (try sema.resolveValue(lhs)) |lhs_value| { if (try sema.resolveValue(rhs)) |rhs_value| { const lhs_ptr = switch (mod.intern_pool.indexToKey(lhs_value.toIntern())) { .undef => return sema.failWithUseOfUndef(block, lhs_src), .ptr => |ptr| ptr, else => unreachable, }; const rhs_ptr = switch (mod.intern_pool.indexToKey(rhs_value.toIntern())) { .undef => return sema.failWithUseOfUndef(block, rhs_src), .ptr => |ptr| ptr, else => unreachable, }; // Make sure the pointers point to the same data. if (!lhs_ptr.base_addr.eql(rhs_ptr.base_addr)) break :runtime_src src; const address = std.math.sub(u64, lhs_ptr.byte_offset, rhs_ptr.byte_offset) catch return sema.fail(block, src, "operation results in overflow", .{}); const result = address / elem_size; return try pt.intRef(Type.usize, result); } else { break :runtime_src lhs_src; } } else { break :runtime_src rhs_src; } }; try sema.requireRuntimeBlock(block, src, runtime_src); const lhs_int = try block.addUnOp(.int_from_ptr, lhs); const rhs_int = try block.addUnOp(.int_from_ptr, rhs); const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int); return try block.addBinOp(.div_exact, address, try pt.intRef(Type.usize, elem_size)); } } else { switch (lhs_ty.ptrSize(mod)) { .One, .Slice => {}, .Many, .C => { const air_tag: Air.Inst.Tag = switch (zir_tag) { .add => .ptr_add, .sub => .ptr_sub, else => return sema.failWithInvalidPtrArithmetic(block, src, "pointer-integer", "addition and subtraction"), }; if (!try sema.typeHasRuntimeBits(lhs_ty.elemType2(mod))) { return sema.fail(block, src, "pointer arithmetic requires element type '{}' to have runtime bits", .{ lhs_ty.elemType2(mod).fmt(pt), }); } return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src); }, } } } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const scalar_type = resolved_type.scalarType(mod); const scalar_tag = scalar_type.zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag); const maybe_lhs_val = try 
sema.resolveValueIntable(casted_lhs); const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs); const runtime_src: LazySrcLoc, const air_tag: Air.Inst.Tag, const air_tag_safe: Air.Inst.Tag = rs: { switch (zir_tag) { .add, .add_unsafe => { // For integers: // If either of the operands are zero, then the other operand is // returned, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the addition would // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return pt.undefRef(resolved_type); } } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .add_optimized else .add; if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return pt.undefRef(resolved_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { var overflow_idx: ?usize = null; const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type, &overflow_idx); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vec_idx); } return Air.internedToRef(sum.toIntern()); } else { return Air.internedToRef((try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs .{ rhs_src, air_tag, .add_safe }; } else break :rs .{ lhs_src, air_tag, .add_safe }; }, .addwrap => { // Integers only; floats are checked above. // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { return Air.internedToRef((try sema.numberAddWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern()); } else break :rs .{ lhs_src, .add_wrap, .add_wrap }; } else break :rs .{ rhs_src, .add_wrap, .add_wrap }; }, .add_sat => { // Integers only; floats are checked above. // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined.
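// For example, `@as(u8, 250) +| 10 == 255`: saturating addition clamps to the
// maximum (or minimum) representable value instead of overflowing.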
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined) else try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else break :rs .{ lhs_src, .add_sat, .add_sat, }; } else break :rs .{ rhs_src, .add_sat, .add_sat, }; }, .sub => { // For integers: // If the rhs is zero, then the other operand is // returned, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the subtraction would // overflow, causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return pt.undefRef(resolved_type); } } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .sub_optimized else .sub; if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return pt.undefRef(resolved_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { var overflow_idx: ?usize = null; const diff = try sema.intSub(lhs_val, rhs_val, resolved_type, &overflow_idx); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vec_idx); } return Air.internedToRef(diff.toIntern()); } else { return Air.internedToRef((try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs .{ rhs_src, air_tag, .sub_safe }; } else break :rs .{ lhs_src, air_tag, .sub_safe }; }, .subwrap => { // Integers only; floats are checked above. // If the RHS is zero, then the LHS is returned, even if it is undefined. // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { return Air.internedToRef((try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type)).toIntern()); } else break :rs .{ rhs_src, .sub_wrap, .sub_wrap }; } else break :rs .{ lhs_src, .sub_wrap, .sub_wrap }; }, .sub_sat => { // Integers only; floats are checked above. // If the RHS is zero, then the LHS is returned, even if it is undefined. // If either of the operands are undefined, the result is undefined. 
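// For example, `@as(u8, 5) -| 10 == 0`: saturating subtraction clamps at the
// type's minimum rather than wrapping.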
if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (maybe_rhs_val) |rhs_val| { const val = if (scalar_tag == .ComptimeInt) try sema.intSub(lhs_val, rhs_val, resolved_type, undefined) else try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else break :rs .{ rhs_src, .sub_sat, .sub_sat }; } else break :rs .{ lhs_src, .sub_sat, .sub_sat }; }, .mul => { // For integers: // If either of the operands are zero, the result is zero. // If either of the operands are one, the result is the other // operand, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the multiplication would // overflow (max_int), causing illegal behavior. // // For floats: // If either of the operands are undefined, the result is undefined. // If either of the operands are inf, and the other operand is zero, // the result is nan. // If either of the operands are nan, the result is nan. const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0), .ComptimeInt, .Int => try pt.intValue(scalar_type, 0), else => unreachable, }; const scalar_one = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0), .ComptimeInt, .Int => try pt.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (lhs_val.isNan(mod)) { return Air.internedToRef(lhs_val.toIntern()); } if (try lhs_val.compareAllWithZeroSema(.eq, pt)) lz: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } if (rhs_val.isInf(mod)) { return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern()); } } else if (resolved_type.isAnyFloat()) { break :lz; } const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .mul_optimized else .mul; if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return pt.undefRef(resolved_type); } } if (rhs_val.isNan(mod)) { return Air.internedToRef(rhs_val.toIntern()); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) rz: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isInf(mod)) { return Air.internedToRef((try pt.floatValue(resolved_type, std.math.nan(f128))).toIntern()); } } else if (resolved_type.isAnyFloat()) { break :rz; } const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return pt.undefRef(resolved_type); } } if (is_int) { var overflow_idx: ?usize = null; const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, pt); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx); } return
Air.internedToRef(product.toIntern()); } else { return Air.internedToRef((try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } } else break :rs .{ lhs_src, air_tag, .mul_safe }; } else break :rs .{ rhs_src, air_tag, .mul_safe }; }, .mulwrap => { // Integers only; floats are handled above. // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0), .ComptimeInt, .Int => try pt.intValue(scalar_type, 0), else => unreachable, }; const scalar_one = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0), .ComptimeInt, .Int => try pt.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } return Air.internedToRef((try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, pt)).toIntern()); } else break :rs .{ lhs_src, .mul_wrap, .mul_wrap }; } else break :rs .{ rhs_src, .mul_wrap, .mul_wrap }; }, .mul_sat => { // Integers only; floats are checked above. // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
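// For example, `@as(u8, 100) *| 3 == 255`: the mathematical result 300 exceeds
// `maxInt(u8)`, so saturating multiplication clamps to 255.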
const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0), .ComptimeInt, .Int => try pt.intValue(scalar_type, 0), else => unreachable, }; const scalar_one = switch (scalar_tag) { .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0), .ComptimeInt, .Int => try pt.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } if (try rhs_val.compareAllWithZeroSema(.eq, pt)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return Air.internedToRef(zero_val.toIntern()); } if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { return pt.undefRef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, pt) else try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, pt); return Air.internedToRef(val.toIntern()); } else break :rs .{ lhs_src, .mul_sat, .mul_sat }; } else break :rs .{ rhs_src, .mul_sat, .mul_sat }; }, else => unreachable, } }; try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety() and want_safety and scalar_tag == .Int) { if (mod.backendSupportsFeature(.safety_checked_instructions)) { if (air_tag != air_tag_safe) { _ = try sema.preparePanicId(block, src, .integer_overflow); } return block.addBinOp(air_tag_safe, casted_lhs, casted_rhs); } else { const maybe_op_ov: ?Air.Inst.Tag = switch (air_tag) { .add => .add_with_overflow, .sub => .sub_with_overflow, .mul => .mul_with_overflow, else => null, }; if (maybe_op_ov) |op_ov_tag| { const op_ov_tuple_ty = try sema.overflowArithmeticTupleType(resolved_type); const op_ov = try block.addInst(.{ .tag = op_ov_tag, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(op_ov_tuple_ty.toIntern()), .payload = try sema.addExtra(Air.Bin{ .lhs = casted_lhs, .rhs = casted_rhs, }), } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = ov_bit, .operation = .Or, } }, }) else ov_bit; const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, src, no_ov, .integer_overflow); return sema.tupleFieldValByIndex(block, src, op_ov, 0, op_ov_tuple_ty); } } } return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn analyzePtrArithmetic( sema: *Sema, block: *Block, op_src: LazySrcLoc, ptr: Air.Inst.Ref, uncasted_offset: Air.Inst.Ref, air_tag: Air.Inst.Tag, ptr_src: LazySrcLoc, offset_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. 
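// For example (illustrative): advancing a `[*]align(8) u32` by one element adds
// 4 bytes, so only 4-byte alignment can be guaranteed and the result type is
// `[*]align(4) u32`; see the `new_ptr_ty` alignment recalculation below.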
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); const pt = sema.pt; const mod = pt.zcu; const opt_ptr_val = try sema.resolveValue(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); const ptr_info = ptr_ty.ptrInfo(mod); assert(ptr_info.flags.size == .Many or ptr_info.flags.size == .C); const new_ptr_ty = t: { // Calculate the new pointer alignment. // This code is duplicated in `Type.elemPtrType`. if (ptr_info.flags.alignment == .none) { // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness. break :t ptr_ty; } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const addend = if (opt_off_val) |off_val| a: { const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(pt)); break :a elem_size * off_int; } else elem_size; // The resulting pointer is aligned to the lcd between the offset (an // arbitrary number) and the alignment factor (always a power of two, // non zero). const new_align: Alignment = @enumFromInt(@min( @ctz(addend), @intFromEnum(ptr_info.flags.alignment), )); assert(new_align != .none); break :t try pt.ptrTypeSema(.{ .child = ptr_info.child, .sentinel = ptr_info.sentinel, .flags = .{ .size = ptr_info.flags.size, .alignment = new_align, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, }, }); }; const runtime_src = rs: { if (opt_ptr_val) |ptr_val| { if (opt_off_val) |offset_val| { if (ptr_val.isUndef(mod)) return pt.undefRef(new_ptr_ty); const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(pt)); if (offset_int == 0) return ptr; if (air_tag == .ptr_sub) { const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child)); const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } else { const new_ptr_val = try pt.getCoerced(try ptr_val.ptrElem(offset_int, pt), new_ptr_ty); return Air.internedToRef(new_ptr_val.toIntern()); } } else break :rs offset_src; } else break :rs ptr_src; }; try sema.requireRuntimeBlock(block, op_src, runtime_src); return block.addInst(.{ .tag = air_tag, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(new_ptr_ty.toIntern()), .payload = try sema.addExtra(Air.Bin{ .lhs = ptr, .rhs = offset, }), } }, }); } fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ptr_src = src; // TODO better source location const ptr = try sema.resolveInst(inst_data.operand); return sema.analyzeLoad(block, src, ptr, ptr_src); } fn zirAsm( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, tmpl_is_expr: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); const src = block.nodeOffset(extra.data.src_node); const ret_ty_src = block.src(.{ .node_offset_asm_ret_ty = extra.data.src_node }); const outputs_len: u5 = @truncate(extended.small); const inputs_len: u5 = 
@truncate(extended.small >> 5); const clobbers_len: u5 = @truncate(extended.small >> 10); const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0; const is_global_assembly = sema.func_index == .none; const zir_tags = sema.code.instructions.items(.tag); const asm_source: []const u8 = if (tmpl_is_expr) blk: { const tmpl: Zir.Inst.Ref = @enumFromInt(@intFromEnum(extra.data.asm_source)); const s: []const u8 = try sema.resolveConstString(block, src, tmpl, .{ .needed_comptime_reason = "assembly code must be comptime-known", }); break :blk s; } else sema.code.nullTerminatedString(extra.data.asm_source); if (is_global_assembly) { if (outputs_len != 0) { return sema.fail(block, src, "module-level assembly does not support outputs", .{}); } if (inputs_len != 0) { return sema.fail(block, src, "module-level assembly does not support inputs", .{}); } if (clobbers_len != 0) { return sema.fail(block, src, "module-level assembly does not support clobbers", .{}); } if (is_volatile) { return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{}); } try mod.addGlobalAssembly(sema.owner.unwrap().cau, asm_source); return .void_value; } try sema.requireRuntimeBlock(block, src, null); var extra_i = extra.end; var output_type_bits = extra.data.output_type_bits; var needed_capacity: usize = @typeInfo(Air.Asm).Struct.fields.len + outputs_len + inputs_len; const ConstraintName = struct { c: []const u8, n: []const u8 }; const out_args = try sema.arena.alloc(Air.Inst.Ref, outputs_len); const outputs = try sema.arena.alloc(ConstraintName, outputs_len); var expr_ty = Air.Inst.Ref.void_type; for (out_args, 0..) |*arg, out_i| { const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i); extra_i = output.end; const is_type = @as(u1, @truncate(output_type_bits)) != 0; output_type_bits >>= 1; if (is_type) { // Indicate the output is the asm instruction return value. arg.* = .none; const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand); expr_ty = Air.internedToRef(out_ty.toIntern()); } else { arg.* = try sema.resolveInst(output.data.operand); } const constraint = sema.code.nullTerminatedString(output.data.constraint); const name = sema.code.nullTerminatedString(output.data.name); needed_capacity += (constraint.len + name.len + (2 + 3)) / 4; if (output.data.operand.toIndex()) |index| { if (zir_tags[@intFromEnum(index)] == .ref) { // TODO: better error location; it would be even nicer if there were notes that pointed at the output and the variable definition return sema.fail(block, src, "asm cannot output to const local '{s}'", .{name}); } } outputs[out_i] = .{ .c = constraint, .n = name }; } const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc(ConstraintName, inputs_len); for (args, 0..) 
|*arg, arg_i| { const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); extra_i = input.end; const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); switch (uncasted_arg_ty.zigTypeTag(mod)) { .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src), .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; }, } const constraint = sema.code.nullTerminatedString(input.data.constraint); const name = sema.code.nullTerminatedString(input.data.name); needed_capacity += (constraint.len + name.len + (2 + 3)) / 4; inputs[arg_i] = .{ .c = constraint, .n = name }; } const clobbers = try sema.arena.alloc([]const u8, clobbers_len); for (clobbers) |*name| { const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_i]); name.* = sema.code.nullTerminatedString(name_index); extra_i += 1; needed_capacity += name.*.len / 4 + 1; } needed_capacity += (asm_source.len + 3) / 4; const gpa = sema.gpa; try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity); const asm_air = try block.addInst(.{ .tag = .assembly, .data = .{ .ty_pl = .{ .ty = expr_ty, .payload = sema.addExtraAssumeCapacity(Air.Asm{ .source_len = @intCast(asm_source.len), .outputs_len = outputs_len, .inputs_len = @intCast(args.len), .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers.len)), }), } }, }); sema.appendRefsAssumeCapacity(out_args); sema.appendRefsAssumeCapacity(args); for (outputs) |o| { const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice()); @memcpy(buffer[0..o.c.len], o.c); buffer[o.c.len] = 0; @memcpy(buffer[o.c.len + 1 ..][0..o.n.len], o.n); buffer[o.c.len + 1 + o.n.len] = 0; sema.air_extra.items.len += (o.c.len + o.n.len + (2 + 3)) / 4; } for (inputs) |input| { const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice()); @memcpy(buffer[0..input.c.len], input.c); buffer[input.c.len] = 0; @memcpy(buffer[input.c.len + 1 ..][0..input.n.len], input.n); buffer[input.c.len + 1 + input.n.len] = 0; sema.air_extra.items.len += (input.c.len + input.n.len + (2 + 3)) / 4; } for (clobbers) |clobber| { const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice()); @memcpy(buffer[0..clobber.len], clobber); buffer[clobber.len] = 0; sema.air_extra.items.len += clobber.len / 4 + 1; } { const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice()); @memcpy(buffer[0..asm_source.len], asm_source); sema.air_extra.items.len += (asm_source.len + 3) / 4; } return asm_air; } /// Only called for equality operators. See also `zirCmp`. 
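/// A sketch of the user-level comparisons routed here (illustrative user
/// code, not taken from this file; `U` is a hypothetical type):
///   const opt: ?u32 = null;
///   _ = opt == null; // handled via `analyzeIsNull`
///   const U = union(enum) { a: u32, b: void };
///   _ = U{ .a = 1 } == .a; // handled via `analyzeCmpUnionTag`
///   _ = u32 == u64; // type equality, resolved at comptime to `false`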
fn zirCmpEq( sema: *Sema, block: *Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_ty_tag = lhs_ty.zigTypeTag(mod); const rhs_ty_tag = rhs_ty.zigTypeTag(mod); if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null return if (op == .eq) .bool_true else .bool_false; } // comparing null with optionals if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, rhs, op == .neq); } if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, lhs, op == .neq); } if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(pt)}); } if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) { return sema.analyzeCmpUnionTag(block, src, lhs, lhs_src, rhs, rhs_src, op); } if (rhs_ty_tag == .Union and (lhs_ty_tag == .EnumLiteral or lhs_ty_tag == .Enum)) { return sema.analyzeCmpUnionTag(block, src, rhs, rhs_src, lhs, lhs_src, op); } if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) { const runtime_src: LazySrcLoc = src: { if (try sema.resolveValue(lhs)) |lval| { if (try sema.resolveValue(rhs)) |rval| { if (lval.isUndef(mod) or rval.isUndef(mod)) { return pt.undefRef(Type.bool); } const lkey = mod.intern_pool.indexToKey(lval.toIntern()); const rkey = mod.intern_pool.indexToKey(rval.toIntern()); return if ((lkey.err.name == rkey.err.name) == (op == .eq)) .bool_true else .bool_false; } else { break :src rhs_src; } } else { break :src lhs_src; } }; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addBinOp(air_tag, lhs, rhs); } if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); return if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) .bool_true else .bool_false; } return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true); } fn analyzeCmpUnionTag( sema: *Sema, block: *Block, src: LazySrcLoc, un: Air.Inst.Ref, un_src: LazySrcLoc, tag: Air.Inst.Ref, tag_src: LazySrcLoc, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const union_ty = sema.typeOf(un); try union_ty.resolveFields(pt); const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(union_ty.srcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(pt)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; // Coerce both the union and the tag to the 
union's tag type, and then execute the // enum comparison codepath. const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src); const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveValue(coerced_tag)) |enum_val| { if (enum_val.isUndef(mod)) return pt.undefRef(Type.bool); const field_ty = union_ty.unionFieldType(enum_val, mod).?; if (field_ty.zigTypeTag(mod) == .NoReturn) { return .bool_false; } } return sema.cmpSelf(block, src, coerced_union, coerced_tag, op, un_src, tag_src); } /// Only called for non-equality operators. See also `zirCmpEq`. fn zirCmp( sema: *Sema, block: *Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = block.nodeOffset(inst_data.src_node); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false); } fn analyzeCmp( sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, is_equality_cmp: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) { try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); } if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) { return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src); } if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. 
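// Illustrative example (not from this file): `@as(u64, 1) > @as(i64, -1)`
// is accepted here even though any resolved peer integer type would be
// lossy; `cmpNumeric` instead compares the operands by their mathematical
// values.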
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) { const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs); return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src); } if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) { const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs); return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src); } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) { return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{ compareOperatorName(op), resolved_type.fmt(pt), }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); return sema.cmpSelf(block, src, casted_lhs, casted_rhs, op, lhs_src, rhs_src); } fn compareOperatorName(comp: std.math.CompareOperator) []const u8 { return switch (comp) { .lt => "<", .lte => "<=", .eq => "==", .gte => ">=", .gt => ">", .neq => "!=", }; } fn cmpSelf( sema: *Sema, block: *Block, src: LazySrcLoc, casted_lhs: Air.Inst.Ref, casted_rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveValue(casted_lhs)) |lhs_val| { if (lhs_val.isUndef(mod)) return pt.undefRef(Type.bool); if (try sema.resolveValue(casted_rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool); if (resolved_type.zigTypeTag(mod) == .Vector) { const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return Air.internedToRef(cmp_val.toIntern()); } return if (try sema.compareAll(lhs_val, op, rhs_val, resolved_type)) .bool_true else .bool_false; } else { if (resolved_type.zigTypeTag(mod) == .Bool) { // We can lower bool eq/neq more efficiently. return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src); } break :src rhs_src; } } else { // For bools, we still check the other operand, because we can lower // bool eq/neq more efficiently. 
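// Illustrative example (not from this file): for `runtime_x == false`,
// only the rhs is comptime-known, so the branch below emits
// `not(runtime_x)` via `runtimeBoolCmp` rather than a full `cmp_eq`.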
if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveValue(casted_rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); } } break :src lhs_src; } }; try sema.requireRuntimeBlock(block, src, runtime_src); if (resolved_type.zigTypeTag(mod) == .Vector) { return block.addCmpVector(casted_lhs, casted_rhs, op); } const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized); return block.addBinOp(tag, casted_lhs, casted_rhs); } /// cmp_eq (x, false) => not(x) /// cmp_eq (x, true ) => x /// cmp_neq(x, false) => x /// cmp_neq(x, true ) => not(x) fn runtimeBoolCmp( sema: *Sema, block: *Block, src: LazySrcLoc, op: std.math.CompareOperator, lhs: Air.Inst.Ref, rhs: bool, runtime_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { if ((op == .neq) == rhs) { try sema.requireRuntimeBlock(block, src, runtime_src); return block.addTyOp(.not, Type.bool, lhs); } else { return lhs; } } fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, operand_src, inst_data.operand); switch (ty.zigTypeTag(pt.zcu)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(pt)}), .Type, .EnumLiteral, .ComptimeFloat, .ComptimeInt, .Void, => return pt.intRef(Type.comptime_int, 0), .Bool, .Int, .Float, .Pointer, .Array, .Struct, .Optional, .ErrorUnion, .ErrorSet, .Enum, .Union, .Vector, .Frame, .AnyFrame, => {}, } const val = try ty.lazyAbiSize(pt); return Air.internedToRef(val.toIntern()); } fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); switch (operand_ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(pt)}), .Type, .EnumLiteral, .ComptimeFloat, .ComptimeInt, .Void, => return pt.intRef(Type.comptime_int, 0), .Bool, .Int, .Float, .Pointer, .Array, .Struct, .Optional, .ErrorUnion, .ErrorSet, .Enum, .Union, .Vector, .Frame, .AnyFrame, => {}, } const bit_size = try operand_ty.bitSizeAdvanced(pt, .sema); return pt.intRef(Type.comptime_int, bit_size); } fn zirThis( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { _ = extended; const pt = sema.pt; const namespace = pt.zcu.namespacePtr(block.namespace); const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type, false); switch (pt.zcu.intern_pool.indexToKey(new_ty)) { .struct_type, .union_type, .enum_type => try sema.declareDependency(.{ .interned = new_ty }), .opaque_type => {}, else => unreachable, } return Air.internedToRef(new_ty); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const captures = Type.fromInterned(mod.namespacePtr(block.namespace).owner_type).getCaptures(mod); const src_node: i32 = @bitCast(extended.operand); const src = 
block.nodeOffset(src_node); const capture_ty = switch (captures.get(ip)[extended.small].unwrap()) { .@"comptime" => |index| return Air.internedToRef(index), .runtime => |index| index, .nav_val => |nav| return sema.analyzeNavVal(block, src, nav), .nav_ref => |nav| return sema.analyzeNavRef(src, nav), }; // The comptime case is handled already above. Runtime case below. if (!block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { // TODO: we should probably store this name in the ZIR to avoid this complexity. const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?; const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), }); break :name null; }; const node: std.zig.Ast.Node.Index = @bitCast(src_node + @as(i32, @bitCast(src_base_node))); const token = tree.nodes.items(.main_token)[node]; break :name tree.tokenSlice(token); }; const msg = if (name) |some| try sema.errMsg(src, "'{s}' not accessible outside function scope", .{some}) else try sema.errMsg(src, "variable not accessible outside function scope", .{}); errdefer msg.destroy(sema.gpa); // TODO add "declared here" note break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (!block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?; const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), }); break :name null; }; const node: std.zig.Ast.Node.Index = @bitCast(src_node + @as(i32, @bitCast(src_base_node))); const token = tree.nodes.items(.main_token)[node]; break :name tree.tokenSlice(token); }; const msg = if (name) |some| try sema.errMsg(src, "'{s}' not accessible from inner function", .{some}) else try sema.errMsg(src, "variable not accessible from inner function", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block.nodeOffset(0), msg, "crossed function definition here", .{}); // TODO add "declared here" note break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } assert(block.is_typeof); // We need a dummy runtime instruction with the correct type. return block.addTy(.alloc, Type.fromInterned(capture_ty)); } fn zirRetAddr( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { _ = extended; if (block.is_comptime) { // TODO: we could give a meaningful lazy value here. 
#14938 return sema.pt.intRef(Type.usize, 0); } else { return block.addNoOp(.ret_addr); } } fn zirFrameAddress( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); try sema.requireRuntimeBlock(block, src, null); return try block.addNoOp(.frame_addr); } fn zirBuiltinSrc( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const fn_name = ip.getNav(zcu.funcInfo(sema.func_index).owner_nav).name; const gpa = sema.gpa; const file_scope = block.getFileScope(zcu); const func_name_val = v: { const func_name_len = fn_name.length(ip); const array_ty = try pt.intern(.{ .array_type = .{ .len = func_name_len, .sentinel = .zero_u8, .child = .u8_type, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ .bytes = fn_name.toString() }, } }), } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, func_name_len)).toIntern(), } }); }; const module_name_val = v: { const module_name = file_scope.mod.fully_qualified_name; const array_ty = try pt.intern(.{ .array_type = .{ .len = module_name.len, .sentinel = .zero_u8, .child = .u8_type, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ .bytes = try ip.getOrPutString(gpa, pt.tid, module_name, .maybe_embedded_nulls), }, } }), } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, module_name.len)).toIntern(), } }); }; const file_name_val = v: { const file_name = file_scope.sub_file_path; const array_ty = try pt.intern(.{ .array_type = .{ .len = file_name.len, .sentinel = .zero_u8, .child = .u8_type, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, .storage = .{ .bytes = try ip.getOrPutString(gpa, pt.tid, file_name, .maybe_embedded_nulls), }, } }), } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, file_name.len)).toIntern(), } }); }; const src_loc_ty = try pt.getBuiltinType("SourceLocation"); const fields = .{ // module: [:0]const u8, module_name_val, // file: [:0]const u8, file_name_val, // fn_name: [:0]const u8, func_name_val, // line: u32, (try pt.intValue(Type.u32, extra.line + 1)).toIntern(), // column: u32, (try pt.intValue(Type.u32, extra.column + 1)).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = src_loc_ty.toIntern(), .storage = .{ .elems = &fields }, } }))); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = 
block.nodeOffset(inst_data.src_node); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try pt.getBuiltinType("Type"); const type_info_tag_ty = type_info_ty.unionTagType(mod).?; if (ty.typeDeclInst(mod)) |type_decl_inst| { try sema.declareDependency(.{ .namespace = type_decl_inst }); } switch (ty.zigTypeTag(mod)) { .Type, .Void, .Bool, .NoReturn, .ComptimeFloat, .ComptimeInt, .Undefined, .Null, .EnumLiteral, => |type_info_tag| return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(), .val = .void_value, } }))), .Fn => { const fn_info_nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Fn", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, fn_info_nav); const fn_info_ty = Type.fromInterned(ip.getNav(fn_info_nav).status.resolved.val); const param_info_nav = try sema.namespaceLookup( block, src, fn_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Param", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, param_info_nav); const param_info_ty = Type.fromInterned(ip.getNav(param_info_nav).status.resolved.val); const func_ty_info = mod.typeToFunc(ty).?; const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); for (param_vals, 0..) |*param_val, i| { const param_ty = func_ty_info.param_types.get(ip)[i]; const is_generic = param_ty == .generic_poison_type; const param_ty_val = try pt.intern(.{ .opt = .{ .ty = try pt.intern(.{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @as(u1, @truncate(func_ty_info.noalias_bits >> index)) != 0; }; const param_fields = .{ // is_generic: bool, Value.makeBool(is_generic).toIntern(), // is_noalias: bool, Value.makeBool(is_noalias).toIntern(), // type: ?type, param_ty_val, }; param_val.* = try pt.intern(.{ .aggregate = .{ .ty = param_info_ty.toIntern(), .storage = .{ .elems = &param_fields }, } }); } const args_val = v: { const new_decl_ty = try pt.arrayType(.{ .len = param_vals.len, .child = param_info_ty.toIntern(), }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .elems = param_vals }, } }); const slice_ty = (try pt.ptrTypeSema(.{ .child = param_info_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, param_vals.len)).toIntern(), } }); }; const ret_ty_opt = try pt.intern(.{ .opt = .{ .ty = try pt.intern(.{ .opt_type = .type_type }), .val = if (func_ty_info.return_type == .generic_poison_type) .none else func_ty_info.return_type, } }); const callconv_ty = try pt.getBuiltinType("CallingConvention"); const field_values = .{ // calling_convention: CallingConvention, (try pt.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(), // is_generic: bool, Value.makeBool(func_ty_info.is_generic).toIntern(), // is_var_args: bool,
Value.makeBool(func_ty_info.is_var_args).toIntern(), // return_type: ?type, ret_ty_opt, // args: []const Fn.Param, args_val, }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = fn_info_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Int => { const int_info_nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Int", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, int_info_nav); const int_info_ty = Type.fromInterned(ip.getNav(int_info_nav).status.resolved.val); const signedness_ty = try pt.getBuiltinType("Signedness"); const info = ty.intInfo(mod); const field_values = .{ // signedness: Signedness, (try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(), // bits: u16, (try pt.intValue(Type.u16, info.bits)).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = int_info_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Float => { const float_info_nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Float", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, float_info_nav); const float_info_ty = Type.fromInterned(ip.getNav(float_info_nav).status.resolved.val); const field_vals = .{ // bits: u16, (try pt.intValue(Type.u16, ty.bitSize(pt))).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = float_info_ty.toIntern(), .storage = .{ .elems = &field_vals }, } }), } }))); }, .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.flags.alignment.toByteUnits()) |alignment| try pt.intValue(Type.comptime_int, alignment) else try Type.fromInterned(info.child).lazyAbiAlignment(pt); const addrspace_ty = try pt.getBuiltinType("AddressSpace"); const pointer_ty = t: { const nav = try sema.namespaceLookup( block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Pointer", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const ptr_size_ty = t: { const nav = try sema.namespaceLookup( block, src, pointer_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Size", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ // size: Size, (try pt.enumValueFieldIndex(ptr_size_ty, @intFromEnum(info.flags.size))).toIntern(), // is_const: bool, Value.makeBool(info.flags.is_const).toIntern(), // is_volatile: bool, Value.makeBool(info.flags.is_volatile).toIntern(), // alignment: comptime_int, alignment.toIntern(), // address_space: AddressSpace (try pt.enumValueFieldIndex(addrspace_ty, 
@intFromEnum(info.flags.address_space))).toIntern(), // child: type, info.child, // is_allowzero: bool, Value.makeBool(info.flags.is_allowzero).toIntern(), // sentinel: ?*const anyopaque, (try sema.optRefValue(switch (info.sentinel) { .none => null, else => Value.fromInterned(info.sentinel), })).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = pointer_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Array => { const array_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Array", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const info = ty.arrayInfo(mod); const field_values = .{ // len: comptime_int, (try pt.intValue(Type.comptime_int, info.len)).toIntern(), // child: type, info.elem_type.toIntern(), // sentinel: ?*const anyopaque, (try sema.optRefValue(info.sentinel)).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = array_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Vector => { const vector_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Vector", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const info = ty.arrayInfo(mod); const field_values = .{ // len: comptime_int, (try pt.intValue(Type.comptime_int, info.len)).toIntern(), // child: type, info.elem_type.toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = vector_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Optional => { const optional_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Optional", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ // child: type, ty.optionalChild(mod).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = optional_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .ErrorSet => { // Get the Error type const error_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Error", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; // Build our list 
of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) { .anyerror_type => null, else => |err_set_ty_index| blk: { const names = ip.indexToKey(err_set_ty_index).error_set_type.names; const vals = try sema.arena.alloc(InternPool.Index, names.len); for (vals, 0..) |*field_val, error_index| { const error_name = names.get(ip)[error_index]; const error_name_len = error_name.length(ip); const error_name_val = v: { const new_decl_ty = try pt.arrayType(.{ .len = error_name_len, .sentinel = .zero_u8, .child = .u8_type, }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = error_name.toString() }, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, error_name_len)).toIntern(), } }); }; const error_field_fields = .{ // name: [:0]const u8, error_name_val, }; field_val.* = try pt.intern(.{ .aggregate = .{ .ty = error_field_ty.toIntern(), .storage = .{ .elems = &error_field_fields }, } }); } break :blk vals; }, }; // Build our ?[]const Error value const slice_errors_ty = try pt.ptrTypeSema(.{ .child = error_field_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, }); const opt_slice_errors_ty = try pt.optionalType(slice_errors_ty.toIntern()); const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { const array_errors_ty = try pt.arrayType(.{ .len = vals.len, .child = error_field_ty.toIntern(), }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_errors_ty.toIntern(), .storage = .{ .elems = vals }, } }); const manyptr_errors_ty = slice_errors_ty.slicePtrFieldType(mod).toIntern(); break :v try pt.intern(.{ .slice = .{ .ty = slice_errors_ty.toIntern(), .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_errors_ty, .base_addr = .{ .uav = .{ .orig_ty = manyptr_errors_ty, .val = new_decl_val, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, vals.len)).toIntern(), } }); } else .none; const errors_val = try pt.intern(.{ .opt = .{ .ty = opt_slice_errors_ty.toIntern(), .val = errors_payload_val, } }); // Construct Type{ .ErrorSet = errors_val } return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(), .val = errors_val, } }))); }, .ErrorUnion => { const error_union_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "ErrorUnion", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ // error_set: type, ty.errorUnionSet(mod).toIntern(), // payload: type, ty.errorUnionPayload(mod).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = error_union_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Enum => { const 
is_exhaustive = Value.makeBool(ip.loadEnumType(ty.toIntern()).tag_mode != .nonexhaustive); const enum_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "EnumField", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.loadEnumType(ty.toIntern()).names.len); for (enum_field_vals, 0..) |*field_val, tag_index| { const enum_type = ip.loadEnumType(ty.toIntern()); const value_val = if (enum_type.values.len > 0) try ip.getCoercedInts( mod.gpa, pt.tid, ip.indexToKey(enum_type.values.get(ip)[tag_index]).int, .comptime_int_type, ) else (try pt.intValue(Type.comptime_int, tag_index)).toIntern(); // TODO: write something like getCoercedInts to avoid needing to dupe const name_val = v: { const tag_name = enum_type.names.get(ip)[tag_index]; const tag_name_len = tag_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = tag_name_len, .sentinel = .zero_u8, .child = .u8_type, }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = tag_name.toString() }, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, tag_name_len)).toIntern(), } }); }; const enum_field_fields = .{ // name: [:0]const u8, name_val, // value: comptime_int, value_val, }; field_val.* = try pt.intern(.{ .aggregate = .{ .ty = enum_field_ty.toIntern(), .storage = .{ .elems = &enum_field_fields }, } }); } const fields_val = v: { const fields_array_ty = try pt.arrayType(.{ .len = enum_field_vals.len, .child = enum_field_ty.toIntern(), }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } }); const slice_ty = (try pt.ptrTypeSema(.{ .child = enum_field_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = manyptr_ty, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, enum_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.loadEnumType(ty.toIntern()).namespace.toOptional()); const type_enum_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Enum", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ // tag_type: type, ip.loadEnumType(ty.toIntern()).tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, decls_val, // is_exhaustive: bool, is_exhaustive.toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = 
type_enum_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Union => { const type_union_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Union", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const union_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "UnionField", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; try ty.resolveLayout(pt); // Getting alignment requires type layout const union_obj = mod.typeToUnion(ty).?; const tag_type = union_obj.loadTagType(ip); const layout = union_obj.flagsUnordered(ip).layout; const union_field_vals = try gpa.alloc(InternPool.Index, tag_type.names.len); defer gpa.free(union_field_vals); for (union_field_vals, 0..) |*field_val, field_index| { const name_val = v: { const field_name = tag_type.names.get(ip)[field_index]; const field_name_len = field_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, .sentinel = .zero_u8, .child = .u8_type, }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = field_name.toString() }, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), } }); }; const alignment = switch (layout) { .auto, .@"extern" => try pt.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema), .@"packed" => .none, }; const field_ty = union_obj.field_types.get(ip)[field_index]; const union_field_fields = .{ // name: [:0]const u8, name_val, // type: type, field_ty, // alignment: comptime_int, (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), }; field_val.* = try pt.intern(.{ .aggregate = .{ .ty = union_field_ty.toIntern(), .storage = .{ .elems = &union_field_fields }, } }); } const fields_val = v: { const array_fields_ty = try pt.arrayType(.{ .len = union_field_vals.len, .child = union_field_ty.toIntern(), }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } }); const slice_ty = (try pt.ptrTypeSema(.{ .child = union_field_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, union_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod).toOptional()); const enum_tag_ty_val = try pt.intern(.{ .opt = .{ .ty = (try pt.optionalType(.type_type)).toIntern(), .val = if (ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, } }); const container_layout_ty = t: { const nav = try 
sema.namespaceLookup( block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ // layout: ContainerLayout, (try pt.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, // fields: []const UnionField, fields_val, // decls: []const Declaration, decls_val, }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = type_union_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Struct => { const type_struct_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Struct", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const struct_field_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "StructField", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; try ty.resolveLayout(pt); // Getting alignment requires type layout var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); fv: { const struct_type = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct_type| { struct_field_vals = try gpa.alloc(InternPool.Index, anon_struct_type.types.len); for (struct_field_vals, 0..) 
|*struct_field_val, field_index| { const field_ty = anon_struct_type.types.get(ip)[field_index]; const field_val = anon_struct_type.values.get(ip)[field_index]; const name_val = v: { const field_name = if (anon_struct_type.names.len != 0) anon_struct_type.names.get(ip)[field_index] else try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, .sentinel = .zero_u8, .child = .u8_type, }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = field_name.toString() }, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), } }); }; try Type.fromInterned(field_ty).resolveLayout(pt); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null; const default_val_ptr = try sema.optRefValue(opt_default_val); const struct_field_fields = .{ // name: [:0]const u8, name_val, // type: type, field_ty, // default_value: ?*const anyopaque, default_val_ptr.toIntern(), // is_comptime: bool, Value.makeBool(is_comptime).toIntern(), // alignment: comptime_int, (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(pt).toByteUnits() orelse 0)).toIntern(), }; struct_field_val.* = try pt.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } break :fv; }, .struct_type => ip.loadStructType(ty.toIntern()), else => unreachable, }; struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); try ty.resolveStructFieldInits(pt); for (struct_field_vals, 0..) 
|*field_val, field_index| { const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| field_name else try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); const field_init = struct_type.fieldInit(ip, field_index); const field_is_comptime = struct_type.fieldIsComptime(ip, field_index); const name_val = v: { const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, .sentinel = .zero_u8, .child = .u8_type, }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = new_decl_ty.toIntern(), .storage = .{ .bytes = field_name.toString() }, } }); break :v try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(), } }); }; const opt_default_val = if (field_init == .none) null else Value.fromInterned(field_init); const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, else => try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, .sema, ), }; const struct_field_fields = .{ // name: [:0]const u8, name_val, // type: type, field_ty.toIntern(), // default_value: ?*const anyopaque, default_val_ptr.toIntern(), // is_comptime: bool, Value.makeBool(field_is_comptime).toIntern(), // alignment: comptime_int, (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(), }; field_val.* = try pt.intern(.{ .aggregate = .{ .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } } const fields_val = v: { const array_fields_ty = try pt.arrayType(.{ .len = struct_field_vals.len, .child = struct_field_ty.toIntern(), }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } }); const slice_ty = (try pt.ptrTypeSema(.{ .child = struct_field_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); break :v try pt.intern(.{ .slice = .{ .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, struct_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod)); const backing_integer_val = try pt.intern(.{ .opt = .{ .ty = (try pt.optionalType(.type_type)).toIntern(), .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: { assert(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)).isInt(mod)); break :val packed_struct.backingIntTypeUnordered(ip); } else .none, } }); const container_layout_ty = t: { const nav = try sema.namespaceLookup( block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const layout = ty.containerLayout(mod); const field_values = 
[_]InternPool.Index{ // layout: ContainerLayout, (try pt.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, fields_val, // decls: []const Declaration, decls_val, // is_tuple: bool, Value.makeBool(ty.isTuple(mod)).toIntern(), }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = type_struct_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Opaque => { const type_opaque_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Opaque", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; try ty.resolveFields(pt); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod)); const field_values = .{ // decls: []const Declaration, decls_val, }; return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(), .val = try pt.intern(.{ .aggregate = .{ .ty = type_opaque_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } }))); }, .Frame => return sema.failWithUseOfAsync(block, src), .AnyFrame => return sema.failWithUseOfAsync(block, src), } } fn typeInfoDecls( sema: *Sema, block: *Block, src: LazySrcLoc, type_info_ty: Type, opt_namespace: InternPool.OptionalNamespaceIndex, ) CompileError!InternPool.Index { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; const declaration_ty = t: { const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(zcu), try ip.getOrPutString(gpa, pt.tid, "Declaration", .no_embedded_nulls), ) orelse @panic("std.builtin.Type is corrupt"); try sema.ensureNavResolved(src, nav); break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa); defer seen_namespaces.deinit(); try sema.typeInfoNamespaceDecls(block, src, opt_namespace, declaration_ty, &decl_vals, &seen_namespaces); const array_decl_ty = try pt.arrayType(.{ .len = decl_vals.items.len, .child = declaration_ty.toIntern(), }); const new_decl_val = try pt.intern(.{ .aggregate = .{ .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } }); const slice_ty = (try pt.ptrTypeSema(.{ .child = declaration_ty.toIntern(), .flags = .{ .size = .Slice, .is_const = true, }, })).toIntern(); const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(zcu).toIntern(); return try pt.intern(.{ .slice = .{ .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, decl_vals.items.len)).toIntern(), } }); } fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, src: LazySrcLoc, opt_namespace_index: InternPool.OptionalNamespaceIndex, declaration_ty: Type, decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { const pt = sema.pt; const zcu = pt.zcu; 
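// Illustrative note (not from this file): the `seen_namespaces` set makes
// the recursive traversal below terminate even for cyclic re-exports such
// as `pub usingnamespace @This();`, since each namespace is visited at
// most once.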
const ip = &zcu.intern_pool; const namespace_index = opt_namespace_index.unwrap() orelse return; try pt.ensureNamespaceUpToDate(namespace_index); const namespace = zcu.namespacePtr(namespace_index); const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; for (namespace.pub_decls.keys()) |nav| { const name = ip.getNav(nav).name; const name_val = name_val: { const name_len = name.length(ip); const array_ty = try pt.arrayType(.{ .len = name_len, .sentinel = .zero_u8, .child = .u8_type, }); const array_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .bytes = name.toString() }, } }); break :name_val try pt.intern(.{ .slice = .{ .ty = .slice_const_u8_sentinel_0_type, // [:0]const u8 .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, // [*:0]const u8 .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = array_val, } }, .byte_offset = 0, }, }), .len = (try pt.intValue(Type.usize, name_len)).toIntern(), }, }); }; const fields = [_]InternPool.Index{ // name: [:0]const u8, name_val, }; try decl_vals.append(try pt.intern(.{ .aggregate = .{ .ty = declaration_ty.toIntern(), .storage = .{ .elems = &fields }, } })); } for (namespace.pub_usingnamespace.items) |nav| { if (ip.getNav(nav).analysis_owner.unwrap()) |cau| { if (zcu.analysis_in_progress.contains(AnalUnit.wrap(.{ .cau = cau }))) { continue; } } try sema.ensureNavResolved(src, nav); const namespace_ty = Type.fromInterned(ip.getNav(nav).status.resolved.val); try sema.typeInfoNamespaceDecls(block, src, namespace_ty.getNamespaceIndex(zcu).toOptional(), declaration_ty, decl_vals, seen_namespaces); } } fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); return Air.internedToRef(operand_ty.toIntern()); } fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = sema.code.bodySlice(extra.end, extra.data.body_len); var child_block: Block = .{ .parent = block, .sema = sema, .namespace = block.namespace, .instructions = .{}, .inlining = block.inlining, .is_comptime = false, .is_typeof = true, .want_safety = false, .error_return_trace_index = block.error_return_trace_index, .src_base_inst = block.src_base_inst, .type_name_ctx = block.type_name_ctx, }; defer child_block.instructions.deinit(sema.gpa); const operand = try sema.resolveInlineBody(&child_block, body, inst); const operand_ty = sema.typeOf(operand); if (operand_ty.isGenericPoison()) return error.GenericPoison; return Air.internedToRef(operand_ty.toIntern()); } fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const res_ty = try sema.log2IntType(block, operand_ty, src); return Air.internedToRef(res_ty.toIntern()); } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type { const pt = sema.pt; const mod = pt.zcu; switch 
(operand.zigTypeTag(mod)) { .ComptimeInt => return Type.comptime_int, .Int => { const bits = operand.bitSize(pt); const count = if (bits == 0) 0 else blk: { var count: u16 = 0; var s = bits - 1; while (s != 0) : (s >>= 1) { count += 1; } break :blk count; }; return pt.intType(.unsigned, count); }, .Vector => { const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return pt.vectorType(.{ .len = operand.vectorLen(mod), .child = log2_elem_ty.toIntern(), }); }, else => {}, } return sema.fail( block, src, "bit shifting operation expected integer type, found '{}'", .{operand.fmt(pt)}, ); } fn zirTypeofPeer( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.TypeOfPeer, extended.operand); const src = block.nodeOffset(extra.data.src_node); const body = sema.code.bodySlice(extra.data.body_index, extra.data.body_len); var child_block: Block = .{ .parent = block, .sema = sema, .namespace = block.namespace, .instructions = .{}, .inlining = block.inlining, .is_comptime = false, .is_typeof = true, .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, .runtime_index = block.runtime_index, .src_base_inst = block.src_base_inst, .type_name_ctx = block.type_name_ctx, }; defer child_block.instructions.deinit(sema.gpa); // Ignore the result, we only care about the instructions in `args`. _ = try sema.analyzeInlineBody(&child_block, body, inst); const args = sema.code.refSlice(extra.end, extended.small); const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len); defer sema.gpa.free(inst_list); for (args, 0..) |arg_ref, i| { inst_list[i] = try sema.resolveInst(arg_ref); } const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node }); return Air.internedToRef(result_type.toIntern()); } fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node }); const uncasted_operand = try sema.resolveInst(inst_data.operand); const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); if (try sema.resolveValue(operand)) |val| { return if (val.isUndef(mod)) pt.undefRef(Type.bool) else if (val.toBool()) .bool_false else .bool_true; } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.not, Type.bool, operand); } fn zirBoolBr( sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, is_bool_or: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const datas = sema.code.instructions.items(.data); const inst_data = datas[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.BoolBr, inst_data.payload_index); const uncoerced_lhs = try sema.resolveInst(extra.data.lhs); const body = sema.code.bodySlice(extra.end, extra.data.body_len); const lhs_src = parent_block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = parent_block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src); if 
(try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (is_bool_or and lhs_val.toBool()) { return .bool_true; } else if (!is_bool_or and !lhs_val.toBool()) { return .bool_false; } // comptime-known left-hand side. No need for a block here; the result // is simply the rhs expression. Here we rely on there only being 1 // break instruction (`break_inline`). const rhs_result = try sema.resolveInlineBody(parent_block, body, inst); if (sema.typeOf(rhs_result).isNoReturn(mod)) { return rhs_result; } return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src); } const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .bool_type, .payload = undefined, } }, }); var child_block = parent_block.makeSubBlock(); child_block.runtime_loop = null; child_block.runtime_cond = lhs_src; child_block.runtime_index.increment(); defer child_block.instructions.deinit(gpa); var then_block = child_block.makeSubBlock(); defer then_block.instructions.deinit(gpa); var else_block = child_block.makeSubBlock(); defer else_block.instructions.deinit(gpa); const lhs_block = if (is_bool_or) &then_block else &else_block; const rhs_block = if (is_bool_or) &else_block else &then_block; const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false; _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveInlineBody(rhs_block, body, inst); const rhs_noret = sema.typeOf(rhs_result).isNoReturn(mod); const coerced_rhs_result = if (!rhs_noret) rhs: { const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src); _ = try rhs_block.addBr(block_inst, coerced_result); break :rhs coerced_result; } else rhs_result; const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); if (!rhs_noret) { if (try sema.resolveDefinedValue(rhs_block, rhs_src, coerced_rhs_result)) |rhs_val| { if (is_bool_or and rhs_val.toBool()) { return .bool_true; } else if (!is_bool_or and !rhs_val.toBool()) { return .bool_false; } } } return result; } fn finishCondBr( sema: *Sema, parent_block: *Block, child_block: *Block, then_block: *Block, else_block: *Block, cond: Air.Inst.Ref, block_inst: Air.Inst.Index, ) !Air.Inst.Ref { const gpa = sema.gpa; try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(then_block.instructions.items.len), .else_body_len = @intCast(else_block.instructions.items.len), }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items)); _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = cond, .payload = cond_br_payload, } } }); sema.air_instructions.items(.data)[@intFromEnum(block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity( Air.Block{ .body_len = @intCast(child_block.instructions.items.len) }, ); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(child_block.instructions.items)); try parent_block.instructions.append(gpa, block_inst); return block_inst.toRef(); } fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { const pt = sema.pt; const mod = pt.zcu; switch 
(ty.zigTypeTag(mod)) { .Optional, .Null, .Undefined => return, .Pointer => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.failWithExpectedOptionalType(block, src, ty); } fn zirIsNonNull( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); try sema.checkNullableType(block, src, sema.typeOf(operand)); return sema.analyzeIsNull(block, src, operand, true); } fn zirIsNonNullPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ptr = try sema.resolveInst(inst_data.operand); try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod)); if ((try sema.resolveValue(ptr)) == null) { return block.addUnOp(.is_non_null_ptr, ptr); } const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ ty.fmt(pt), }), } } fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); try sema.checkErrorType(block, src, sema.typeOf(operand)); return sema.analyzeIsNonErr(block, src, operand); } fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ptr = try sema.resolveInst(inst_data.operand); try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod)); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } fn zirRetIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, src, operand); } fn zirCondbr( sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, ) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const cond_src = parent_block.src(.{ .node_offset_if_cond = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); const then_body = sema.code.bodySlice(extra.end, extra.data.then_body_len); const else_body = sema.code.bodySlice(extra.end + then_body.len, 
extra.data.else_body_len); const uncasted_cond = try sema.resolveInst(extra.data.condition); const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| { const body = if (cond_val.toBool()) then_body else else_body; try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src); // We use `analyzeBodyInner` since we want to propagate any comptime control flow to the caller. return sema.analyzeBodyInner(parent_block, body); } const gpa = sema.gpa; // We'll re-use the sub block to save on memory bandwidth, and yank out the // instructions array in between using it for the then block and else block. var sub_block = parent_block.makeSubBlock(); sub_block.runtime_loop = null; sub_block.runtime_cond = cond_src; sub_block.runtime_index.increment(); sub_block.need_debug_scope = null; // this body is emitted regardless defer sub_block.instructions.deinit(gpa); try sema.analyzeBodyRuntimeBreak(&sub_block, then_body); const true_instructions = try sub_block.instructions.toOwnedSlice(gpa); defer gpa.free(true_instructions); const err_cond = blk: { const index = extra.data.condition.toIndex() orelse break :blk null; if (sema.code.instructions.items(.tag)[@intFromEnum(index)] != .is_non_err) break :blk null; const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); assert(operand_ty.zigTypeTag(mod) == .ErrorUnion); const result_ty = operand_ty.errorUnionSet(mod); break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand); }; if (err_cond != null and try sema.maybeErrorUnwrap(&sub_block, else_body, err_cond.?, cond_src, false)) { // nothing to do } else { try sema.analyzeBodyRuntimeBreak(&sub_block, else_body); } try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + true_instructions.len + sub_block.instructions.items.len); _ = try parent_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = cond, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(true_instructions.len), .else_body_len = @intCast(sub_block.instructions.items.len), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items)); } fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(inst_data.src_node); const operand_src = parent_block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index); const body = sema.code.bodySlice(extra.end, extra.data.body_len); const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); const pt = sema.pt; const mod = pt.zcu; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(pt), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); if (is_non_err != .none) { const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?; if (is_non_err_val.toBool()) { return sema.analyzeErrUnionPayload(parent_block, src, 
err_union_ty, err_union, operand_src, false); } // We can analyze the body directly in the parent block because we know there are // no breaks from the body possible, and that the body is noreturn. try sema.analyzeBodyInner(parent_block, body); return .unreachable_value; } var sub_block = parent_block.makeSubBlock(); defer sub_block.instructions.deinit(sema.gpa); // This body is guaranteed to end with noreturn and has no breaks. try sema.analyzeBodyInner(&sub_block, body); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Try).Struct.fields.len + sub_block.instructions.items.len); const try_inst = try parent_block.addInst(.{ .tag = .@"try", .data = .{ .pl_op = .{ .operand = err_union, .payload = sema.addExtraAssumeCapacity(Air.Try{ .body_len = @intCast(sub_block.instructions.items.len), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items)); return try_inst; } fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = parent_block.nodeOffset(inst_data.src_node); const operand_src = parent_block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Try, inst_data.payload_index); const body = sema.code.bodySlice(extra.end, extra.data.body_len); const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); const pt = sema.pt; const mod = pt.zcu; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(pt), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); if (is_non_err != .none) { const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?; if (is_non_err_val.toBool()) { return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false); } // We can analyze the body directly in the parent block because we know there are // no breaks from the body possible, and that the body is noreturn. try sema.analyzeBodyInner(parent_block, body); return .unreachable_value; } var sub_block = parent_block.makeSubBlock(); defer sub_block.instructions.deinit(sema.gpa); // This body is guaranteed to end with noreturn and has no breaks. 
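// Sketch of what `try_ptr` lowers (illustrative, not exact user syntax): the
// pointer form of `try`. On success it yields a pointer to the error union's
// payload, inheriting const/volatile/allowzero and address space from the
// operand pointer (see `res_ty` below); on error, this body returns the error.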
try sema.analyzeBodyInner(&sub_block, body); const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); const res_ty = try pt.ptrTypeSema(.{ .child = err_union_ty.errorUnionPayload(mod).toIntern(), .flags = .{ .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, }, }); const res_ty_ref = Air.internedToRef(res_ty.toIntern()); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.TryPtr).Struct.fields.len + sub_block.instructions.items.len); const try_inst = try parent_block.addInst(.{ .tag = .try_ptr, .data = .{ .ty_pl = .{ .ty = res_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.TryPtr{ .ptr = operand, .body_len = @intCast(sub_block.instructions.items.len), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items)); return try_inst; } fn ensurePostHoc(sema: *Sema, block: *Block, dest_block: Zir.Inst.Index) !*LabeledBlock { const gop = sema.inst_map.getOrPutAssumeCapacity(dest_block); if (gop.found_existing) existing: { // This may be a *result* from an earlier iteration of an inline loop. // In this case, there will not be a post-hoc block entry, and we can // continue with the logic below. const new_block_inst = gop.value_ptr.*.toIndex() orelse break :existing; return sema.post_hoc_blocks.get(new_block_inst) orelse break :existing; } try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1); const new_block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); gop.value_ptr.* = new_block_inst.toRef(); try sema.air_instructions.append(sema.gpa, .{ .tag = .block, .data = undefined, }); const labeled_block = try sema.gpa.create(LabeledBlock); labeled_block.* = .{ .label = .{ .zir_block = dest_block, .merges = .{ .src_locs = .{}, .results = .{}, .br_list = .{}, .block_inst = new_block_inst, }, }, .block = .{ .parent = block, .sema = sema, .namespace = block.namespace, .instructions = .{}, .label = &labeled_block.label, .inlining = block.inlining, .is_comptime = block.is_comptime, .src_base_inst = block.src_base_inst, .type_name_ctx = block.type_name_ctx, }, }; sema.post_hoc_blocks.putAssumeCapacityNoClobber(new_block_inst, labeled_block); return labeled_block; } /// A `break` statement is inside a runtime condition, but tries to /// break from an inline loop. In such a case we must convert it to /// a runtime break.
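/// For example (sketch): a `break` targeting an inline-evaluated block that is
/// guarded by a runtime `if` cannot be resolved at comptime, so it is rewritten
/// as a runtime `br` into a post-hoc AIR block created by `ensurePostHoc` above.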
fn addRuntimeBreak(sema: *Sema, child_block: *Block, block_inst: Zir.Inst.Index, break_operand: Zir.Inst.Ref) !void { const labeled_block = try sema.ensurePostHoc(child_block, block_inst); const operand = try sema.resolveInst(break_operand); const br_ref = try child_block.addBr(labeled_block.label.merges.block_inst, operand); try labeled_block.label.merges.results.append(sema.gpa, operand); try labeled_block.label.merges.br_list.append(sema.gpa, br_ref.toIndex().?); try labeled_block.label.merges.src_locs.append(sema.gpa, null); labeled_block.block.runtime_index.increment(); if (labeled_block.block.runtime_cond == null and labeled_block.block.runtime_loop == null) { labeled_block.block.runtime_cond = child_block.runtime_cond orelse child_block.runtime_loop; labeled_block.block.runtime_loop = child_block.runtime_loop; } } fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"unreachable"; const src = block.nodeOffset(inst_data.src_node); if (block.is_comptime) { return sema.fail(block, src, "reached unreachable code", .{}); } // TODO Add compile error for @optimizeFor occurring too late in a scope. sema.analyzeUnreachable(block, src, true) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; if (!mem.eql(u8, msg.msg, "runtime safety check not allowed in naked function")) return err; try sema.errNote(src, msg, "the end of a naked function is implicitly unreachable", .{}); return err; }, else => |e| return e, }; } fn zirRetErrValue( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].str_tok; const src = block.tokenOffset(inst_data.src_tok); const err_name = try mod.intern_pool.getOrPutString( sema.gpa, pt.tid, inst_data.get(sema.code), .no_embedded_nulls, ); _ = try pt.getErrorValue(err_name); // Return the error code from the function. const error_set_type = try pt.singleErrorSetType(err_name); const result_inst = Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = err_name, } }))); return sema.analyzeRet(block, result_inst, src, src); } fn zirRetImplicit( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok; const r_brace_src = block.tokenOffset(inst_data.src_tok); if (block.inlining == null and sema.func_is_naked) { assert(!block.is_comptime); if (block.wantSafety()) { // Calling a safety function from a naked function would not be legal. 
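// Instead a bare trap is emitted. Hypothetical user code reaching this path:
//
//     fn entry() callconv(.Naked) noreturn {
//         // no terminating assembly here: the implicit end-of-body is
//         // unreachable, which lowers to `trap` when safety is enabled
//     }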
_ = try block.addNoOp(.trap); } else { try sema.analyzeUnreachable(block, r_brace_src, false); } return; } const operand = try sema.resolveInst(inst_data.operand); const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = 0 }); const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod); if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(ret_ty_src, "function declared '{}' implicitly returns", .{ sema.fn_ret_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(r_brace_src, msg, "control flow reaches end of body here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ sema.fn_ret_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(r_brace_src, msg, "control flow reaches end of body here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } return sema.analyzeRet(block, operand, r_brace_src, r_brace_src); } fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = block.nodeOffset(inst_data.src_node); return sema.analyzeRet(block, operand, src, block.src(.{ .node_offset_return_operand = inst_data.src_node })); } fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ret_ptr = try sema.resolveInst(inst_data.operand); if (block.is_comptime or block.inlining != null or sema.func_is_naked) { const operand = try sema.analyzeLoad(block, src, ret_ptr, src); return sema.analyzeRet(block, operand, src, block.src(.{ .node_offset_return_operand = inst_data.src_node })); } if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) { const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr); return sema.retWithErrTracing(block, src, is_non_err, .ret_load, ret_ptr); } _ = try block.addUnOp(.ret_load, ret_ptr); } fn retWithErrTracing( sema: *Sema, block: *Block, src: LazySrcLoc, is_non_err: Air.Inst.Ref, ret_tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) CompileError!void { const pt = sema.pt; const need_check = switch (is_non_err) { .bool_true => { _ = try block.addUnOp(ret_tag, operand); return; }, .bool_false => false, else => true, }; const gpa = sema.gpa; const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const return_err_fn = try pt.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; if (!need_check) { try sema.callBuiltin(block, src, return_err_fn, .never_inline, &args, .@"error return"); _ = try block.addUnOp(ret_tag, operand); return; } var then_block = block.makeSubBlock(); defer then_block.instructions.deinit(gpa); _ = try then_block.addUnOp(ret_tag, operand); var else_block = block.makeSubBlock(); defer else_block.instructions.deinit(gpa); try sema.callBuiltin(&else_block, src, return_err_fn, .never_inline, &args, .@"error return"); _ = try else_block.addUnOp(ret_tag, operand); try 
sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(then_block.instructions.items.len), .else_body_len = @intCast(else_block.instructions.items.len), }); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items)); _ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_err, .payload = cond_br_payload, } } }); } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { const pt = sema.pt; const mod = pt.zcu; return fn_ret_ty.isError(mod) and mod.comp.config.any_error_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].save_err_ret_index; if (!block.ownerModule().error_tracing) return; // This is only relevant at runtime. if (block.is_comptime or block.is_typeof) return; const save_index = inst_data.operand == .none or b: { const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); break :b operand_ty.isError(mod); }; if (save_index) block.error_return_trace_index = try sema.analyzeSaveErrRetIndex(block); } fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const extra = sema.code.extraData(Zir.Inst.RestoreErrRetIndex, extended.operand).data; return sema.restoreErrRetIndex(start_block, start_block.nodeOffset(extra.src_node), extra.block, extra.operand); } /// If `operand` is non-error (or is `none`), restores the error return trace to /// its state at the point `block` was reached (or, if `block` is `none`, the /// point this function began execution). 
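/// For example (illustrative): after `const v = f() catch 0;` the overall result
/// is non-error, so trace frames pushed while evaluating `f()` are discarded by
/// restoring the index saved before the expression.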
fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_block: Zir.Inst.Ref, operand_zir: Zir.Inst.Ref) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const saved_index = if (target_block.toIndexAllowNone()) |zir_block| b: { var block = start_block; while (true) { if (block.label) |label| { if (label.zir_block == zir_block) { const target_trace_index = if (block.parent) |parent_block| tgt: { break :tgt parent_block.error_return_trace_index; } else sema.error_return_trace_index_on_fn_entry; if (start_block.error_return_trace_index != target_trace_index) break :b target_trace_index; return; // No need to restore } } block = block.parent.?; } } else b: { if (start_block.error_return_trace_index != sema.error_return_trace_index_on_fn_entry) break :b sema.error_return_trace_index_on_fn_entry; return; // No need to restore }; const operand = try sema.resolveInstAllowNone(operand_zir); if (start_block.is_comptime or start_block.is_typeof) { const is_non_error = if (operand != .none) blk: { const is_non_error_inst = try sema.analyzeIsNonErr(start_block, src, operand); const cond_val = try sema.resolveDefinedValue(start_block, src, is_non_error_inst); break :blk cond_val.?.toBool(); } else true; // no operand means pop unconditionally if (is_non_error) return; const saved_index_val = try sema.resolveDefinedValue(start_block, src, saved_index); const saved_index_int = saved_index_val.?.toUnsignedInt(pt); assert(saved_index_int <= sema.comptime_err_ret_trace.items.len); sema.comptime_err_ret_trace.items.len = @intCast(saved_index_int); return; } if (!mod.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).calls_or_awaits_errorable_fn) return; if (!start_block.ownerModule().error_tracing) return; assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere return sema.popErrorReturnTrace(start_block, src, operand, saved_index); } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern(); switch (err_set_ty) { .adhoc_inferred_error_set_type => { const ies = sema.fn_ret_ty_ies.?; assert(ies.func == .none); try sema.addToInferredErrorSetPtr(ies, sema.typeOf(uncasted_operand)); }, else => if (ip.isInferredErrorSetType(err_set_ty)) { const ies = sema.fn_ret_ty_ies.?; assert(ies.func == sema.owner.unwrap().func); try sema.addToInferredErrorSetPtr(ies, sema.typeOf(uncasted_operand)); }, } } fn addToInferredErrorSetPtr(sema: *Sema, ies: *InferredErrorSet, op_ty: Type) !void { const arena = sema.arena; const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; switch (op_ty.zigTypeTag(mod)) { .ErrorSet => try ies.addErrorSet(op_ty, ip, arena), .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, arena), else => {}, } } fn analyzeRet( sema: *Sema, block: *Block, uncasted_operand: Air.Inst.Ref, src: LazySrcLoc, operand_src: LazySrcLoc, ) CompileError!void { // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. 
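// Sketch (hypothetical user code): with an inferred error set, each returned
// error is added to the set as its `return` statement is analyzed:
//
//     fn parse(s: []const u8) !u32 {          // `!u32` has an inferred error set
//         if (s.len == 0) return error.Empty; // `error.Empty` joins the set here
//         return 42;
//     }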
const pt = sema.pt; const mod = pt.zcu; if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, operand_src, .{ .is_ret = true }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }; if (block.inlining) |inlining| { if (block.is_comptime) { const ret_val = try sema.resolveConstValue(block, operand_src, operand, .{ .needed_comptime_reason = "value being returned at comptime must be comptime-known", }); inlining.comptime_result = operand; if (sema.fn_ret_ty.isError(mod) and ret_val.getErrorName(mod) != .none) { try sema.comptime_err_ret_trace.append(src); } return error.ComptimeReturn; } // We are inlining a function call; rewrite the `ret` as a `break`. const br_inst = try block.addBr(inlining.merges.block_inst, operand); try inlining.merges.results.append(sema.gpa, operand); try inlining.merges.br_list.append(sema.gpa, br_inst.toIndex().?); try inlining.merges.src_locs.append(sema.gpa, operand_src); return; } else if (block.is_comptime) { return sema.fail(block, src, "function called at runtime cannot return value at comptime", .{}); } else if (sema.func_is_naked) { const msg = msg: { const msg = try sema.errMsg(src, "cannot return from naked function", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "can only return using assembly", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } try sema.fn_ret_ty.resolveLayout(pt); try sema.validateRuntimeValue(block, operand_src, operand); const air_tag: Air.Inst.Tag = if (block.wantSafety()) .ret_safe else .ret; if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) { // Avoid adding a frame to the error return trace in case the value is comptime-known // to be not an error. 
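// E.g. (sketch): in `fn f() anyerror!u32 { return 0; }` the operand is
// comptime-known to be non-error, so `is_non_err` is `.bool_true` and
// `retWithErrTracing` emits a plain `ret` with no trace bookkeeping.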
const is_non_err = try sema.analyzeIsNonErr(block, operand_src, operand); return sema.retWithErrTracing(block, src, is_non_err, air_tag, operand); } _ = try block.addUnOp(air_tag, operand); } fn floatOpAllowed(tag: Zir.Inst.Tag) bool { // extend this switch as additional operators are implemented return switch (tag) { .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true, else => false, }; } fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src = block.src(.{ .node_offset_ptr_elem = extra.data.src_node }); const sentinel_src = block.src(.{ .node_offset_ptr_sentinel = extra.data.src_node }); const align_src = block.src(.{ .node_offset_ptr_align = extra.data.src_node }); const addrspace_src = block.src(.{ .node_offset_ptr_addrspace = extra.data.src_node }); const bitoffset_src = block.src(.{ .node_offset_ptr_bitoffset = extra.data.src_node }); const hostsize_src = block.src(.{ .node_offset_ptr_hostsize = extra.data.src_node }); const elem_ty = blk: { const air_inst = try sema.resolveInst(extra.data.elem_type); const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| { if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) { try sema.errNote(elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{}); } return err; }; if (ty.isGenericPoison()) return error.GenericPoison; break :blk ty; }; if (elem_ty.zigTypeTag(mod) == .NoReturn) return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); const target = mod.getTarget(); var extra_i = extra.end; const sentinel = if (inst_data.flags.has_sentinel) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); const val = try sema.resolveConstDefinedValue(block, sentinel_src, coerced, .{ .needed_comptime_reason = "pointer sentinel value must be comptime-known", }); break :blk val.toIntern(); } else .none; const abi_align: Alignment = if (inst_data.flags.has_align) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); const val = try sema.resolveConstDefinedValue(block, align_src, coerced, .{ .needed_comptime_reason = "pointer alignment must be comptime-known", }); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it.
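// E.g. (sketch): for `*align(@alignOf(T)) T` the requested alignment is exactly
// the natural alignment of `T`, so the pointer is encoded with `.none` (natural)
// without forcing `T`'s layout to be resolved.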
switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none, else => {}, }, else => {}, } const align_bytes = (try val.getUnsignedIntAdvanced(pt, .sema)).?; break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes); } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.resolveAddressSpace(block, addrspace_src, ref, .pointer); } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, .{ .needed_comptime_reason = "pointer bit-offset must be comptime-known", }); break :blk @intCast(bit_offset); } else 0; const host_size: u16 = if (inst_data.flags.has_bit_range) blk: { const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]); extra_i += 1; const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, .{ .needed_comptime_reason = "pointer host size must be comptime-known", }); break :blk @intCast(host_size); } else 0; if (host_size != 0) { if (bit_offset >= host_size * 8) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} starts {} bits after the end of a {} byte host integer", .{ elem_ty.fmt(pt), bit_offset, bit_offset - host_size * 8, host_size, }); } const elem_bit_size = try elem_ty.bitSizeAdvanced(pt, .sema); if (elem_bit_size > host_size * 8 - bit_offset) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{ elem_ty.fmt(pt), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, }); } } if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{}); } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { const msg = try sema.errMsg(elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src, elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{}); } } if (host_size != 0 and !try sema.validatePackedType(elem_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(elem_ty_src, "bit-pointer cannot refer to value of type '{}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, elem_ty_src, elem_ty); break :msg msg; }); } const ty = try pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = sentinel, .flags = .{ .alignment = abi_align, .address_space = address_space, .is_const = !inst_data.flags.is_mutable, .is_allowzero = inst_data.flags.is_allowzero, .is_volatile = inst_data.flags.is_volatile, .size = inst_data.size, }, .packed_offset = .{ 
.bit_offset = bit_offset, .host_size = host_size, }, }); return Air.internedToRef(ty.toIntern()); } fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty_src = block.src(.{ .node_offset_init_ty = inst_data.src_node }); const obj_ty = try sema.resolveType(block, ty_src, inst_data.operand); const pt = sema.pt; const mod = pt.zcu; switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty), .Void => return Air.internedToRef(Value.void.toIntern()), .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}), else => return sema.failWithArrayInitNotSupported(block, src, obj_ty), } } fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_byref: bool) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty_operand = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) { // Generic poison means this is an untyped anonymous empty struct init error.GenericPoison => return .empty_struct, else => |e| return e, }; const init_ty = if (is_byref) ty: { const ptr_ty = ty_operand.optEuBaseType(mod); assert(ptr_ty.zigTypeTag(mod) == .Pointer); // validated by a previous instruction if (!ptr_ty.isSlice(mod)) { break :ty ptr_ty.childType(mod); } // To make `&.{}` a `[:s]T`, the init should be a `[0:s]T`. break :ty try pt.arrayType(.{ .len = 0, .sentinel = if (ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .child = ptr_ty.childType(mod).toIntern(), }); } else ty_operand; const obj_ty = init_ty.optEuBaseType(mod); const empty_ref = switch (obj_ty.zigTypeTag(mod)) { .Struct => try sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => try sema.arrayInitEmpty(block, src, obj_ty), .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}), else => return sema.failWithArrayInitNotSupported(block, src, obj_ty), }; const init_ref = try sema.coerce(block, init_ty, empty_ref, src); if (is_byref) { const init_val = (try sema.resolveValue(init_ref)).?; return sema.uavRef(init_val.toIntern()); } else { return init_ref; } } fn structInitEmpty( sema: *Sema, block: *Block, struct_ty: Type, dest_src: LazySrcLoc, init_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. try struct_ty.resolveFields(pt); // The init values to use for the struct instance. 
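// All slots start out `.none`; `finishStructInit` then fills each from the
// field's default value. Sketch (hypothetical user code):
//
//     const Point = struct { x: u32 = 0, y: u32 = 0 };
//     const p: Point = .{}; // every field has a default, so this is a constant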
const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); return sema.finishStructInit(block, init_src, dest_src, field_inits, struct_ty, struct_ty, false); } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const arr_len = obj_ty.arrayLen(mod); if (arr_len != 0) { if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); } else { return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = obj_ty.toIntern(), .storage = .{ .elems = &.{} }, } }))); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const field_src = block.builtinCallArgSrc(inst_data.src_node, 1); const init_src = block.builtinCallArgSrc(inst_data.src_node, 2); const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); if (union_ty.zigTypeTag(pt.zcu) != .Union) { return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(pt)}); } const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{ .needed_comptime_reason = "name of field being initialized must be comptime-known", }); const init = try sema.resolveInst(extra.init); return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src); } fn unionInit( sema: *Sema, block: *Block, uncasted_init: Air.Inst.Ref, init_src: LazySrcLoc, union_ty: Type, union_ty_src: LazySrcLoc, field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const field_ty = Type.fromInterned(mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index]); const init = try sema.coerce(block, field_ty, uncasted_init, init_src); if (try sema.resolveValue(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); return Air.internedToRef((try pt.intern(.{ .un = .{ .ty = union_ty.toIntern(), .tag = tag_val.toIntern(), .val = init_val.toIntern(), } }))); } try sema.requireRuntimeBlock(block, init_src, null); _ = union_ty_src; return block.addUnionInit(union_ty, field_index, init); } fn zirStructInit( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = block.nodeOffset(inst_data.src_node); const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[@intFromEnum(first_item.field_type)].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const result_ty = sema.resolveType(block, 
src, first_field_type_extra.container_type) catch |err| switch (err) { error.GenericPoison => { // The type wasn't actually known, so treat this as an anon struct init. return sema.structInitAnon(block, src, .typed_init, extra.data, extra.end, is_ref); }, else => |e| return e, }; const resolved_ty = result_ty.optEuBaseType(mod); try resolved_ty.resolveLayout(pt); if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. // Maps field index to field_type index of where it was already initialized. // For making sure all fields are accounted for and no fields are duplicated. const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod)); defer gpa.free(found_fields); // The init values to use for the struct instance. const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); var field_i: u32 = 0; var extra_index = extra.end; const is_packed = resolved_ty.containerLayout(mod) == .@"packed"; while (field_i < extra.data.fields_len) : (field_i += 1) { const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); extra_index = item.end; const field_type_data = zir_datas[@intFromEnum(item.data.field_type)].pl_node; const field_src = block.src(.{ .node_offset_initializer = field_type_data.src_node }); const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else try sema.structFieldIndex(block, resolved_ty, field_name, field_src); assert(field_inits[field_index] == .none); found_fields[field_index] = item.data.field_type; const uncoerced_init = try sema.resolveInst(item.data.init); const field_ty = resolved_ty.structFieldType(field_index, mod); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (!is_packed) { try resolved_ty.resolveStructFieldInits(pt); if (try resolved_ty.structFieldValueComptime(pt, field_index)) |default_value| { const init_val = (try sema.resolveValue(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", }); }; if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } } } } return sema.finishStructInit(block, src, src, field_inits, resolved_ty, result_ty, is_ref); } else if (resolved_ty.zigTypeTag(mod) == .Union) { if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end); const field_type_data = zir_datas[@intFromEnum(item.data.field_type)].pl_node; const field_src = block.src(.{ .node_offset_initializer = field_type_data.src_node }); const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = try ip.getOrPutString( gpa, pt.tid, sema.code.nullTerminatedString(field_type_extra.name_start), .no_embedded_nulls, ); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = 
resolved_ty.unionTagTypeHypothetical(mod); const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); const field_ty = Type.fromInterned(mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]); if (field_ty.zigTypeTag(mod) == .NoReturn) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); try sema.addFieldErrNote(resolved_ty, field_index, msg, "field '{}' declared here", .{ field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, resolved_ty); break :msg msg; }); } const uncoerced_init_inst = try sema.resolveInst(item.data.init); const init_inst = try sema.coerce(block, field_ty, uncoerced_init_inst, field_src); if (try sema.resolveValue(init_inst)) |val| { const struct_val = Value.fromInterned(try pt.intern(.{ .un = .{ .ty = resolved_ty.toIntern(), .tag = tag_val.toIntern(), .val = val.toIntern(), } })); const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src); const final_val = (try sema.resolveValue(final_val_inst)).?; return sema.addConstantMaybeRef(final_val.toIntern(), is_ref); } if (try sema.typeRequiresComptime(resolved_ty)) { return sema.failWithNeededComptime(block, field_src, .{ .needed_comptime_reason = "initializer of comptime only union must be comptime-known", }); } try sema.validateRuntimeValue(block, field_src, init_inst); if (is_ref) { const target = mod.getTarget(); const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); const base_ptr = try sema.optEuBasePtrInit(block, alloc, src); const field_ptr = try sema.unionFieldPtr(block, field_src, base_ptr, field_name, field_src, resolved_ty, true); try sema.storePtr(block, src, field_ptr, init_inst); const new_tag = Air.internedToRef(tag_val.toIntern()); _ = try block.addBinOp(.set_union_tag, base_ptr, new_tag); return sema.makePtrConst(block, alloc); } try sema.requireRuntimeBlock(block, src, null); const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst); return sema.coerce(block, result_ty, union_val, src); } unreachable; } fn finishStructInit( sema: *Sema, block: *Block, init_src: LazySrcLoc, dest_src: LazySrcLoc, field_inits: []Air.Inst.Ref, struct_ty: Type, result_ty: Type, is_ref: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct| { // We can't get the slices, as the coercion may invalidate them. for (0..anon_struct.types.len) |i| { if (field_inits[i] != .none) { // Coerce the init value to the field type. 
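// E.g. (sketch): in `.{ .x = 1 }` the literal `1` starts as `comptime_int` and
// is coerced here to the declared field type (say, `u32`).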
const field_src = block.src(.{ .init_elem = .{ .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(i), } }); const field_ty = Type.fromInterned(anon_struct.types.get(ip)[i]); field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src); continue; } const default_val = anon_struct.values.get(ip)[i]; if (default_val == .none) { if (anon_struct.names.len == 0) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, .{i}); } else { root_msg = try sema.errMsg(init_src, template, .{i}); } } else { const field_name = anon_struct.names.get(ip)[i]; const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, args); } else { root_msg = try sema.errMsg(init_src, template, args); } } } else { field_inits[i] = Air.internedToRef(default_val); } } }, .struct_type => { const struct_type = ip.loadStructType(struct_ty.toIntern()); for (0..struct_type.field_types.len) |i| { if (field_inits[i] != .none) { // Coerce the init value to the field type. const field_src = block.src(.{ .init_elem = .{ .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(i), } }); const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src); continue; } try struct_ty.resolveStructFieldInits(pt); const field_init = struct_type.fieldInit(ip, i); if (field_init == .none) { if (!struct_type.isTuple(ip)) { const field_name = struct_type.field_names.get(ip)[i]; const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, args); } else { root_msg = try sema.errMsg(init_src, template, args); } } else { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(init_src, msg, template, .{i}); } else { root_msg = try sema.errMsg(init_src, template, .{i}); } } } else { field_inits[i] = Air.internedToRef(field_init); } } }, else => unreachable, } if (root_msg) |msg| { try sema.addDeclaredHereNote(msg, struct_ty); root_msg = null; return sema.failWithOwnedErrorMsg(block, msg); } // Find which field forces the expression to be runtime, if any. const opt_runtime_index = for (field_inits, 0..) 
|field_init, i| { if (!(try sema.isComptimeKnown(field_init))) { break i; } } else null; const runtime_index = opt_runtime_index orelse { const elems = try sema.arena.alloc(InternPool.Index, field_inits.len); for (elems, field_inits) |*elem, field_init| { elem.* = (sema.resolveValue(field_init) catch unreachable).?.toIntern(); } const struct_val = try pt.intern(.{ .aggregate = .{ .ty = struct_ty.toIntern(), .storage = .{ .elems = elems }, } }); const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val), init_src); const final_val = (try sema.resolveValue(final_val_inst)).?; return sema.addConstantMaybeRef(final_val.toIntern(), is_ref); }; if (try sema.typeRequiresComptime(struct_ty)) { return sema.failWithNeededComptime(block, block.src(.{ .init_elem = .{ .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } }), .{ .needed_comptime_reason = "initializer of comptime only struct must be comptime-known", }); } for (field_inits) |field_init| { try sema.validateRuntimeValue(block, dest_src, field_init); } if (is_ref) { try struct_ty.resolveLayout(pt); const target = mod.getTarget(); const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); const base_ptr = try sema.optEuBasePtrInit(block, alloc, init_src); for (field_inits, 0..) |field_init, i_usize| { const i: u32 = @intCast(i_usize); const field_src = dest_src; const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, base_ptr, i, field_src, struct_ty, true); try sema.storePtr(block, dest_src, field_ptr, field_init); } return sema.makePtrConst(block, alloc); } try sema.requireRuntimeBlock(block, dest_src, block.src(.{ .init_elem = .{ .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } })); try struct_ty.resolveStructFieldInits(pt); const struct_val = try block.addAggregateInit(struct_ty, field_inits); return sema.coerce(block, result_ty, struct_val, init_src); } fn zirStructInitAnon( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); return sema.structInitAnon(block, src, .anon_init, extra.data, extra.end, false); } fn structInitAnon( sema: *Sema, block: *Block, src: LazySrcLoc, /// It is possible for a typed struct_init to be downgraded to an anonymous init due to a /// generic poison type. In this case, we need to know to interpret the extra data differently. comptime kind: enum { anon_init, typed_init }, extra_data: switch (kind) { .anon_init => Zir.Inst.StructInitAnon, .typed_init => Zir.Inst.StructInit, }, extra_end: usize, is_ref: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const zir_datas = sema.code.instructions.items(.data); const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len); const values = try sema.arena.alloc(InternPool.Index, types.len); const names = try sema.arena.alloc(InternPool.NullTerminatedString, types.len); // Find which field forces the expression to be runtime, if any. const opt_runtime_index = rs: { var runtime_index: ?usize = null; var extra_index = extra_end; for (types, values, names, 0..) 
|*field_ty, *field_val, *field_name, i_usize| { const item = switch (kind) { .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index), .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index), }; extra_index = item.end; const name = switch (kind) { .anon_init => sema.code.nullTerminatedString(item.data.field_name), .typed_init => name: { // `item.data.field_type` references a `field_type` instruction const field_type_data = zir_datas[@intFromEnum(item.data.field_type)].pl_node; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index); break :name sema.code.nullTerminatedString(field_type_extra.data.name_start); }, }; field_name.* = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init).toIntern(); if (Type.fromInterned(field_ty.*).zigTypeTag(mod) == .Opaque) { const msg = msg: { const field_src = block.src(.{ .init_elem = .{ .init_node_offset = src.offset.node_offset.x, .elem_index = @intCast(i_usize), } }); const msg = try sema.errMsg(field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, Type.fromInterned(field_ty.*)); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (try sema.resolveValue(init)) |init_val| { field_val.* = init_val.toIntern(); } else { field_val.* = .none; runtime_index = @intCast(i_usize); } } break :rs runtime_index; }; const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{ .names = names, .types = types, .values = values, }); const runtime_index = opt_runtime_index orelse { const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); return sema.addConstantMaybeRef(tuple_val, is_ref); }; try sema.requireRuntimeBlock(block, LazySrcLoc.unneeded, block.src(.{ .init_elem = .{ .init_node_offset = src.offset.node_offset.x, .elem_index = @intCast(runtime_index), } })); if (is_ref) { const target = mod.getTarget(); const alloc_ty = try pt.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); var extra_index = extra_end; for (types, 0..) |field_ty, i_usize| { const i: u32 = @intCast(i_usize); const item = switch (kind) { .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index), .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index), }; extra_index = item.end; const field_ptr_ty = try pt.ptrTypeSema(.{ .child = field_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); if (values[i] == .none) { const init = try sema.resolveInst(item.data.init); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, init); } } return sema.makePtrConst(block, alloc); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len); var extra_index = extra_end; for (types, 0..) 
|_, i| { const item = switch (kind) { .anon_init => sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index), .typed_init => sema.code.extraData(Zir.Inst.StructInit.Item, extra_index), }; extra_index = item.end; element_refs[i] = try sema.resolveInst(item.data.init); } return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); } fn zirArrayInit( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); assert(args.len >= 2); // array_ty + at least one element const result_ty = sema.resolveType(block, src, args[0]) catch |err| switch (err) { error.GenericPoison => { // The type wasn't actually known, so treat this as an anon array init. return sema.arrayInitAnon(block, src, args[1..], is_ref); }, else => |e| return e, }; const array_ty = result_ty.optEuBaseType(mod); const is_tuple = array_ty.zigTypeTag(mod) == .Struct; const sentinel_val = array_ty.sentinel(mod); var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); const final_len = try sema.usizeCast(block, src, array_ty.arrayLenIncludingSentinel(mod)); const resolved_args = try gpa.alloc(Air.Inst.Ref, final_len); defer gpa.free(resolved_args); for (resolved_args, 0..) |*dest, i| { const elem_src = block.src(.{ .init_elem = .{ .init_node_offset = src.offset.node_offset.x, .elem_index = @intCast(i), } }); // Fewer inits than needed. if (i + 2 > args.len) if (is_tuple) { const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern(); if (default_val == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(src, msg, template, .{i}); } else { root_msg = try sema.errMsg(src, template, .{i}); } } else { dest.* = Air.internedToRef(default_val); } continue; } else { dest.* = Air.internedToRef(sentinel_val.?.toIntern()); break; }; const arg = args[i + 1]; const resolved_arg = try sema.resolveInst(arg); const elem_ty = if (is_tuple) array_ty.structFieldType(i, mod) else array_ty.elemType2(mod); dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src); if (is_tuple) { if (array_ty.structFieldIsComptime(i, mod)) try array_ty.resolveStructFieldInits(pt); if (try array_ty.structFieldValueComptime(pt, i)) |field_val| { const init_val = try sema.resolveValue(dest.*) orelse { return sema.failWithNeededComptime(block, elem_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", }); }; if (!field_val.eql(init_val, elem_ty, mod)) { return sema.failWithInvalidComptimeFieldStore(block, elem_src, array_ty, i); } } } } if (root_msg) |msg| { try sema.addDeclaredHereNote(msg, array_ty); root_msg = null; return sema.failWithOwnedErrorMsg(block, msg); } const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| { const comptime_known = try sema.isComptimeKnown(arg); if (!comptime_known) break @intCast(i); } else null; const runtime_index = opt_runtime_index orelse { const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len); for (elem_vals, resolved_args) |*val, arg| { // We checked that all args are comptime above.
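// Illustrative example (hypothetical user code, not from this file) of an
// init that reaches this all-comptime path:
//
//     const xs = [3]u8{ 1, 2, 3 };
//
// Every element was verified comptime-known above, so `resolveValue` below
// can neither error nor return null, and the whole aggregate is interned as
// a constant instead of being lowered to runtime AIR.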
val.* = (sema.resolveValue(arg) catch unreachable).?.toIntern(); } const arr_val = try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .elems = elem_vals }, } }); const result_ref = try sema.coerce(block, result_ty, Air.internedToRef(arr_val), src); const result_val = (try sema.resolveValue(result_ref)).?; return sema.addConstantMaybeRef(result_val.toIntern(), is_ref); }; try sema.requireRuntimeBlock(block, LazySrcLoc.unneeded, block.src(.{ .init_elem = .{ .init_node_offset = src.offset.node_offset.x, .elem_index = runtime_index, } })); if (is_ref) { const target = mod.getTarget(); const alloc_ty = try pt.ptrTypeSema(.{ .child = result_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); const base_ptr = try sema.optEuBasePtrInit(block, alloc, src); if (is_tuple) { for (resolved_args, 0..) |arg, i| { const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = array_ty.structFieldType(i, mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); const index = try pt.intRef(Type.usize, i); const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref); _ = try block.addBinOp(.store, elem_ptr, arg); } return sema.makePtrConst(block, alloc); } const elem_ptr_ty = try pt.ptrTypeSema(.{ .child = array_ty.elemType2(mod).toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern()); for (resolved_args, 0..) |arg, i| { const index = try pt.intRef(Type.usize, i); const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref); _ = try block.addBinOp(.store, elem_ptr, arg); } return sema.makePtrConst(block, alloc); } const arr_ref = try block.addAggregateInit(array_ty, resolved_args); return sema.coerce(block, result_ty, arr_ref, src); } fn zirArrayInitAnon( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); return sema.arrayInitAnon(block, src, operands, false); } fn arrayInitAnon( sema: *Sema, block: *Block, src: LazySrcLoc, operands: []const Zir.Inst.Ref, is_ref: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const types = try sema.arena.alloc(InternPool.Index, operands.len); const values = try sema.arena.alloc(InternPool.Index, operands.len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (operands, 0..) 
|operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); types[i] = sema.typeOf(elem).toIntern(); if (Type.fromInterned(types[i]).zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, Type.fromInterned(types[i])); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (try sema.resolveValue(elem)) |val| { values[i] = val.toIntern(); } else { values[i] = .none; runtime_src = operand_src; } } break :rs runtime_src; }; const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{ .types = types, .values = values, .names = &.{}, }); const runtime_src = opt_runtime_src orelse { const tuple_val = try pt.intern(.{ .aggregate = .{ .ty = tuple_ty, .storage = .{ .elems = values }, } }); return sema.addConstantMaybeRef(tuple_val, is_ref); }; try sema.requireRuntimeBlock(block, src, runtime_src); if (is_ref) { const target = sema.pt.zcu.getTarget(); const alloc_ty = try pt.ptrTypeSema(.{ .child = tuple_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); for (operands, 0..) |operand, i_usize| { const i: u32 = @intCast(i_usize); const field_ptr_ty = try pt.ptrTypeSema(.{ .child = types[i], .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); if (values[i] == .none) { const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand)); } } return sema.makePtrConst(block, alloc); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len); for (operands, 0..) 
|operand, i| { element_refs[i] = try sema.resolveInst(operand); } return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); } fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref { return if (is_ref) sema.uavRef(val) else Air.internedToRef(val); } fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldTypeRef, inst_data.payload_index).data; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const field_src = block.builtinCallArgSrc(inst_data.src_node, 1); const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type); const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", }); return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src); } fn zirStructInitFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const ty_src = block.nodeOffset(inst_data.src_node); const field_name_src = block.src(.{ .node_offset_field_name_init = inst_data.src_node }); const wrapped_aggregate_ty = sema.resolveType(block, ty_src, extra.container_type) catch |err| switch (err) { // Since this is a ZIR instruction that returns a type, encountering // generic poison should not result in a failed compilation, but the // generic poison type. This prevents unnecessary failures when // constructing types at compile-time. 
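// Illustrative note (an assumption about the common trigger, not asserted by
// this file): `container_type` may evaluate to the generic poison type while
// a generic function is being analyzed, e.g. when the container type is
// written in terms of a not-yet-instantiated `comptime` parameter. Returning
// `.generic_poison_type` lets analysis continue; the real field type is
// computed once instantiation supplies a concrete container type.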
error.GenericPoison => return .generic_poison_type, else => |e| return e, }; const aggregate_ty = wrapped_aggregate_ty.optEuBaseType(mod); const zir_field_name = sema.code.nullTerminatedString(extra.name_start); const field_name = try ip.getOrPutString(sema.gpa, pt.tid, zir_field_name, .no_embedded_nulls); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } fn fieldType( sema: *Sema, block: *Block, aggregate_ty: Type, field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; var cur_ty = aggregate_ty; while (true) { try cur_ty.resolveFields(pt); switch (cur_ty.zigTypeTag(mod)) { .Struct => switch (ip.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { const field_index = if (anon_struct.names.len == 0) try sema.tupleFieldIndex(block, cur_ty, field_name, field_src) else try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); return Air.internedToRef(anon_struct.types.get(ip)[field_index]); }, .struct_type => { const struct_type = ip.loadStructType(cur_ty.toIntern()); const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, cur_ty, struct_type, field_src, field_name); const field_ty = struct_type.field_types.get(ip)[field_index]; return Air.internedToRef(field_ty); }, else => unreachable, }, .Union => { const union_obj = mod.typeToUnion(cur_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, cur_ty, union_obj, field_src, field_name); const field_ty = union_obj.field_types.get(ip)[field_index]; return Air.internedToRef(field_ty); }, .Optional => { // Struct/array init through optional requires the child type to not be a pointer. // If the child of .optional is a pointer it'll error on the next loop. 
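// Illustrative example (hypothetical user code) of the optional unwrapping
// performed below:
//
//     var p: ?Point = .{ .x = 1, .y = 2 };
//
// The `?Point` wrapper is stripped here, and `Point`'s field types are
// resolved on the next iteration of the loop.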
cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type); continue; }, .ErrorUnion => { cur_ty = cur_ty.errorUnionPayload(mod); continue; }, else => {}, } return sema.fail(block, ty_src, "expected struct or union; found '{}'", .{ cur_ty.fmt(pt), }); } } fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { return sema.getErrorReturnTrace(block); } fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern()); switch (sema.owner.unwrap()) { .func => |func| if (ip.funcAnalysisUnordered(func).calls_or_awaits_errorable_fn and block.ownerModule().error_tracing) { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); }, .cau => {}, } return Air.internedToRef(try pt.intern(.{ .opt = .{ .ty = opt_ptr_stack_trace_ty.toIntern(), .val = .none, } })); } fn zirFrame( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); return sema.failWithUseOfAsync(block, src); } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, operand_src, inst_data.operand); if (ty.isNoReturn(pt.zcu)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(pt)}); } const val = try ty.lazyAbiAlignment(pt); return Air.internedToRef(val.toIntern()); } fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const is_vector = operand_ty.zigTypeTag(mod) == .Vector; const operand_scalar_ty = operand_ty.scalarType(mod); if (operand_scalar_ty.toIntern() != .bool_type) { return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.fmt(pt)}); } if (try sema.resolveValue(operand)) |val| { if (!is_vector) { if (val.isUndef(mod)) return pt.undefRef(Type.u1); if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern()); return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern()); } const len = operand_ty.vectorLen(mod); const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len }); if (val.isUndef(mod)) return pt.undefRef(dest_ty); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..)
|*new_elem, i| { const old_elem = try val.elemValue(pt, i); const new_val = if (old_elem.isUndef(mod)) try pt.undefValue(Type.u1) else if (old_elem.toBool()) try pt.intValue(Type.u1, 1) else try pt.intValue(Type.u1, 0); new_elem.* = new_val.toIntern(); } return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); } if (!is_vector) { return block.addUnOp(.int_from_bool, operand); } const len = operand_ty.vectorLen(mod); const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len }); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) |*new_elem, i| { const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addUnOp(.int_from_bool, old_elem); } return block.addAggregateInit(dest_ty, new_elems); } fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const uncoerced_operand = try sema.resolveInst(inst_data.operand); const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { const err_name = sema.pt.zcu.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addNullTerminatedStrLit(err_name); } // Similar to zirTagName, we have a special AIR instruction for the error name in case an optimization pass // might be able to resolve the result at compile time. return block.addUnOp(.error_name, operand); } fn zirAbs( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand_ty = sema.typeOf(operand); const scalar_ty = operand_ty.scalarType(mod); const result_ty = switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => operand_ty, .Int => if (scalar_ty.isSignedInt(mod)) try operand_ty.toUnsigned(pt) else return operand, else => return sema.fail( block, operand_src, "expected integer, float, or vector of either integers or floats, found '{}'", .{operand_ty.fmt(pt)}, ), }; return (try sema.maybeConstantUnaryMath(operand, result_ty, Value.abs)) orelse { try sema.requireRuntimeBlock(block, operand_src, null); return block.addTyOp(.abs, result_ty, operand); }; } fn maybeConstantUnaryMath( sema: *Sema, operand: Air.Inst.Ref, result_ty: Type, comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value, ) CompileError!?Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; switch (result_ty.zigTypeTag(mod)) { .Vector => if (try sema.resolveValue(operand)) |val| { const scalar_ty = result_ty.scalarType(mod); const vec_len = result_ty.vectorLen(mod); if (val.isUndef(mod)) return try pt.undefRef(result_ty); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..)
|*elem, i| { const elem_val = try val.elemValue(pt, i); elem.* = (try eval(elem_val, scalar_ty, sema.arena, pt)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = elems }, } }))); }, else => if (try sema.resolveValue(operand)) |operand_val| { if (operand_val.isUndef(mod)) return try pt.undefRef(result_ty); const result_val = try eval(operand_val, result_ty, sema.arena, pt); return Air.internedToRef(result_val.toIntern()); }, } return null; } fn zirUnaryMath( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, comptime eval: fn (Value, Type, Allocator, Zcu.PerThread) Allocator.Error!Value, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand_ty = sema.typeOf(operand); const scalar_ty = operand_ty.scalarType(mod); switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail( block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(pt)}, ), } return (try sema.maybeConstantUnaryMath(operand, operand_ty, eval)) orelse { try sema.requireRuntimeBlock(block, operand_src, null); return block.addUnOp(air_tag, operand); }; } fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const src = block.nodeOffset(inst_data.src_node); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; try operand_ty.resolveLayout(pt); const enum_ty = switch (operand_ty.zigTypeTag(zcu)) { .EnumLiteral => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined); const tag_name = ip.indexToKey(val.toIntern()).enum_literal; return sema.addNullTerminatedStrLit(tag_name); }, .Enum => operand_ty, .Union => operand_ty.unionTagType(zcu) orelse return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(pt)}), else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{ operand_ty.fmt(pt), }), }; if (enum_ty.enumFieldCount(zcu) == 0) { // TODO I don't think this is the correct way to handle this but // it prevents a crash. 
// https://github.com/ziglang/zig/issues/15909 return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{ enum_ty.fmt(pt), }); } const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| { const field_index = enum_ty.enumTagFieldIndex(val, zcu) orelse { const msg = msg: { const msg = try sema.errMsg(src, "no field with value '{}' in enum '{}'", .{ val.fmtValueSema(pt, sema), enum_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(enum_ty.srcLoc(zcu), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; // TODO: write something like getCoercedInts to avoid needing to dupe const field_name = enum_ty.enumFieldName(field_index, zcu); return sema.addNullTerminatedStrLit(field_name); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety() and zcu.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, casted_operand); try sema.addSafetyCheck(block, src, ok, .invalid_enum_value); } // In case the value is runtime-known, we have an AIR instruction for this instead // of trying to lower it in Sema because an optimization pass may result in the operand // being comptime-known, which would let us elide the `tag_name` AIR instruction. return block.addUnOp(.tag_name, casted_operand); } fn zirReify( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small); const extra = sema.code.extraData(Zir.Inst.Reify, extended.operand).data; const tracked_inst = try block.trackZir(inst); const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), }; const operand_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_builtin_call_arg = .{ .builtin_call_node = 0, // `tracked_inst` is precisely the `reify` instruction, so offset is 0 .arg_index = 0, }, }, }; const type_info_ty = try pt.getBuiltinType("Type"); const uncasted_operand = try sema.resolveInst(extra.operand); const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{ .needed_comptime_reason = "operand to @Type must be comptime-known", }); const union_val = ip.indexToKey(val.toIntern()).un; if (try sema.anyUndef(block, operand_src, Value.fromInterned(union_val.val))) { return sema.failWithUseOfUndef(block, operand_src); } const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), mod).?; switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) { .Type => return .type_type, .Void => return .void_type, .Bool => return .bool_type, .NoReturn => return .noreturn_type, .ComptimeFloat => return .comptime_float_type, .ComptimeInt => return .comptime_int_type, .Undefined => return .undefined_type, .Null => return .null_type, .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return .enum_literal_type, .Int => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const signedness_val = try Value.fromInterned(union_val.val).fieldValue( pt, struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "signedness", .no_embedded_nulls)).?, ); const bits_val = try 
Value.fromInterned(union_val.val).fieldValue( pt, struct_type.nameIndex(ip, try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls)).?, ); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt)); const ty = try pt.intType(signedness, bits); return Air.internedToRef(ty.toIntern()); }, .Vector => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const len: u32 = @intCast(try len_val.toUnsignedIntSema(pt)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); const ty = try pt.vectorType(.{ .len = len, .child = child_ty.toIntern(), }); return Air.internedToRef(ty.toIntern()); }, .Float => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "bits", .no_embedded_nulls), ).?); const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(pt)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, 64 => Type.f64, 80 => Type.f80, 128 => Type.f128, else => return sema.fail(block, src, "{}-bit float unsupported", .{bits}), }; return Air.internedToRef(ty.toIntern()); }, .Pointer => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "size", .no_embedded_nulls), ).?); const is_const_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_const", .no_embedded_nulls), ).?); const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_volatile", .no_embedded_nulls), ).?); const alignment_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "alignment", .no_embedded_nulls), ).?); const address_space_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "address_space", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_allowzero", .no_embedded_nulls), ).?); const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls), ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(pt, .sema)).?; if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) { return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int}); } const abi_align = Alignment.fromByteUnits(alignment_val_int); const elem_ty 
= child_val.toType(); if (abi_align != .none) { try elem_ty.resolveLayout(pt); } const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); const actual_sentinel: InternPool.Index = s: { if (!sentinel_val.isNull(mod)) { if (ptr_size == .One or ptr_size == .C) { return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } const sentinel_ptr_val = sentinel_val.optionalValue(mod).?; const ptr_ty = try pt.singleMutPtrType(elem_ty); const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; break :s sent_val.toIntern(); } break :s .none; }; if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{}); } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { const msg = try sema.errMsg(src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "C pointers cannot point to opaque types", .{}); } } const ty = try pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = actual_sentinel, .flags = .{ .size = ptr_size, .is_const = is_const_val.toBool(), .is_volatile = is_volatile_val.toBool(), .alignment = abi_align, .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), .is_allowzero = is_allowzero_val.toBool(), }, }); return Air.internedToRef(ty.toIntern()); }, .Array => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), ).?); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "sentinel", .no_embedded_nulls), ).?); const len = try len_val.toUnsignedIntSema(pt); const child_ty = child_val.toType(); const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try pt.singleMutPtrType(child_ty); break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; const ty = try pt.arrayType(.{ .len = len, .sentinel = if (sentinel) |s| s.toIntern() else .none, .child = child_ty.toIntern(), }); return Air.internedToRef(ty.toIntern()); }, .Optional => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "child", .no_embedded_nulls), ).?); const child_ty = child_val.toType(); const ty = try pt.optionalType(child_ty.toIntern()); return Air.internedToRef(ty.toIntern()); }, .ErrorUnion => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, 
struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "error_set", .no_embedded_nulls), ).?); const payload_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "payload", .no_embedded_nulls), ).?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); if (error_set_ty.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } const ty = try pt.errorUnionType(error_set_ty, payload_ty); return Air.internedToRef(ty.toIntern()); }, .ErrorSet => { const payload_val = Value.fromInterned(union_val.val).optionalValue(mod) orelse return Air.internedToRef(Type.anyerror.toIntern()); const names_val = try sema.derefSliceAsArray(block, src, payload_val, .{ .needed_comptime_reason = "error set contents must be comptime-known", }); const len = try sema.usizeCast(block, src, names_val.typeOf(mod).arrayLen(mod)); var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { const elem_val = try names_val.elemValue(pt, i); const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); const name_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), ).?); const name = try sema.sliceToIpString(block, src, name_val, .{ .needed_comptime_reason = "error set contents must be comptime-known", }); _ = try pt.getErrorValue(name); const gop = names.getOrPutAssumeCapacity(name); if (gop.found_existing) { return sema.fail(block, src, "duplicate error '{}'", .{ name.fmt(ip), }); } } const ty = try pt.errorSetFromUnsortedNames(names.keys()); return Air.internedToRef(ty.toIntern()); }, .Struct => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls), ).?); const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "backing_integer", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_tuple", .no_embedded_nulls), ).?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified structs must have no decls", .{}); } if (layout != .@"packed" and !backing_integer_val.isNull(mod)) { return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{ .needed_comptime_reason = "struct fields must be comptime-known", }); return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy, is_tuple_val.toBool()); }, .Enum => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( 
ip, try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_exhaustive", .no_embedded_nulls), ).?); if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified enums must have no decls", .{}); } const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{ .needed_comptime_reason = "enum fields must be comptime-known", }); return sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_arr, name_strategy); }, .Opaque => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); // Decls if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified opaque must have no decls", .{}); } const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, .{ .key = .{ .reified = .{ .zir_index = try block.trackZir(inst), } }, })) { .existing => |ty| { try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); }, .wip => |wip| wip, }; errdefer wip_ty.cancel(ip, pt.tid); wip_ty.setName(ip, try sema.createTypeName( block, name_strategy, "opaque", inst, wip_ty.index, )); const new_namespace_index = try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), .generation = mod.generation, }); try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index)); }, .Union => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "layout", .no_embedded_nulls), ).?); const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "tag_type", .no_embedded_nulls), ).?); const fields_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "fields", .no_embedded_nulls), ).?); const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "decls", .no_embedded_nulls), ).?); if (try decls_val.sliceLen(pt) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); const fields_arr = try sema.derefSliceAsArray(block, operand_src, fields_val, .{ .needed_comptime_reason = "union fields must be comptime-known", }); return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_arr, name_strategy); }, .Fn => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "calling_convention", .no_embedded_nulls), ).?); const is_generic_val = try 
Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls), ).?); const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_var_args", .no_embedded_nulls), ).?); const return_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "return_type", .no_embedded_nulls), ).?); const params_slice_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "params", .no_embedded_nulls), ).?); const is_generic = is_generic_val.toBool(); if (is_generic) { return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{}); } const is_var_args = is_var_args_val.toBool(); const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val); if (is_var_args) { try sema.checkCallConvSupportsVarArgs(block, src, cc); } const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); const params_val = try sema.derefSliceAsArray(block, operand_src, params_slice_val, .{ .needed_comptime_reason = "function parameters must be comptime-known", }); const args_len = try sema.usizeCast(block, src, params_val.typeOf(mod).arrayLen(mod)); const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { const elem_val = try params_val.elemValue(pt, i); const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern())); const param_is_generic_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_generic", .no_embedded_nulls), ).?); const param_is_noalias_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "is_noalias", .no_embedded_nulls), ).?); const opt_param_type_val = try elem_val.fieldValue(pt, elem_struct_type.nameIndex( ip, try ip.getOrPutString(gpa, pt.tid, "type", .no_embedded_nulls), ).?); if (param_is_generic_val.toBool()) { return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); } const param_type_val = opt_param_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.type must be non-null for @Type", .{}); param_type.* = param_type_val.toIntern(); if (param_is_noalias_val.toBool()) { if (!Type.fromInterned(param_type.*).isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); } } const ty = try pt.funcType(.{ .param_types = param_types, .noalias_bits = noalias_bits, .return_type = return_type.toIntern(), .cc = cc, .is_var_args = is_var_args, }); return Air.internedToRef(ty.toIntern()); }, .Frame => return sema.failWithUseOfAsync(block, src), } } fn reifyEnum( sema: *Sema, block: *Block, inst: Zir.Inst.Index, src: LazySrcLoc, tag_ty: Type, is_exhaustive: bool, fields_val: Value, name_strategy: Zir.Inst.NameStrategy, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; // This logic must stay in sync with the structure of `std.builtin.Type.Enum` - search for `fieldValue`. 
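// Illustrative example (hypothetical user code) of the reification this
// function handles, mirroring the field order of `std.builtin.Type.Enum`
// assumed by the `fieldValue` calls below:
//
//     const E = @Type(.{ .Enum = .{
//         .tag_type = u2,
//         .fields = &.{
//             .{ .name = "a", .value = 0 },
//             .{ .name = "b", .value = 1 },
//         },
//         .decls = &.{},
//         .is_exhaustive = true,
//     } });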
const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod)); // The validation work here is non-trivial, and it's possible the type already exists. // So in this first pass, let's just construct a hash to optimize for this case. If the // inputs turn out to be invalid, we can cancel the WIP type later. // For deduplication purposes, we must create a hash including all details of this type. // TODO: use a longer hash! var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, tag_ty.toIntern()); std.hash.autoHash(&hasher, is_exhaustive); std.hash.autoHash(&hasher, fields_len); for (0..fields_len) |field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 1)); const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .needed_comptime_reason = "enum field name must be comptime-known", }); std.hash.autoHash(&hasher, .{ field_name, field_value_val.toIntern(), }); } const tracked_inst = try block.trackZir(inst); const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{ .has_values = true, .tag_mode = if (is_exhaustive) .explicit else .nonexhaustive, .fields_len = fields_len, .key = .{ .reified = .{ .zir_index = tracked_inst, .type_hash = hasher.final(), } }, }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); }, }; var done = false; errdefer if (!done) wip_ty.cancel(ip, pt.tid); if (tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); } wip_ty.setName(ip, try sema.createTypeName( block, name_strategy, "enum", inst, wip_ty.index, )); const new_namespace_index = try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); try sema.declareDependency(.{ .interned = wip_ty.index }); try sema.addTypeReferenceEntry(src, wip_ty.index); wip_ty.prepare(ip, new_cau_index, new_namespace_index); wip_ty.setTagTy(ip, tag_ty.toIntern()); done = true; for (0..fields_len) |field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_value_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 1)); // Don't pass a reason; first loop acts as an assertion that this is valid. 
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); if (!try sema.intFitsInType(field_value_val, tag_ty, null)) { // TODO: better source location return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{ field_name.fmt(ip), field_value_val.fmtValueSema(pt, sema), tag_ty.fmt(pt), }); } const coerced_field_val = try pt.getCoerced(field_value_val, tag_ty); if (wip_ty.nextField(ip, field_name, coerced_field_val.toIntern())) |conflict| { return sema.failWithOwnedErrorMsg(block, switch (conflict.kind) { .name => msg: { const msg = try sema.errMsg(src, "duplicate enum field '{}'", .{field_name.fmt(ip)}); errdefer msg.destroy(gpa); _ = conflict.prev_field_idx; // TODO: this note is incorrect try sema.errNote(src, msg, "other field here", .{}); break :msg msg; }, .value => msg: { const msg = try sema.errMsg(src, "enum tag value {} already taken", .{field_value_val.fmtValueSema(pt, sema)}); errdefer msg.destroy(gpa); _ = conflict.prev_field_idx; // TODO: this note is incorrect try sema.errNote(src, msg, "other enum tag value here", .{}); break :msg msg; }, }); } } if (!is_exhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(pt)) { return sema.fail(block, src, "non-exhaustive enum specified every value", .{}); } codegen_type: { if (mod.comp.config.use_llvm) break :codegen_type; if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } return Air.internedToRef(wip_ty.index); } fn reifyUnion( sema: *Sema, block: *Block, inst: Zir.Inst.Index, src: LazySrcLoc, layout: std.builtin.Type.ContainerLayout, opt_tag_type_val: Value, fields_val: Value, name_strategy: Zir.Inst.NameStrategy, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; // This logic must stay in sync with the structure of `std.builtin.Type.Union` - search for `fieldValue`. const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod)); // The validation work here is non-trivial, and it's possible the type already exists. // So in this first pass, let's just construct a hash to optimize for this case. If the // inputs turn out to be invalid, we can cancel the WIP type later. // For deduplication purposes, we must create a hash including all details of this type. // TODO: use a longer hash! 
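// Illustrative motivation for the type hash (hypothetical user code, with
// `info` standing in for any valid `std.builtin.Type` value): re-evaluating
// the same `@Type` call site with an equal description must map to a single
// interned type rather than a duplicate, e.g.
//
//     fn Reify(comptime info: std.builtin.Type) type {
//         return @Type(info); // same `zir_index`, equal `type_hash`
//     }
//     comptime std.debug.assert(Reify(info) == Reify(info));
//
// A repeat analysis with the same hash can then take the `.existing` branch
// below instead of building a fresh WIP type.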
var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, layout); std.hash.autoHash(&hasher, opt_tag_type_val.toIntern()); std.hash.autoHash(&hasher, fields_len); var any_aligns = false; for (0..fields_len) |field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_type_val = try field_info.fieldValue(pt, 1); const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 2)); const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .needed_comptime_reason = "union field name must be comptime-known", }); std.hash.autoHash(&hasher, .{ field_name, field_type_val.toIntern(), field_align_val.toIntern(), }); if (field_align_val.toUnsignedInt(pt) != 0) { any_aligns = true; } } const tracked_inst = try block.trackZir(inst); const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{ .flags = .{ .layout = layout, .status = .none, .runtime_tag = if (opt_tag_type_val.optionalValue(mod) != null) .tagged else if (layout != .auto) .none else switch (block.wantSafety()) { true => .safety, false => .none, }, .any_aligned_fields = any_aligns, .requires_comptime = .unknown, .assumed_runtime_bits = false, .assumed_pointer_aligned = false, .alignment = .none, }, .fields_len = fields_len, .enum_tag_ty = .none, // set later because not yet validated .field_types = &.{}, // set later .field_aligns = &.{}, // set later .key = .{ .reified = .{ .zir_index = tracked_inst, .type_hash = hasher.final(), } }, }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); }, }; errdefer wip_ty.cancel(ip, pt.tid); const type_name = try sema.createTypeName( block, name_strategy, "union", inst, wip_ty.index, ); wip_ty.setName(ip, type_name); const field_types = try sema.arena.alloc(InternPool.Index, fields_len); const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined; const enum_tag_ty, const has_explicit_tag = if (opt_tag_type_val.optionalValue(mod)) |tag_type_val| tag_ty: { switch (ip.indexToKey(tag_type_val.toIntern())) { .enum_type => {}, else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}), } const enum_tag_ty = tag_type_val.toType(); // We simply track which fields of the tag type have been seen. const tag_ty_fields_len = enum_tag_ty.enumFieldCount(mod); var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len); for (field_types, 0..) |*field_ty, field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_type_val = try field_info.fieldValue(pt, 1); // Don't pass a reason; first loop acts as an assertion that this is valid. 
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); const enum_index = enum_tag_ty.enumFieldIndex(field_name, mod) orelse { // TODO: better source location return sema.fail(block, src, "no field named '{}' in enum '{}'", .{ field_name.fmt(ip), enum_tag_ty.fmt(pt), }); }; if (seen_tags.isSet(enum_index)) { // TODO: better source location return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)}); } seen_tags.set(enum_index); field_ty.* = field_type_val.toIntern(); if (any_aligns) { const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); } field_aligns[field_idx] = Alignment.fromByteUnits(byte_align); } } if (tag_ty_fields_len > fields_len) return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "enum fields missing in union", .{}); errdefer msg.destroy(gpa); var it = seen_tags.iterator(.{ .kind = .unset }); while (it.next()) |enum_index| { const field_name = enum_tag_ty.enumFieldName(enum_index, mod); try sema.addFieldErrNote(enum_tag_ty, enum_index, msg, "field '{}' missing, declared here", .{ field_name.fmt(ip), }); } try sema.addDeclaredHereNote(msg, enum_tag_ty); break :msg msg; }); break :tag_ty .{ enum_tag_ty.toIntern(), true }; } else tag_ty: { // We must track field names and set up the tag type ourselves. var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; try field_names.ensureTotalCapacity(sema.arena, fields_len); for (field_types, 0..) |*field_ty, field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_type_val = try field_info.fieldValue(pt, 1); // Don't pass a reason; first loop acts as an assertion that this is valid. 
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); const gop = field_names.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)}); } field_ty.* = field_type_val.toIntern(); if (any_aligns) { const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt); if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) { // TODO: better source location return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); } field_aligns[field_idx] = Alignment.fromByteUnits(byte_align); } } const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), wip_ty.index, type_name); break :tag_ty .{ enum_tag_ty, false }; }; errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error for (field_types) |field_ty_ip| { const field_ty = Type.fromInterned(field_ty_ip); if (field_ty.zigTypeTag(mod) == .Opaque) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, field_ty, .union_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotPacked(msg, src, field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } } const loaded_union = ip.loadUnionType(wip_ty.index); loaded_union.setFieldTypes(ip, field_types); if (any_aligns) { loaded_union.setFieldAligns(ip, field_aligns); } loaded_union.setTagType(ip, enum_tag_ty); loaded_union.setStatus(ip, .have_field_types); const new_namespace_index = try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); codegen_type: { if (mod.comp.config.use_llvm) break :codegen_type; if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } try sema.declareDependency(.{ .interned = wip_ty.index }); try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } fn reifyStruct( sema: *Sema, block: *Block, inst: Zir.Inst.Index, src: LazySrcLoc, layout: std.builtin.Type.ContainerLayout, opt_backing_int_val: Value, fields_val: Value, name_strategy: Zir.Inst.NameStrategy, is_tuple: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; // This logic must stay in sync with the structure of `std.builtin.Type.Struct` - 
search for `fieldValue`. const fields_len: u32 = @intCast(fields_val.typeOf(mod).arrayLen(mod)); // The validation work here is non-trivial, and it's possible the type already exists. // So in this first pass, let's just construct a hash to optimize for this case. If the // inputs turn out to be invalid, we can cancel the WIP type later. // For deduplication purposes, we must create a hash including all details of this type. // TODO: use a longer hash! var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, layout); std.hash.autoHash(&hasher, opt_backing_int_val.toIntern()); std.hash.autoHash(&hasher, is_tuple); std.hash.autoHash(&hasher, fields_len); var any_comptime_fields = false; var any_default_inits = false; var any_aligned_fields = false; for (0..fields_len) |field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_type_val = try field_info.fieldValue(pt, 1); const field_default_value_val = try field_info.fieldValue(pt, 2); const field_is_comptime_val = try field_info.fieldValue(pt, 3); const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 4)); const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .needed_comptime_reason = "struct field name must be comptime-known", }); const field_is_comptime = field_is_comptime_val.toBool(); const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(mod)) |ptr_val| d: { const ptr_ty = try pt.singleConstPtrType(field_type_val.toType()); // We need to do this deref here, so we won't check for this error case later on. const val = try sema.pointerDeref(block, src, ptr_val, ptr_ty) orelse return sema.failWithNeededComptime( block, src, .{ .needed_comptime_reason = "struct field default value must be comptime-known" }, ); // Resolve the value so that lazy values do not create distinct types. 
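// Illustrative example (hypothetical user code) of why lazy values are
// resolved here: a field default written as `@sizeOf(S)` may initially be
// stored as a lazy value; forcing it to the concrete integer ensures that
// two descriptions differing only in lazy representation hash and intern
// to the same struct type.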
break :d (try sema.resolveLazyValue(val)).toIntern(); } else .none; std.hash.autoHash(&hasher, .{ field_name, field_type_val.toIntern(), field_default_value, field_is_comptime, field_alignment_val.toIntern(), }); if (field_is_comptime) any_comptime_fields = true; if (field_default_value != .none) any_default_inits = true; switch (try field_alignment_val.orderAgainstZeroAdvanced(pt, .sema)) { .eq => {}, .gt => any_aligned_fields = true, .lt => unreachable, } } const tracked_inst = try block.trackZir(inst); const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ .layout = layout, .fields_len = fields_len, .known_non_opv = false, .requires_comptime = .unknown, .is_tuple = is_tuple, .any_comptime_fields = any_comptime_fields, .any_default_inits = any_default_inits, .any_aligned_fields = any_aligned_fields, .inits_resolved = true, .key = .{ .reified = .{ .zir_index = tracked_inst, .type_hash = hasher.final(), } }, }, false)) { .wip => |wip| wip, .existing => |ty| { try sema.declareDependency(.{ .interned = ty }); try sema.addTypeReferenceEntry(src, ty); return Air.internedToRef(ty); }, }; errdefer wip_ty.cancel(ip, pt.tid); if (is_tuple) switch (layout) { .@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}), .@"packed" => return sema.fail(block, src, "packed tuples are not supported", .{}), .auto => {}, }; wip_ty.setName(ip, try sema.createTypeName( block, name_strategy, "struct", inst, wip_ty.index, )); const struct_type = ip.loadStructType(wip_ty.index); for (0..fields_len) |field_idx| { const field_info = try fields_val.elemValue(pt, field_idx); const field_name_val = try field_info.fieldValue(pt, 0); const field_type_val = try field_info.fieldValue(pt, 1); const field_default_value_val = try field_info.fieldValue(pt, 2); const field_is_comptime_val = try field_info.fieldValue(pt, 3); const field_alignment_val = try field_info.fieldValue(pt, 4); const field_ty = field_type_val.toType(); // Don't pass a reason; first loop acts as an assertion that this is valid. 
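// Hypothetical user code for the tuple path handled below:
//   @Type(.{ .Struct = .{ .layout = .auto, .fields = &fields, .decls = &.{}, .is_tuple = true } })
// must name its fields "0", "1", ...; a non-numeric name, or a numeric name that
// disagrees with the field's index, is rejected by the checks that follow.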
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); if (is_tuple) { const field_name_index = field_name.toUnsigned(ip) orelse return sema.fail( block, src, "tuple cannot have non-numeric field '{}'", .{field_name.fmt(ip)}, ); if (field_name_index != field_idx) { return sema.fail( block, src, "tuple field name '{}' does not match field index {}", .{ field_name_index, field_idx }, ); } } else if (struct_type.addFieldName(ip, field_name)) |prev_index| { _ = prev_index; // TODO: better source location return sema.fail(block, src, "duplicate struct field name {}", .{field_name.fmt(ip)}); } if (any_aligned_fields) { if (!try sema.intFitsInType(field_alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } const byte_align = try field_alignment_val.toUnsignedIntSema(pt); if (byte_align == 0) { if (layout != .@"packed") { struct_type.field_aligns.get(ip)[field_idx] = .none; } } else { if (layout == .@"packed") return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{}); if (!math.isPowerOfTwo(byte_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align}); struct_type.field_aligns.get(ip)[field_idx] = Alignment.fromNonzeroByteUnits(byte_align); } } const field_is_comptime = field_is_comptime_val.toBool(); if (field_is_comptime) { assert(any_comptime_fields); switch (layout) { .@"extern" => return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}), .@"packed" => return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}), .auto => struct_type.setFieldComptime(ip, field_idx), } } const field_default: InternPool.Index = d: { if (!any_default_inits) break :d .none; const ptr_val = field_default_value_val.optionalValue(mod) orelse break :d .none; const ptr_ty = try pt.singleConstPtrType(field_ty); // Asserted comptime-dereferenceable above. const val = (try sema.pointerDeref(block, src, ptr_val, ptr_ty)).?; // We already resolved this for deduplication, so we may as well do it now.
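// For example, a reified field with `.is_comptime = true` but a null
// `.default_value` is rejected below: a comptime field is meaningless without a
// comptime-known initialization value.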
break :d (try sema.resolveLazyValue(val)).toIntern(); }; if (field_is_comptime and field_default == .none) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } struct_type.field_types.get(ip)[field_idx] = field_type_val.toIntern(); if (field_default != .none) { struct_type.field_inits.get(ip)[field_idx] = field_default; } if (field_ty.zigTypeTag(mod) == .Opaque) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } if (field_ty.zigTypeTag(mod) == .NoReturn) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "struct fields cannot be 'noreturn'", .{}); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } if (layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotExtern(msg, src, field_ty, .struct_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.explainWhyTypeIsNotPacked(msg, src, field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }); } } if (layout == .@"packed") { var fields_bit_sum: u64 = 0; for (0..struct_type.field_types.len) |field_idx| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]); field_ty.resolveLayout(pt) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.errNote(src, msg, "while checking a field of this struct", .{}); return err; }, else => return err, }; fields_bit_sum += field_ty.bitSize(pt); } if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| { const backing_int_ty = backing_int_val.toType(); try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_type.setBackingIntType(ip, backing_int_ty.toIntern()); } else { const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.setBackingIntType(ip, backing_int_ty.toIntern()); } } const new_namespace_index = try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), .generation = mod.generation, }); const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index); try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); codegen_type: { if (mod.comp.config.use_llvm) break :codegen_type; if (block.ownerModule().strip) break :codegen_type; try mod.comp.queueJob(.{ .codegen_type = wip_ty.index }); } try sema.declareDependency(.{ .interned = wip_ty.index }); try sema.addTypeReferenceEntry(src, wip_ty.index); return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { const pt = sema.pt; const va_list_ty = try pt.getBuiltinType("VaList"); const va_list_ptr = 
try pt.singleMutPtrType(va_list_ty); const inst = try sema.resolveInst(zir_ref); return sema.coerce(block, va_list_ptr, inst, src); } fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = block.nodeOffset(extra.node); const va_list_src = block.builtinCallArgSrc(extra.node, 0); const ty_src = block.builtinCallArgSrc(extra.node, 1); const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.lhs); const arg_ty = try sema.resolveType(block, ty_src, extra.rhs); if (!try sema.validateExternType(arg_ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, arg_ty, .param_ty); try sema.addDeclaredHereNote(msg, arg_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.c_va_arg, arg_ty, va_list_ref); } fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const va_list_src = block.builtinCallArgSrc(extra.node, 0); const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand); const va_list_ty = try sema.pt.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref); } fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const va_list_src = block.builtinCallArgSrc(extra.node, 0); const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand); try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(.c_va_end, va_list_ref); } fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const src = block.nodeOffset(@bitCast(extended.operand)); const va_list_ty = try sema.pt.getBuiltinType("VaList"); try sema.requireRuntimeBlock(block, src, null); return block.addInst(.{ .tag = .c_va_start, .data = .{ .ty = va_list_ty }, }); } fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ty = try sema.resolveType(block, ty_src, inst_data.operand); const type_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{}", .{ty.fmt(pt)}, .no_embedded_nulls); return sema.addNullTerminatedStrLit(type_name); } fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); return sema.failWithUseOfAsync(block, src); } fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); return sema.failWithUseOfAsync(block, src); } fn zirIntFromFloat(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@intFromFloat"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src); const is_vector = dest_ty.zigTypeTag(mod) == .Vector; const dest_scalar_ty = dest_ty.scalarType(mod); const operand_scalar_ty = operand_ty.scalarType(mod); _ = try sema.checkIntType(block, src, dest_scalar_ty); try sema.checkFloatType(block, operand_src, operand_scalar_ty); if (try sema.resolveValue(operand)) |operand_val| { const result_val = try sema.intFromFloat(block, operand_src, operand_val, operand_ty, dest_ty, .truncate); return Air.internedToRef(result_val.toIntern()); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.failWithNeededComptime(block, operand_src, .{ .needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known", }); } try sema.requireRuntimeBlock(block, src, operand_src); if (dest_scalar_ty.intInfo(mod).bits == 0) { if (!is_vector) { if (block.wantSafety()) { const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try pt.floatValue(operand_ty, 0.0)).toIntern())); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } return Air.internedToRef((try pt.intValue(dest_ty, 0)).toIntern()); } if (block.wantSafety()) { const len = dest_ty.vectorLen(mod); for (0..len) |i| { const idx_ref = try pt.intRef(Type.usize, i); const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref); const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 0.0)).toIntern())); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } } return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = (try pt.intValue(dest_scalar_ty, 0)).toIntern() }, } })); } if (!is_vector) { const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand); if (block.wantSafety()) { const back = try block.addTyOp(.float_from_int, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern())); const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern())); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } return result; } const len = dest_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) 
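// Vector case, scalarized: each element is converted via int_from_float; with
// safety on, the result is converted back via float_from_int and the round-trip
// difference must satisfy -1.0 < diff < 1.0, otherwise the integer part was out
// of bounds.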
|*new_elem, i| { const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem); if (block.wantSafety()) { const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result); const diff = try block.addBinOp(.sub, old_elem, back); const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 1.0)).toIntern())); const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_scalar_ty, -1.0)).toIntern())); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds); } new_elem.* = result; } return block.addAggregateInit(dest_ty, new_elems); } fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@floatFromInt"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src); const is_vector = dest_ty.zigTypeTag(mod) == .Vector; const dest_scalar_ty = dest_ty.scalarType(mod); const operand_scalar_ty = operand_ty.scalarType(mod); try sema.checkFloatType(block, src, dest_scalar_ty); _ = try sema.checkIntType(block, operand_src, operand_scalar_ty); if (try sema.resolveValue(operand)) |operand_val| { const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, pt, .sema); return Air.internedToRef(result_val.toIntern()); } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, .{ .needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known", }); } try sema.requireRuntimeBlock(block, src, operand_src); if (!is_vector) { return block.addTyOp(.float_from_int, dest_ty, operand); } const len = operand_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) 
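// Scalarized vector path: each element is converted with float_from_int, then
// the result vector is rebuilt with aggregate_init.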
|*new_elem, i| { const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref); new_elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, old_elem); } return block.addAggregateInit(dest_ty, new_elems); } fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand_res = try sema.resolveInst(extra.rhs); const uncoerced_operand_ty = sema.typeOf(operand_res); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrFromInt"); try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src); const is_vector = dest_ty.zigTypeTag(mod) == .Vector; const operand_ty = if (is_vector) operand_ty: { const len = dest_ty.vectorLen(mod); break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len }); } else Type.usize; const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src); const ptr_ty = dest_ty.scalarType(mod); try sema.checkPtrType(block, src, ptr_ty, true); const elem_ty = ptr_ty.elemType2(mod); const ptr_align = try ptr_ty.ptrAlignmentAdvanced(pt, .sema); if (ptr_ty.isSlice(mod)) { const msg = msg: { const msg = try sema.errMsg(src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "slice length cannot be inferred from address", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| { if (!is_vector) { const ptr_val = try sema.ptrFromIntVal(block, operand_src, val, ptr_ty, ptr_align); return Air.internedToRef(ptr_val.toIntern()); } const len = dest_ty.vectorLen(mod); const new_elems = try sema.arena.alloc(InternPool.Index, len); for (new_elems, 0..) |*new_elem, i| { const elem = try val.elemValue(pt, i); const ptr_val = try sema.ptrFromIntVal(block, operand_src, elem, ptr_ty, ptr_align); new_elem.* = ptr_val.toIntern(); } return Air.internedToRef(try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = new_elems }, } })); } if (try sema.typeRequiresComptime(ptr_ty)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "pointer to comptime-only type '{}' must be comptime-known, but operand is runtime-known", .{ptr_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, src, ptr_ty); break :msg msg; }); } try sema.requireRuntimeBlock(block, src, operand_src); if (!is_vector) { if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) { if (!ptr_ty.isAllowzeroPtr(mod)) { const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); } if (ptr_align.compare(.gt, .@"1")) { const align_bytes_minus_1 = ptr_align.toByteUnits().? 
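// Alignment is a power of two, so the mask test `addr & (align - 1) == 0` is
// equivalent to `addr % align == 0` (e.g. align 8 uses mask 0b111).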
- 1; const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); } } return block.addBitCast(dest_ty, operand_coerced); } const len = dest_ty.vectorLen(mod); if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) { for (0..len) |i| { const idx_ref = try pt.intRef(Type.usize, i); const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref); if (!ptr_ty.isAllowzeroPtr(mod)) { const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize); try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null); } if (ptr_align.compare(.gt, .@"1")) { const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1; const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment); } } } const new_elems = try sema.arena.alloc(Air.Inst.Ref, len); for (new_elems, 0..) |*new_elem, i| { const idx_ref = try pt.intRef(Type.usize, i); const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref); new_elem.* = try block.addBitCast(ptr_ty, old_elem); } return block.addAggregateInit(dest_ty, new_elems); } fn ptrFromIntVal( sema: *Sema, block: *Block, operand_src: LazySrcLoc, operand_val: Value, ptr_ty: Type, ptr_align: Alignment, ) !Value { const pt = sema.pt; const zcu = pt.zcu; if (operand_val.isUndef(zcu)) { if (ptr_ty.isAllowzeroPtr(zcu) and ptr_align == .@"1") { return pt.undefValue(ptr_ty); } return sema.failWithUseOfUndef(block, operand_src); } const addr = try operand_val.toUnsignedIntSema(pt); if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(pt)}); if (addr != 0 and ptr_align != .none and !ptr_align.check(addr)) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)}); return switch (ptr_ty.zigTypeTag(zcu)) { .Optional => Value.fromInterned(try pt.intern(.{ .opt = .{ .ty = ptr_ty.toIntern(), .val = if (addr == 0) .none else (try pt.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(), } })), .Pointer => try pt.ptrIntValue(ptr_ty, addr), else => unreachable, }; } fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.builtinCallArgSrc(extra.node, 0); const base_dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_opt, "@errorCast"); const operand = try sema.resolveInst(extra.rhs); const base_operand_ty = sema.typeOf(operand); const dest_tag = base_dest_ty.zigTypeTag(mod); const operand_tag = base_operand_ty.zigTypeTag(mod); if (dest_tag != .ErrorSet and dest_tag != .ErrorUnion) { return sema.fail(block, src, "expected error set or error union type, found '{s}'", .{@tagName(dest_tag)}); } if (operand_tag != .ErrorSet and operand_tag != .ErrorUnion) { return sema.fail(block, src, 
"expected error set or error union type, found '{s}'", .{@tagName(operand_tag)}); } if (dest_tag == .ErrorSet and operand_tag == .ErrorUnion) { return sema.fail(block, src, "cannot cast an error union type to error set", .{}); } if (dest_tag == .ErrorUnion and operand_tag == .ErrorUnion and base_dest_ty.errorUnionPayload(mod).toIntern() != base_operand_ty.errorUnionPayload(mod).toIntern()) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "payload types of error unions must match", .{}); errdefer msg.destroy(sema.gpa); const dest_ty = base_dest_ty.errorUnionPayload(mod); const operand_ty = base_operand_ty.errorUnionPayload(mod); try sema.errNote(src, msg, "destination payload is '{}'", .{dest_ty.fmt(pt)}); try sema.errNote(src, msg, "operand payload is '{}'", .{operand_ty.fmt(pt)}); try addDeclaredHereNote(sema, msg, dest_ty); try addDeclaredHereNote(sema, msg, operand_ty); break :msg msg; }); } const dest_ty = if (dest_tag == .ErrorUnion) base_dest_ty.errorUnionSet(mod) else base_dest_ty; const operand_ty = if (operand_tag == .ErrorUnion) base_operand_ty.errorUnionSet(mod) else base_operand_ty; // operand must be defined since it can be an invalid error value const maybe_operand_val = try sema.resolveDefinedValue(block, operand_src, operand); const disjoint = disjoint: { // Try avoiding resolving inferred error sets if we can if (!dest_ty.isAnyError(mod) and dest_ty.errorSetIsEmpty(mod)) break :disjoint true; if (!operand_ty.isAnyError(mod) and operand_ty.errorSetIsEmpty(mod)) break :disjoint true; if (dest_ty.isAnyError(mod)) break :disjoint false; if (operand_ty.isAnyError(mod)) break :disjoint false; const dest_err_names = dest_ty.errorSetNames(mod); for (0..dest_err_names.len) |dest_err_index| { if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_names.get(ip)[dest_err_index])) break :disjoint false; } if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and !ip.isInferredErrorSetType(operand_ty.toIntern())) { break :disjoint true; } _ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern()); _ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern()); for (0..dest_err_names.len) |dest_err_index| { if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_names.get(ip)[dest_err_index])) break :disjoint false; } break :disjoint true; }; if (disjoint and dest_tag != .ErrorUnion) { return sema.fail(block, src, "error sets '{}' and '{}' have no common errors", .{ operand_ty.fmt(pt), dest_ty.fmt(pt), }); } if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) check: { const operand_val = mod.intern_pool.indexToKey(val.toIntern()); var error_name: InternPool.NullTerminatedString = undefined; if (operand_tag == .ErrorUnion) { if (operand_val.error_union.val != .err_name) break :check; error_name = operand_val.error_union.val.err_name; } else { error_name = operand_val.err.name; } if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) { return sema.fail(block, src, "'error.{}' not a member of error set '{}'", .{ error_name.fmt(ip), dest_ty.fmt(pt), }); } } return Air.internedToRef((try pt.getCoerced(val, base_dest_ty)).toIntern()); } try sema.requireRuntimeBlock(block, src, operand_src); const err_int_ty = try pt.errorIntType(); if (block.wantSafety() and !dest_ty.isAnyError(mod) and dest_ty.toIntern() != .adhoc_inferred_error_set_type and mod.backendSupportsFeature(.error_set_has_value)) { if (dest_tag == .ErrorUnion) { const err_code = try sema.analyzeErrUnionCode(block, operand_src, 
operand); const err_int = try block.addBitCast(err_int_ty, err_code); const zero_err = try pt.intRef(try pt.errorIntType(), 0); const is_zero = try block.addBinOp(.cmp_eq, err_int, zero_err); if (disjoint) { // Error must be zero. try sema.addSafetyCheck(block, src, is_zero, .invalid_error_code); } else { // Error must be in destination set or zero. const has_value = try block.addTyOp(.error_set_has_value, dest_ty, err_code); const ok = try block.addBinOp(.bool_or, has_value, is_zero); try sema.addSafetyCheck(block, src, ok, .invalid_error_code); } } else { const err_int_inst = try block.addBitCast(err_int_ty, operand); const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst); try sema.addSafetyCheck(block, src, ok, .invalid_error_code); } } return block.addBitCast(base_dest_ty, operand); } fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?; const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small))); const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.src(.{ .node_offset_ptrcast_operand = extra.node }); const operand = try sema.resolveInst(extra.rhs); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, flags.needResultTypeBuiltinName()); return sema.ptrCastFull( block, flags, src, operand, operand_src, dest_ty, flags.needResultTypeBuiltinName(), ); } fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrCast"); const operand = try sema.resolveInst(extra.rhs); return sema.ptrCastFull( block, .{ .ptr_cast = true }, src, operand, operand_src, dest_ty, "@ptrCast", ); } fn ptrCastFull( sema: *Sema, block: *Block, flags: Zir.Inst.FullPtrCastFlags, src: LazySrcLoc, operand: Air.Inst.Ref, operand_src: LazySrcLoc, dest_ty: Type, operation: []const u8, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); try sema.checkPtrType(block, src, dest_ty, true); try sema.checkPtrOperand(block, operand_src, operand_ty); const src_info = operand_ty.ptrInfo(mod); const dest_info = dest_ty.ptrInfo(mod); try Type.fromInterned(src_info.child).resolveLayout(pt); try Type.fromInterned(dest_info.child).resolveLayout(pt); const src_slice_like = src_info.flags.size == .Slice or (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array); const dest_slice_like = dest_info.flags.size == .Slice or (dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Array); if (dest_info.flags.size == .Slice and !src_slice_like) { return sema.fail(block, src, "illegal pointer cast to slice", .{}); } if (dest_info.flags.size == .Slice) { const src_elem_size = switch (src_info.flags.size) { .Slice => Type.fromInterned(src_info.child).abiSize(pt), // pointer to array .One => Type.fromInterned(src_info.child).childType(mod).abiSize(pt), else => unreachable, }; const dest_elem_size = 
Type.fromInterned(dest_info.child).abiSize(pt); if (src_elem_size != dest_elem_size) { return sema.fail(block, src, "TODO: implement {s} between slices changing the length", .{operation}); } } // The checking logic in this function must stay in sync with Sema.coerceInMemoryAllowedPtrs if (!flags.ptr_cast) { check_size: { if (src_info.flags.size == dest_info.flags.size) break :check_size; if (src_slice_like and dest_slice_like) break :check_size; if (src_info.flags.size == .C) break :check_size; if (dest_info.flags.size == .C) break :check_size; return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "cannot implicitly convert {s} pointer to {s} pointer", .{ pointerSizeString(src_info.flags.size), pointerSizeString(dest_info.flags.size), }); errdefer msg.destroy(sema.gpa); if (dest_info.flags.size == .Many and (src_info.flags.size == .Slice or (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array))) { try sema.errNote(src, msg, "use 'ptr' field to convert slice to many pointer", .{}); } else { try sema.errNote(src, msg, "use @ptrCast to change pointer size", .{}); } break :msg msg; }); } check_child: { const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: { // *[n]T -> []T break :blk Type.fromInterned(src_info.child).childType(mod); } else Type.fromInterned(src_info.child); const dest_child = Type.fromInterned(dest_info.child); const imc_res = try sema.coerceInMemoryAllowed( block, dest_child, src_child, !dest_info.flags.is_const, mod.getTarget(), src, operand_src, null, ); if (imc_res == .ok) break :check_child; return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "pointer element type '{}' cannot coerce into element type '{}'", .{ src_child.fmt(pt), dest_child.fmt(pt), }); errdefer msg.destroy(sema.gpa); try imc_res.report(sema, src, msg); try sema.errNote(src, msg, "use @ptrCast to cast pointer element type", .{}); break :msg msg; }); } check_sent: { if (dest_info.sentinel == .none) break :check_sent; if (src_info.flags.size == .C) break :check_sent; if (src_info.sentinel != .none) { const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child); if (dest_info.sentinel == coerced_sent) break :check_sent; } if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) { // *[n:s]T -> [:s]T const arr_ty = Type.fromInterned(src_info.child); if (arr_ty.sentinel(mod)) |src_sentinel| { const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child); if (dest_info.sentinel == coerced_sent) break :check_sent; } } return sema.failWithOwnedErrorMsg(block, msg: { const msg = if (src_info.sentinel == .none) blk: { break :blk try sema.errMsg(src, "destination pointer requires '{}' sentinel", .{ Value.fromInterned(dest_info.sentinel).fmtValueSema(pt, sema), }); } else blk: { break :blk try sema.errMsg(src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{ Value.fromInterned(src_info.sentinel).fmtValueSema(pt, sema), Value.fromInterned(dest_info.sentinel).fmtValueSema(pt, sema), }); }; errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @ptrCast to cast pointer sentinel", .{}); break :msg msg; }); } if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "pointer host size '{}' cannot coerce into pointer host size '{}'", .{
src_info.packed_offset.host_size, dest_info.packed_offset.host_size, }); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @ptrCast to cast pointer host size", .{}); break :msg msg; }); } if (src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "pointer bit offset '{}' cannot coerce into pointer bit offset '{}'", .{ src_info.packed_offset.bit_offset, dest_info.packed_offset.bit_offset, }); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @ptrCast to cast pointer bit offset", .{}); break :msg msg; }); } check_allowzero: { const src_allows_zero = operand_ty.ptrAllowsZero(mod); const dest_allows_zero = dest_ty.ptrAllowsZero(mod); if (!src_allows_zero) break :check_allowzero; if (dest_allows_zero) break :check_allowzero; return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "'{}' could have null values which are illegal in type '{}'", .{ operand_ty.fmt(pt), dest_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @ptrCast to assert the pointer is not null", .{}); break :msg msg; }); } // TODO: vector index? } const src_align = if (src_info.flags.alignment != .none) src_info.flags.alignment else Type.fromInterned(src_info.child).abiAlignment(pt); const dest_align = if (dest_info.flags.alignment != .none) dest_info.flags.alignment else Type.fromInterned(dest_info.child).abiAlignment(pt); if (!flags.align_cast) { if (dest_align.compare(.gt, src_align)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "{s} increases pointer alignment", .{operation}); errdefer msg.destroy(sema.gpa); try sema.errNote(operand_src, msg, "'{}' has alignment '{d}'", .{ operand_ty.fmt(pt), src_align.toByteUnits() orelse 0, }); try sema.errNote(src, msg, "'{}' has alignment '{d}'", .{ dest_ty.fmt(pt), dest_align.toByteUnits() orelse 0, }); try sema.errNote(src, msg, "use @alignCast to assert pointer alignment", .{}); break :msg msg; }); } } if (!flags.addrspace_cast) { if (src_info.flags.address_space != dest_info.flags.address_space) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "{s} changes pointer address space", .{operation}); errdefer msg.destroy(sema.gpa); try sema.errNote(operand_src, msg, "'{}' has address space '{s}'", .{ operand_ty.fmt(pt), @tagName(src_info.flags.address_space), }); try sema.errNote(src, msg, "'{}' has address space '{s}'", .{ dest_ty.fmt(pt), @tagName(dest_info.flags.address_space), }); try sema.errNote(src, msg, "use @addrSpaceCast to cast pointer address space", .{}); break :msg msg; }); } } else { // Some address space casts are always disallowed if (!target_util.addrSpaceCastIsValid(mod.getTarget(), src_info.flags.address_space, dest_info.flags.address_space)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "invalid address space cast", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(operand_src, msg, "address space '{s}' is not compatible with address space '{s}'", .{ @tagName(src_info.flags.address_space), @tagName(dest_info.flags.address_space), }); break :msg msg; }); } } if (!flags.const_cast) { if (src_info.flags.is_const and !dest_info.flags.is_const) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "{s} discards const qualifier", .{operation}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @constCast to discard const qualifier", .{}); 
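// Hypothetical fix at the call site: given `p: *const u8`, `@constCast(p)`
// yields `*u8`; this is sound only if the pointee is not actually in read-only
// memory.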
break :msg msg; }); } } if (!flags.volatile_cast) { if (src_info.flags.is_volatile and !dest_info.flags.is_volatile) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "{s} discards volatile qualifier", .{operation}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "use @volatileCast to discard volatile qualifier", .{}); break :msg msg; }); } } const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) ptr: { if (operand_ty.zigTypeTag(mod) == .Optional) { break :ptr try sema.analyzeOptionalSlicePtr(block, operand_src, operand, operand_ty); } else { break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty); } } else operand; const dest_ptr_ty = if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) blk: { // Only convert to a many-pointer at first var info = dest_info; info.flags.size = .Many; const ty = try pt.ptrTypeSema(info); if (dest_ty.zigTypeTag(mod) == .Optional) { break :blk try pt.optionalType(ty.toIntern()); } else { break :blk ty; } } else dest_ty; // Cannot do @addrSpaceCast at comptime if (!flags.addrspace_cast) { if (try sema.resolveValue(ptr)) |ptr_val| { if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(pt)}); } if (dest_align.compare(.gt, src_align)) { if (try ptr_val.getUnsignedIntAdvanced(pt, .sema)) |addr| { if (!dest_align.check(addr)) { return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align.toByteUnits().?, }); } } } if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { if (ptr_val.isUndef(mod)) return pt.undefRef(dest_ty); const arr_len = try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod)); const ptr_val_key = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; return Air.internedToRef((try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = try pt.intern(.{ .ptr = .{ .ty = dest_ty.slicePtrFieldType(mod).toIntern(), .base_addr = ptr_val_key.base_addr, .byte_offset = ptr_val_key.byte_offset, } }), .len = arr_len.toIntern(), } }))); } else { assert(dest_ptr_ty.eql(dest_ty, mod)); return Air.internedToRef((try pt.getCoerced(ptr_val, dest_ty)).toIntern()); } } } try sema.requireRuntimeBlock(block, src, null); try sema.validateRuntimeValue(block, operand_src, ptr); if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and (try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)) or Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Fn)) { const ptr_int = try block.addUnOp(.int_from_ptr, ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: { const len = try sema.analyzeSliceLen(block, operand_src, ptr); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bool_or, len_zero, is_non_zero); } else is_non_zero; try sema.addSafetyCheck(block, src, ok, .cast_to_null); } if (block.wantSafety() and dest_align.compare(.gt, src_align) and try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child))) { const align_bytes_minus_1 = dest_align.toByteUnits().? 
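// Same power-of-two mask trick as in the @ptrFromInt safety check: nonzero low
// bits under (align - 1) mean the pointer is misaligned.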
- 1; const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern()); const ptr_int = try block.addUnOp(.int_from_ptr, ptr); const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); const ok = if (src_info.flags.size == .Slice and dest_info.flags.size == .Slice) ok: { const len = try sema.analyzeSliceLen(block, operand_src, ptr); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bool_or, len_zero, is_aligned); } else is_aligned; try sema.addSafetyCheck(block, src, ok, .incorrect_alignment); } // If we're going from an array pointer to a slice, this will only be the pointer part! const result_ptr = if (flags.addrspace_cast) ptr: { // We can't change address spaces with a bitcast, so this requires two instructions var intermediate_info = src_info; intermediate_info.flags.address_space = dest_info.flags.address_space; const intermediate_ptr_ty = try pt.ptrTypeSema(intermediate_info); const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: { break :blk try pt.optionalType(intermediate_ptr_ty.toIntern()); } else intermediate_ptr_ty; const intermediate = try block.addInst(.{ .tag = .addrspace_cast, .data = .{ .ty_op = .{ .ty = Air.internedToRef(intermediate_ty.toIntern()), .operand = ptr, } }, }); if (intermediate_ty.eql(dest_ptr_ty, mod)) { // We only changed the address space, so no need for a bitcast break :ptr intermediate; } break :ptr try block.addBitCast(dest_ptr_ty, intermediate); } else ptr: { break :ptr try block.addBitCast(dest_ptr_ty, ptr); }; if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) { // We have to construct a slice using the operand's child's array length // Note that we know from the check at the start of the function that operand_ty is slice-like const arr_len = Air.internedToRef((try pt.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern()); return block.addInst(.{ .tag = .slice, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(dest_ty.toIntern()), .payload = try sema.addExtra(Air.Bin{ .lhs = result_ptr, .rhs = arr_len, }), } }, }); } else { assert(dest_ptr_ty.eql(dest_ty, mod)); try sema.checkKnownAllocPtr(block, operand, result_ptr); return result_ptr; } } fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?; const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small))); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); const operand_src = block.src(.{ .node_offset_ptrcast_operand = extra.node }); const operand = try sema.resolveInst(extra.operand); const operand_ty = sema.typeOf(operand); try sema.checkPtrOperand(block, operand_src, operand_ty); var ptr_info = operand_ty.ptrInfo(mod); if (flags.const_cast) ptr_info.flags.is_const = false; if (flags.volatile_cast) ptr_info.flags.is_volatile = false; const dest_ty = blk: { const dest_ty = try pt.ptrTypeSema(ptr_info); if (operand_ty.zigTypeTag(mod) == .Optional) { break :blk try pt.optionalType(dest_ty.toIntern()); } break :blk dest_ty; }; if (try sema.resolveValue(operand)) |operand_val| { return Air.internedToRef((try pt.getCoerced(operand_val, dest_ty)).toIntern()); } try sema.requireRuntimeBlock(block, src, 
null); const new_ptr = try block.addBitCast(dest_ty, operand); try sema.checkKnownAllocPtr(block, operand, new_ptr); return new_ptr; } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@truncate"); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, src); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); const operand_is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_is_vector = dest_ty.zigTypeTag(mod) == .Vector; if (operand_is_vector != dest_is_vector) { return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), operand_ty.fmt(pt) }); } if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.coerce(block, dest_ty, operand, operand_src); } const dest_info = dest_scalar_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(dest_ty)) |val| { return Air.internedToRef(val.toIntern()); } if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) { const operand_info = operand_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return Air.internedToRef(val.toIntern()); } if (operand_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ @tagName(dest_info.signedness), operand_ty.fmt(pt), }); } if (operand_info.bits < dest_info.bits) { const msg = msg: { const msg = try sema.errMsg( src, "destination type '{}' has more bits than source type '{}'", .{ dest_ty.fmt(pt), operand_ty.fmt(pt) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "destination type has {d} bits", .{ dest_info.bits, }); try sema.errNote(operand_src, msg, "operand type has {d} bits", .{ operand_info.bits, }); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } if (try sema.resolveValueIntable(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(dest_ty); if (!dest_is_vector) { return Air.internedToRef((try pt.getCoerced( try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, pt), dest_ty, )).toIntern()); } const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) 
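// Comptime vector path: truncate each element's bits, then coerce to the
// destination scalar type; e.g. truncating a @Vector(4, u16) to @Vector(4, u8)
// keeps only the low 8 bits of each lane.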
|*elem, i| { const elem_val = try val.elemValue(pt, i); const uncoerced_elem = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, pt); elem.* = (try pt.getCoerced(uncoerced_elem, dest_scalar_ty)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = elems }, } }))); } try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(.trunc, dest_ty, operand); } fn zirBitCount( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, comptime comptimeOp: fn (val: Value, ty: Type, pt: Zcu.PerThread) u64, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); _ = try sema.checkIntOrVector(block, operand, operand_src); const bits = operand_ty.intInfo(mod).bits; if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return Air.internedToRef(val.toIntern()); } const result_scalar_ty = try pt.smallestUnsignedInt(bits); switch (operand_ty.zigTypeTag(mod)) { .Vector => { const vec_len = operand_ty.vectorLen(mod); const result_ty = try pt.vectorType(.{ .len = vec_len, .child = result_scalar_ty.toIntern(), }); if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(result_ty); const elems = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) |*elem, i| { const elem_val = try val.elemValue(pt, i); const count = comptimeOp(elem_val, scalar_ty, pt); elem.* = (try pt.intValue(result_scalar_ty, count)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = elems }, } }))); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_ty, operand); } }, .Int => { if (try sema.resolveValueResolveLazy(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(result_scalar_ty); return pt.intRef(result_scalar_ty, comptimeOp(val, operand_ty, pt)); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_scalar_ty, operand); } }, else => unreachable, } } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src); const bits = scalar_ty.intInfo(mod).bits; if (bits % 8 != 0) { return sema.fail( block, operand_src, "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits", .{ scalar_ty.fmt(pt), bits }, ); } if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return Air.internedToRef(val.toIntern()); } switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(operand_ty); const result_val = try val.byteSwap(operand_ty, pt, sema.arena); return 
Air.internedToRef(result_val.toIntern()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addTyOp(.byte_swap, operand_ty, operand); }, .Vector => { const runtime_src = if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(operand_ty); const vec_len = operand_ty.vectorLen(mod); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { const elem_val = try val.elemValue(pt, i); elem.* = (try elem_val.byteSwap(scalar_ty, pt, sema.arena)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = operand_ty.toIntern(), .storage = .{ .elems = elems }, } }))); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addTyOp(.byte_swap, operand_ty, operand); }, else => unreachable, } } fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src); if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return Air.internedToRef(val.toIntern()); } const pt = sema.pt; const mod = pt.zcu; switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(operand_ty); const result_val = try val.bitReverse(operand_ty, pt, sema.arena); return Air.internedToRef(result_val.toIntern()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addTyOp(.bit_reverse, operand_ty, operand); }, .Vector => { const runtime_src = if (try sema.resolveValue(operand)) |val| { if (val.isUndef(mod)) return pt.undefRef(operand_ty); const vec_len = operand_ty.vectorLen(mod); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
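// Element-wise comptime @bitReverse; the runtime path below instead emits a
// single bit_reverse AIR instruction over the whole vector.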
|*elem, i| { const elem_val = try val.elemValue(pt, i); elem.* = (try elem_val.bitReverse(scalar_ty, pt, sema.arena)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = operand_ty.toIntern(), .storage = .{ .elems = elems }, } }))); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addTyOp(.bit_reverse, operand_ty, operand); }, else => unreachable, } } fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const offset = try sema.bitOffsetOf(block, inst); return sema.pt.intRef(Type.comptime_int, offset); } fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const offset = try sema.bitOffsetOf(block, inst); // TODO reminder to make this a compile error for packed structs return sema.pt.intRef(Type.comptime_int, offset / 8); } fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.src(.{ .node_offset_bin_op = inst_data.src_node }); const lhs_src = block.src(.{ .node_offset_bin_lhs = inst_data.src_node }); const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node }); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty = try sema.resolveType(block, lhs_src, extra.lhs); const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, .{ .needed_comptime_reason = "name of field must be comptime-known", }); const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; try ty.resolveLayout(pt); switch (ty.zigTypeTag(mod)) { .Struct => {}, else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(pt)}), } const field_index = if (ty.isTuple(mod)) blk: { if (field_name.eqlSlice("len", ip)) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); } else try sema.structFieldIndex(block, ty, field_name, rhs_src); if (ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "no offset available for comptime field", .{}); } switch (ty.containerLayout(mod)) { .@"packed" => { var bit_sum: u64 = 0; const struct_type = ip.loadStructType(ty.toIntern()); for (0..struct_type.field_types.len) |i| { if (i == field_index) { return bit_sum; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); bit_sum += field_ty.bitSize(pt); } else unreachable; }, else => return ty.structFieldOffset(field_index, pt) * 8, } } fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(pt)}), } } /// Returns `true` if the type was a comptime_int. 
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { const pt = sema.pt; const mod = pt.zcu; switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(pt)}), } } fn checkInvalidPtrIntArithmetic( sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (try ty.zigTypeTagOrPoison(mod)) { .Pointer => switch (ty.ptrSize(mod)) { .One, .Slice => return, .Many, .C => return sema.failWithInvalidPtrArithmetic(block, src, "pointer-integer", "addition and subtraction"), }, else => return, } } fn checkArithmeticOp( sema: *Sema, block: *Block, src: LazySrcLoc, scalar_tag: std.builtin.TypeId, lhs_zig_ty_tag: std.builtin.TypeId, rhs_zig_ty_tag: std.builtin.TypeId, zir_tag: Zir.Inst.Tag, ) CompileError!void { const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; if (!is_int and !(is_float and floatOpAllowed(zir_tag))) { return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag), }); } } fn checkPtrOperand( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { const msg = try sema.errMsg( ty_src, "expected pointer, found '{}'", .{ty.fmt(pt)}, ); errdefer msg.destroy(sema.gpa); try sema.errNote(ty_src, msg, "use '&' to obtain a function pointer", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)}); } fn checkPtrType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, allow_slice: bool, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Pointer => if (allow_slice or !ty.isSlice(mod)) return, .Fn => { const msg = msg: { const msg = try sema.errMsg( ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)}, ); errdefer msg.destroy(sema.gpa); try sema.errNote(ty_src, msg, "use '*const ' to make a function pointer type", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, .Optional => if (ty.childType(mod).zigTypeTag(mod) == .Pointer) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)}); } fn checkVectorElemType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Int, .Float, .Bool => return, .Optional, .Pointer => if (ty.isPtrAtRuntime(mod)) return, else => {}, } return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(pt)}); } fn checkFloatType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(pt)}), } } fn checkNumericType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { 
.ComptimeFloat, .Float, .ComptimeInt, .Int => {}, .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(pt)}), } } /// Returns the casted pointer. fn checkAtomicPtrOperand( sema: *Sema, block: *Block, elem_ty: Type, elem_ty_src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ptr_const: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; var diag: Module.AtomicPtrAlignmentDiagnostics = .{}; const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.FloatTooBig => return sema.fail( block, elem_ty_src, "expected {d}-bit float type or smaller; found {d}-bit float type", .{ diag.max_bits, diag.bits }, ), error.IntTooBig => return sema.fail( block, elem_ty_src, "expected {d}-bit integer type or smaller; found {d}-bit integer type", .{ diag.max_bits, diag.bits }, ), error.BadType => return sema.fail( block, elem_ty_src, "expected bool, integer, float, enum, packed struct, or pointer type; found '{}'", .{elem_ty.fmt(pt)}, ), }; var wanted_ptr_data: InternPool.Key.PtrType = .{ .child = elem_ty.toIntern(), .flags = .{ .alignment = alignment, .is_const = ptr_const, }, }; const ptr_ty = sema.typeOf(ptr); const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, }; wanted_ptr_data.flags.address_space = ptr_data.flags.address_space; wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero; wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile; const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; } fn checkPtrIsNotComptimeMutable( sema: *Sema, block: *Block, ptr_val: Value, ptr_src: LazySrcLoc, operand_src: LazySrcLoc, ) CompileError!void { _ = operand_src; if (sema.isComptimeMutablePtr(ptr_val)) { return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); } } fn checkIntOrVector( sema: *Sema, block: *Block, operand: Air.Inst.Ref, operand_src: LazySrcLoc, ) CompileError!Type { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int => return operand_ty, .Vector => { const elem_ty = operand_ty.childType(mod); switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ elem_ty.fmt(pt), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ operand_ty.fmt(pt), }), } } fn checkIntOrVectorAllowComptime( sema: *Sema, block: *Block, operand_ty: Type, operand_src: LazySrcLoc, ) CompileError!Type { const pt = sema.pt; const mod = pt.zcu; switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return operand_ty, .Vector => { const elem_ty = operand_ty.childType(mod); switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ elem_ty.fmt(pt), }), } }, else => return sema.fail(block, operand_src, 
"expected integer or vector, found '{}'", .{ operand_ty.fmt(pt), }), } } const SimdBinOp = struct { len: ?usize, /// Coerced to `result_ty`. lhs: Air.Inst.Ref, /// Coerced to `result_ty`. rhs: Air.Inst.Ref, lhs_val: ?Value, rhs_val: ?Value, /// Only different than `scalar_ty` when it is a vector operation. result_ty: Type, scalar_ty: Type, }; fn checkSimdBinOp( sema: *Sema, block: *Block, src: LazySrcLoc, uncasted_lhs: Air.Inst.Ref, uncasted_rhs: Air.Inst.Ref, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!SimdBinOp { const pt = sema.pt; const mod = pt.zcu; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null; const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src); const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src); return SimdBinOp{ .len = vec_len, .lhs = lhs, .rhs = rhs, .lhs_val = try sema.resolveValue(lhs), .rhs_val = try sema.resolveValue(rhs), .result_ty = result_ty, .scalar_ty = result_ty.scalarType(mod), }; } fn checkVectorizableBinaryOperands( sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return; const lhs_is_vector = switch (lhs_zig_ty_tag) { .Vector, .Array => true, else => false, }; const rhs_is_vector = switch (rhs_zig_ty_tag) { .Vector, .Array => true, else => false, }; if (lhs_is_vector and rhs_is_vector) { const lhs_len = lhs_ty.arrayLen(mod); const rhs_len = rhs_ty.arrayLen(mod); if (lhs_len != rhs_len) { const msg = msg: { const msg = try sema.errMsg(src, "vector length mismatch", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(lhs_src, msg, "length {d} here", .{lhs_len}); try sema.errNote(rhs_src, msg, "length {d} here", .{rhs_len}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } else { const msg = msg: { const msg = try sema.errMsg(src, "mixed scalar and vector operands: '{}' and '{}'", .{ lhs_ty.fmt(pt), rhs_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); if (lhs_is_vector) { try sema.errNote(lhs_src, msg, "vector here", .{}); try sema.errNote(rhs_src, msg, "scalar here", .{}); } else { try sema.errNote(lhs_src, msg, "scalar here", .{}); try sema.errNote(rhs_src, msg, "vector here", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } fn resolveExportOptions( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!Module.Export.Options { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const export_options_ty = try pt.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const linkage_src = block.src(.{ .init_field_linkage = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const section_src = block.src(.{ .init_field_section = 
src.offset.node_offset_builtin_call_arg.builtin_call_node }); const visibility_src = block.src(.{ .init_field_visibility = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src); const name = try sema.toConstString(block, name_src, name_operand, .{ .needed_comptime_reason = "name of exported value must be comptime-known", }); const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src); const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_operand, .{ .needed_comptime_reason = "linkage of exported value must be comptime-known", }); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "section", .no_embedded_nulls), section_src); const section_opt_val = try sema.resolveConstDefinedValue(block, section_src, section_operand, .{ .needed_comptime_reason = "linksection of exported value must be comptime-known", }); const section = if (section_opt_val.optionalValue(mod)) |section_val| try sema.toConstString(block, section_src, Air.internedToRef(section_val.toIntern()), .{ .needed_comptime_reason = "linksection of exported value must be comptime-known", }) else null; const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "visibility", .no_embedded_nulls), visibility_src); const visibility_val = try sema.resolveConstDefinedValue(block, visibility_src, visibility_operand, .{ .needed_comptime_reason = "visibility of exported value must be comptime-known", }); const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val); if (name.len < 1) { return sema.fail(block, name_src, "exported symbol name cannot be empty", .{}); } if (visibility != .default and linkage == .internal) { return sema.fail(block, visibility_src, "symbol '{s}' exported with internal linkage has non-default visibility {s}", .{ name, @tagName(visibility), }); } return .{ .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls), .linkage = linkage, .section = try ip.getOrPutStringOpt(gpa, pt.tid, section, .no_embedded_nulls), .visibility = visibility, }; } fn resolveBuiltinEnum( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, comptime name: []const u8, reason: NeededComptimeReason, ) CompileError!@field(std.builtin, name) { const pt = sema.pt; const ty = try pt.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); return pt.zcu.toEnum(@field(std.builtin, name), val); } fn resolveAtomicOrder( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, reason: NeededComptimeReason, ) CompileError!std.builtin.AtomicOrder { return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicOrder", reason); } fn resolveAtomicRmwOp( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.AtomicRmwOp { return sema.resolveBuiltinEnum(block, src, zir_ref, "AtomicRmwOp", .{ .needed_comptime_reason = "@atomicRmw operation must be comptime-known", }); } fn zirCmpxchg( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const extra =
sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data; const air_tag: Air.Inst.Tag = switch (extended.small) { 0 => .cmpxchg_weak, 1 => .cmpxchg_strong, else => unreachable, }; const src = block.nodeOffset(extra.node); // zig fmt: off const elem_ty_src = block.builtinCallArgSrc(extra.node, 0); const ptr_src = block.builtinCallArgSrc(extra.node, 1); const expected_src = block.builtinCallArgSrc(extra.node, 2); const new_value_src = block.builtinCallArgSrc(extra.node, 3); const success_order_src = block.builtinCallArgSrc(extra.node, 4); const failure_order_src = block.builtinCallArgSrc(extra.node, 5); // zig fmt: on const expected_value = try sema.resolveInst(extra.expected_value); const elem_ty = sema.typeOf(expected_value); if (elem_ty.zigTypeTag(mod) == .Float) { return sema.fail( block, elem_ty_src, "expected bool, integer, enum, packed struct, or pointer type; found '{}'", .{elem_ty.fmt(pt)}, ); } const uncasted_ptr = try sema.resolveInst(extra.ptr); const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const new_value = try sema.coerce(block, elem_ty, try sema.resolveInst(extra.new_value), new_value_src); const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order, .{ .needed_comptime_reason = "atomic order of cmpxchg success must be comptime-known", }); const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order, .{ .needed_comptime_reason = "atomic order of cmpxchg failure must be comptime-known", }); if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) { return sema.fail(block, success_order_src, "success atomic ordering must be monotonic or stricter", .{}); } if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) { return sema.fail(block, failure_order_src, "failure atomic ordering must be monotonic or stricter", .{}); } if (@intFromEnum(failure_order) > @intFromEnum(success_order)) { return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{}); } if (failure_order == .release or failure_order == .acq_rel) { return sema.fail(block, failure_order_src, "failure atomic ordering must not be release or acq_rel", .{}); } const result_ty = try pt.optionalType(elem_ty.toIntern()); // special case zero bit types if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) { return Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = result_ty.toIntern(), .val = .none, } }))); } const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { if (try sema.resolveValue(expected_value)) |expected_val| { if (try sema.resolveValue(new_value)) |new_val| { if (expected_val.isUndef(mod) or new_val.isUndef(mod)) { // TODO: this should probably cause the memory stored at the pointer // to become undef as well return pt.undefRef(result_ty); } const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const result_val = try pt.intern(.{ .opt = .{ .ty = result_ty.toIntern(), .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: { try sema.storePtr(block, src, ptr, new_value); break :blk .none; } else stored_val.toIntern(), } }); return Air.internedToRef(result_val); } else break :rs new_value_src; } else break :rs expected_src; } else ptr_src; const flags: u32 = @as(u32, @intFromEnum(success_order)) | (@as(u32, @intFromEnum(failure_order)) << 3); try 
sema.requireRuntimeBlock(block, src, runtime_src); return block.addInst(.{ .tag = air_tag, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(result_ty.toIntern()), .payload = try sema.addExtra(Air.Cmpxchg{ .ptr = ptr, .expected_value = expected_value, .new_value = new_value, .flags = flags, }), } }, }); } fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const scalar_src = block.builtinCallArgSrc(inst_data.src_node, 0); const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@splat"); if (!dest_ty.isVector(mod)) return sema.fail(block, src, "expected vector type, found '{}'", .{dest_ty.fmt(pt)}); if (!dest_ty.hasRuntimeBits(pt)) { const empty_aggregate = try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = &[_]InternPool.Index{} }, } }); return Air.internedToRef(empty_aggregate); } const operand = try sema.resolveInst(extra.rhs); const scalar_ty = dest_ty.childType(mod); const scalar = try sema.coerce(block, scalar_ty, operand, scalar_src); if (try sema.resolveValue(scalar)) |scalar_val| { if (scalar_val.isUndef(mod)) return pt.undefRef(dest_ty); return Air.internedToRef((try sema.splat(dest_ty, scalar_val)).toIntern()); } try sema.requireRuntimeBlock(block, src, scalar_src); return block.addTyOp(.splat, dest_ty, scalar); } fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const op_src = block.builtinCallArgSrc(inst_data.src_node, 0); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 1); const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", .{ .needed_comptime_reason = "@reduce operation must be comptime-known", }); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); const pt = sema.pt; const mod = pt.zcu; if (operand_ty.zigTypeTag(mod) != .Vector) { return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(pt)}); } const scalar_ty = operand_ty.childType(mod); // Type-check depending on operation. switch (operation) { .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Bool => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{ @tagName(operation), operand_ty.fmt(pt), }), }, .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Float => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{ @tagName(operation), operand_ty.fmt(pt), }), }, } const vec_len = operand_ty.vectorLen(mod); if (vec_len == 0) { // TODO re-evaluate if we should introduce a "neutral value" for some operations, // e.g. zero for add and one for mul. 
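// Illustrative sketch of what such neutral values would mean (not current
// behavior):
//   @reduce(.Add, @as(@Vector(0, u32), undefined)) => 0
//   @reduce(.Mul, @as(@Vector(0, u32), undefined)) => 1
// Until then, zero-length vectors are rejected below.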
return sema.fail(block, operand_src, "@reduce operation requires a vector with nonzero length", .{}); } if (try sema.resolveValue(operand)) |operand_val| { if (operand_val.isUndef(mod)) return pt.undefRef(scalar_ty); var accum: Value = try operand_val.elemValue(pt, 0); var i: u32 = 1; while (i < vec_len) : (i += 1) { const elem_val = try operand_val.elemValue(pt, i); switch (operation) { .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, pt), .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, pt), .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, pt), .Min => accum = accum.numberMin(elem_val, pt), .Max => accum = accum.numberMax(elem_val, pt), .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty), .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, pt), } } return Air.internedToRef(accum.toIntern()); } try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), operand_src); return block.addInst(.{ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ .operand = operand, .operation = operation, } }, }); } fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data; const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const mask_src = block.builtinCallArgSrc(inst_data.src_node, 3); const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type); try sema.checkVectorElemType(block, elem_ty_src, elem_ty); const a = try sema.resolveInst(extra.a); const b = try sema.resolveInst(extra.b); var mask = try sema.resolveInst(extra.mask); var mask_ty = sema.typeOf(mask); const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(mask).arrayLen(mod), else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(pt)}), }; mask_ty = try pt.vectorType(.{ .len = @intCast(mask_len), .child = .i32_type, }); mask = try sema.coerce(block, mask_ty, mask, mask_src); const mask_val = try sema.resolveConstValue(block, mask_src, mask, .{ .needed_comptime_reason = "shuffle mask must be comptime-known", }); return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @intCast(mask_len)); } fn analyzeShuffle( sema: *Sema, block: *Block, src_node: i32, elem_ty: Type, a_arg: Air.Inst.Ref, b_arg: Air.Inst.Ref, mask: Value, mask_len: u32, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const a_src = block.builtinCallArgSrc(src_node, 1); const b_src = block.builtinCallArgSrc(src_node, 2); const mask_src = block.builtinCallArgSrc(src_node, 3); var a = a_arg; var b = b_arg; const res_ty = try pt.vectorType(.{ .len = mask_len, .child = elem_ty.toIntern(), }); const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(pt.zcu)) { .Array, .Vector => sema.typeOf(a).arrayLen(pt.zcu), .Undefined => null, else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ elem_ty.fmt(pt), sema.typeOf(a).fmt(pt), }), }; const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(pt.zcu)) { .Array, .Vector => sema.typeOf(b).arrayLen(pt.zcu), .Undefined => null, else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ elem_ty.fmt(pt), sema.typeOf(b).fmt(pt), 
}), }; if (maybe_a_len == null and maybe_b_len == null) { return pt.undefRef(res_ty); } const a_len: u32 = @intCast(maybe_a_len orelse maybe_b_len.?); const b_len: u32 = @intCast(maybe_b_len orelse a_len); const a_ty = try pt.vectorType(.{ .len = a_len, .child = elem_ty.toIntern(), }); const b_ty = try pt.vectorType(.{ .len = b_len, .child = elem_ty.toIntern(), }); if (maybe_a_len == null) a = try pt.undefRef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); if (maybe_b_len == null) b = try pt.undefRef(b_ty) else b = try sema.coerce(block, b_ty, b, b_src); const operand_info = [2]std.meta.Tuple(&.{ u64, LazySrcLoc, Type }){ .{ a_len, a_src, a_ty }, .{ b_len, b_src, b_ty }, }; for (0..@intCast(mask_len)) |i| { const elem = try mask.elemValue(pt, i); if (elem.isUndef(pt.zcu)) continue; const elem_resolved = try sema.resolveLazyValue(elem); const int = elem_resolved.toSignedInt(pt); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { unsigned = @intCast(int); chosen = 0; } else { unsigned = @intCast(~int); chosen = 1; } if (unsigned >= operand_info[chosen][0]) { const msg = msg: { const msg = try sema.errMsg(mask_src, "mask index '{d}' has out-of-bounds selection", .{i}); errdefer msg.destroy(sema.gpa); try sema.errNote(operand_info[chosen][1], msg, "selected index '{d}' out of bounds of '{}'", .{ unsigned, operand_info[chosen][2].fmt(pt), }); if (chosen == 0) { try sema.errNote(b_src, msg, "selections from the second vector are specified with negative numbers", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } if (try sema.resolveValue(a)) |a_val| { if (try sema.resolveValue(b)) |b_val| { const values = try sema.arena.alloc(InternPool.Index, mask_len); for (values, 0..) |*value, i| { const mask_elem_val = try mask.elemValue(pt, i); if (mask_elem_val.isUndef(pt.zcu)) { value.* = try pt.intern(.{ .undef = elem_ty.toIntern() }); continue; } const int = mask_elem_val.toSignedInt(pt); const unsigned: u32 = @intCast(if (int >= 0) int else ~int); values[i] = (try (if (int >= 0) a_val else b_val).elemValue(pt, unsigned)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = res_ty.toIntern(), .storage = .{ .elems = values }, } }))); } } // All static analysis passed, and not comptime. // For runtime codegen, vectors a and b must be the same length. Here we // recursively @shuffle the smaller vector to append undefined elements // to it up to the length of the longer vector. This recursion terminates // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len. 
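// For example, with a_len == 2 and b_len == 4, `a` is expanded through an
// expand mask of { 0, 1, -1, -1 }: indices 0..min_len keep the existing
// elements, and each -1 selects element ~(-1) == 0 of the undef operand,
// yielding { a[0], a[1], undef, undef }.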
if (a_len != b_len) { const min_len = @min(a_len, b_len); const max_src = if (a_len > b_len) a_src else b_src; const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); for (@intCast(0)..@intCast(min_len)) |i| { expand_mask_values[i] = (try pt.intValue(Type.comptime_int, i)).toIntern(); } for (@intCast(min_len)..@intCast(max_len)) |i| { expand_mask_values[i] = (try pt.intValue(Type.comptime_int, -1)).toIntern(); } const expand_mask = try pt.intern(.{ .aggregate = .{ .ty = (try pt.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(), .storage = .{ .elems = expand_mask_values }, } }); if (a_len < b_len) { const undef = try pt.undefRef(a_ty); a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, Value.fromInterned(expand_mask), @intCast(max_len)); } else { const undef = try pt.undefRef(b_ty); b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, Value.fromInterned(expand_mask), @intCast(max_len)); } } return block.addInst(.{ .tag = .shuffle, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(res_ty.toIntern()), .payload = try block.sema.addExtra(Air.Shuffle{ .a = a, .b = b, .mask = mask.toIntern(), .mask_len = mask_len, }), } }, }); } fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data; const src = block.nodeOffset(extra.node); const elem_ty_src = block.builtinCallArgSrc(extra.node, 0); const pred_src = block.builtinCallArgSrc(extra.node, 1); const a_src = block.builtinCallArgSrc(extra.node, 2); const b_src = block.builtinCallArgSrc(extra.node, 3); const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type); try sema.checkVectorElemType(block, elem_ty_src, elem_ty); const pred_uncoerced = try sema.resolveInst(extra.pred); const pred_ty = sema.typeOf(pred_uncoerced); const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { .Vector, .Array => pred_ty.arrayLen(mod), else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(pt)}), }; const vec_len: u32 = @intCast(try sema.usizeCast(block, pred_src, vec_len_u64)); const bool_vec_ty = try pt.vectorType(.{ .len = vec_len, .child = .bool_type, }); const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src); const vec_ty = try pt.vectorType(.{ .len = vec_len, .child = elem_ty.toIntern(), }); const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src); const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src); const maybe_pred = try sema.resolveValue(pred); const maybe_a = try sema.resolveValue(a); const maybe_b = try sema.resolveValue(b); const runtime_src = if (maybe_pred) |pred_val| rs: { if (pred_val.isUndef(mod)) return pt.undefRef(vec_ty); if (maybe_a) |a_val| { if (a_val.isUndef(mod)) return pt.undefRef(vec_ty); if (maybe_b) |b_val| { if (b_val.isUndef(mod)) return pt.undefRef(vec_ty); const elems = try sema.gpa.alloc(InternPool.Index, vec_len); defer sema.gpa.free(elems); for (elems, 0..) 
|*elem, i| { const pred_elem_val = try pred_val.elemValue(pt, i); const should_choose_a = pred_elem_val.toBool(); elem.* = (try (if (should_choose_a) a_val else b_val).elemValue(pt, i)).toIntern(); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = vec_ty.toIntern(), .storage = .{ .elems = elems }, } }))); } else { break :rs b_src; } } else { if (maybe_b) |b_val| { if (b_val.isUndef(mod)) return pt.undefRef(vec_ty); } break :rs a_src; } } else rs: { if (maybe_a) |a_val| { if (a_val.isUndef(mod)) return pt.undefRef(vec_ty); } if (maybe_b) |b_val| { if (b_val.isUndef(mod)) return pt.undefRef(vec_ty); } break :rs pred_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addInst(.{ .tag = .select, .data = .{ .pl_op = .{ .operand = pred, .payload = try block.sema.addExtra(Air.Bin{ .lhs = a, .rhs = b, }), } }, }); } fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data; // zig fmt: off const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 1); const order_src = block.builtinCallArgSrc(inst_data.src_node, 2); // zig fmt: on const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type); const uncasted_ptr = try sema.resolveInst(extra.ptr); const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{ .needed_comptime_reason = "atomic order of @atomicLoad must be comptime-known", }); switch (order) { .release, .acq_rel => { return sema.fail( block, order_src, "@atomicLoad atomic ordering must not be release or acq_rel", .{}, ); }, else => {}, } if (try sema.typeHasOnePossibleValue(elem_ty)) |val| { return Air.internedToRef(val.toIntern()); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, ptr_src, ptr_val, sema.typeOf(ptr))) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } } try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src); return block.addInst(.{ .tag = .atomic_load, .data = .{ .atomic_load = .{ .ptr = ptr, .order = order, } }, }); } fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); // zig fmt: off const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 1); const op_src = block.builtinCallArgSrc(inst_data.src_node, 2); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 3); const order_src = block.builtinCallArgSrc(inst_data.src_node, 4); // zig fmt: on const operand = try sema.resolveInst(extra.operand); const elem_ty = sema.typeOf(operand); const uncasted_ptr = try sema.resolveInst(extra.ptr); const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); switch (elem_ty.zigTypeTag(mod)) { .Enum => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with 
enum only allowed with .Xchg", .{}); }, .Bool => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{}); }, .Float => switch (op) { .Xchg, .Add, .Sub, .Max, .Min => {}, else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}), }, else => {}, } const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{ .needed_comptime_reason = "atomic order of @atomicRmW must be comptime-known", }); if (order == .unordered) { return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be unordered", .{}); } // special case zero bit types if (try sema.typeHasOnePossibleValue(elem_ty)) |val| { return Air.internedToRef(val.toIntern()); } const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { const maybe_operand_val = try sema.resolveValue(operand); const operand_val = maybe_operand_val orelse { try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; if (sema.isComptimeMutablePtr(ptr_val)) { const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { // zig fmt: off .Xchg => operand_val, .Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty), .Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty), .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, pt), .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, pt), .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, pt), .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, pt), .Max => stored_val.numberMax (operand_val, pt), .Min => stored_val.numberMin (operand_val, pt), // zig fmt: on }; try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty); return Air.internedToRef(stored_val.toIntern()); } else break :rs ptr_src; } else ptr_src; const flags: u32 = @as(u32, @intFromEnum(order)) | (@as(u32, @intFromEnum(op)) << 3); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addInst(.{ .tag = .atomic_rmw, .data = .{ .pl_op = .{ .operand = ptr, .payload = try sema.addExtra(Air.AtomicRmw{ .operand = operand, .flags = flags, }), } }, }); } fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); // zig fmt: off const elem_ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const ptr_src = block.builtinCallArgSrc(inst_data.src_node, 1); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 2); const order_src = block.builtinCallArgSrc(inst_data.src_node, 3); // zig fmt: on const operand = try sema.resolveInst(extra.operand); const elem_ty = sema.typeOf(operand); const uncasted_ptr = try sema.resolveInst(extra.ptr); const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering, .{ .needed_comptime_reason = "atomic order of @atomicStore must be comptime-known", }); const air_tag: Air.Inst.Tag = switch (order) { .acquire, .acq_rel => { return sema.fail( block, order_src, "@atomicStore atomic ordering must not be acquire or acq_rel", .{}, ); }, .unordered => 
.atomic_store_unordered, .monotonic => .atomic_store_monotonic, .release => .atomic_store_release, .seq_cst => .atomic_store_seq_cst, }; return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag); } fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.MulAdd, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const mulend1_src = block.builtinCallArgSrc(inst_data.src_node, 1); const mulend2_src = block.builtinCallArgSrc(inst_data.src_node, 2); const addend_src = block.builtinCallArgSrc(inst_data.src_node, 3); const addend = try sema.resolveInst(extra.addend); const ty = sema.typeOf(addend); const mulend1 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend1), mulend1_src); const mulend2 = try sema.coerce(block, ty, try sema.resolveInst(extra.mulend2), mulend2_src); const maybe_mulend1 = try sema.resolveValue(mulend1); const maybe_mulend2 = try sema.resolveValue(mulend2); const maybe_addend = try sema.resolveValue(addend); const pt = sema.pt; const mod = pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(pt)}), } const runtime_src = if (maybe_mulend1) |mulend1_val| rs: { if (maybe_mulend2) |mulend2_val| { if (mulend2_val.isUndef(mod)) return pt.undefRef(ty); if (maybe_addend) |addend_val| { if (addend_val.isUndef(mod)) return pt.undefRef(ty); const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, pt); return Air.internedToRef(result_val.toIntern()); } else { break :rs addend_src; } } else { if (maybe_addend) |addend_val| { if (addend_val.isUndef(mod)) return pt.undefRef(ty); } break :rs mulend2_src; } } else rs: { if (maybe_mulend2) |mulend2_val| { if (mulend2_val.isUndef(mod)) return pt.undefRef(ty); } if (maybe_addend) |addend_val| { if (addend_val.isUndef(mod)) return pt.undefRef(ty); } break :rs mulend1_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addInst(.{ .tag = .mul_add, .data = .{ .pl_op = .{ .operand = addend, .payload = try sema.addExtra(Air.Bin{ .lhs = mulend1, .rhs = mulend2, }), } }, }); } fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const modifier_src = block.builtinCallArgSrc(inst_data.src_node, 0); const func_src = block.builtinCallArgSrc(inst_data.src_node, 1); const args_src = block.builtinCallArgSrc(inst_data.src_node, 2); const call_src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data; const func = try sema.resolveInst(extra.callee); const modifier_ty = try pt.getBuiltinType("CallModifier"); const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{ .needed_comptime_reason = "call modifier must be comptime-known", }); var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val); switch (modifier) { // These can be upgraded to comptime or nosuspend calls. 
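// E.g. `@call(.auto, f, .{})` in a comptime scope proceeds as a
// `.compile_time` call, and within a nosuspend block it is upgraded to
// `.no_async`; `.never_tail` at comptime is rejected below.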
.auto, .never_tail, .no_async => { if (block.is_comptime) { if (modifier == .never_tail) { return sema.fail(block, modifier_src, "unable to perform 'never_tail' call at compile-time", .{}); } modifier = .compile_time; } else if (extra.flags.is_nosuspend) { modifier = .no_async; } }, // These can be upgraded to comptime. nosuspend bit can be safely ignored. .always_inline, .compile_time => { _ = (try sema.resolveDefinedValue(block, func_src, func)) orelse { return sema.fail(block, func_src, "modifier '{s}' requires a comptime-known function", .{@tagName(modifier)}); }; if (block.is_comptime) { modifier = .compile_time; } }, .always_tail => { if (block.is_comptime) { modifier = .compile_time; } }, .async_kw => { if (extra.flags.is_nosuspend) { return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used inside nosuspend block", .{}); } if (block.is_comptime) { return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used in combination with comptime function call", .{}); } }, .never_inline => { if (block.is_comptime) { return sema.fail(block, modifier_src, "unable to perform 'never_inline' call at compile-time", .{}); } }, } const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(pt)}); } const resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) |*resolved, i| { resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(i), args_ty); } const callee_ty = sema.typeOf(func); const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false); const ensure_result_used = extra.flags.ensure_result_used; return sema.analyzeCall( block, func, func_ty, func_src, call_src, modifier, ensure_result_used, .{ .call_builtin = .{ .call_node_offset = inst_data.src_node, .args = resolved_args, } }, null, .@"@call", ); } fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data; const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?; const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small))); assert(!flags.ptr_cast); const inst_src = block.nodeOffset(extra.src_node); const field_name_src = block.builtinCallArgSrc(extra.src_node, 0); const field_ptr_src = block.builtinCallArgSrc(extra.src_node, 1); const parent_ptr_ty = try sema.resolveDestType(block, inst_src, extra.parent_ptr_type, .remove_eu, "@fieldParentPtr"); try sema.checkPtrType(block, inst_src, parent_ptr_ty, true); const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); if (parent_ptr_info.flags.size != .One) { return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(pt)}); } const parent_ty = Type.fromInterned(parent_ptr_info.child); switch (parent_ty.zigTypeTag(zcu)) { .Struct, .Union => {}, else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}), } try parent_ty.resolveLayout(pt); const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .needed_comptime_reason = "field name must be comptime-known", }); const field_index 
= switch (parent_ty.zigTypeTag(zcu)) { .Struct => blk: { if (parent_ty.isTuple(zcu)) { if (field_name.eqlSlice("len", ip)) { return sema.fail(block, inst_src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, field_name_src); } else { break :blk try sema.structFieldIndex(block, parent_ty, field_name, field_name_src); } }, .Union => try sema.unionFieldIndex(block, parent_ty, field_name, field_name_src), else => unreachable, }; if (parent_ty.zigTypeTag(zcu) == .Struct and parent_ty.structFieldIsComptime(field_index, zcu)) { return sema.fail(block, field_name_src, "cannot get @fieldParentPtr of a comptime field", .{}); } const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); try sema.checkPtrOperand(block, field_ptr_src, field_ptr_ty); const field_ptr_info = field_ptr_ty.ptrInfo(zcu); var actual_parent_ptr_info: InternPool.Key.PtrType = .{ .child = parent_ty.toIntern(), .flags = .{ .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(pt, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, .address_space = field_ptr_info.flags.address_space, }, .packed_offset = parent_ptr_info.packed_offset, }; const field_ty = parent_ty.structFieldType(field_index, zcu); var actual_field_ptr_info: InternPool.Key.PtrType = .{ .child = field_ty.toIntern(), .flags = .{ .alignment = try field_ptr_ty.ptrAlignmentAdvanced(pt, .sema), .is_const = field_ptr_info.flags.is_const, .is_volatile = field_ptr_info.flags.is_volatile, .is_allowzero = field_ptr_info.flags.is_allowzero, .address_space = field_ptr_info.flags.address_space, }, .packed_offset = field_ptr_info.packed_offset, }; switch (parent_ty.containerLayout(zcu)) { .auto => { actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict( if (zcu.typeToStruct(parent_ty)) |struct_obj| try pt.structFieldAlignmentAdvanced( struct_obj.fieldAlign(ip, field_index), field_ty, struct_obj.layout, .sema, ) else if (zcu.typeToUnion(parent_ty)) |union_obj| try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema) else actual_field_ptr_info.flags.alignment, ); actual_parent_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 }; actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 }; }, .@"extern" => { const field_offset = parent_ty.structFieldOffset(field_index, pt); actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (field_offset > 0) Alignment.fromLog2Units(@ctz(field_offset)) else actual_field_ptr_info.flags.alignment); actual_parent_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 }; actual_field_ptr_info.packed_offset = .{ .bit_offset = 0, .host_size = 0 }; }, .@"packed" => { const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) + (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, field_index) else 0) - actual_field_ptr_info.packed_offset.bit_offset), 8) catch return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{}); actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0) Alignment.fromLog2Units(@ctz(byte_offset)) else actual_field_ptr_info.flags.alignment); }, } const actual_field_ptr_ty = try pt.ptrTypeSema(actual_field_ptr_info); const casted_field_ptr = try 
sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src); const actual_parent_ptr_ty = try pt.ptrTypeSema(actual_parent_ptr_info); const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: { switch (parent_ty.zigTypeTag(zcu)) { .Struct => switch (parent_ty.containerLayout(zcu)) { .auto => {}, .@"extern" => { const byte_offset = parent_ty.structFieldOffset(field_index, pt); const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty); break :result Air.internedToRef(parent_ptr_val.toIntern()); }, .@"packed" => { // Logic lifted from type computation above - I'm just assuming it's correct. // `catch unreachable` since error case handled above. const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) + pt.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) - actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable; const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty); break :result Air.internedToRef(parent_ptr_val.toIntern()); }, }, .Union => switch (parent_ty.containerLayout(zcu)) { .auto => {}, .@"extern", .@"packed" => { // For an extern or packed union, just coerce the pointer. const parent_ptr_val = try pt.getCoerced(field_ptr_val, actual_parent_ptr_ty); break :result Air.internedToRef(parent_ptr_val.toIntern()); }, }, else => unreachable, } const opt_field: ?InternPool.Key.Ptr.BaseAddr.BaseIndex = opt_field: { const ptr = switch (ip.indexToKey(field_ptr_val.toIntern())) { .ptr => |ptr| ptr, else => break :opt_field null, }; if (ptr.byte_offset != 0) break :opt_field null; break :opt_field switch (ptr.base_addr) { .field => |field| field, else => null, }; }; const field = opt_field orelse { return sema.fail(block, field_ptr_src, "pointer value not based on parent struct", .{}); }; if (Value.fromInterned(field.base).typeOf(zcu).childType(zcu).toIntern() != parent_ty.toIntern()) { return sema.fail(block, field_ptr_src, "pointer value not based on parent struct", .{}); } if (field.index != field_index) { return sema.fail(block, inst_src, "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{ field_name.fmt(ip), field_index, field.index, parent_ty.fmt(pt), }); } break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src); } else result: { try sema.requireRuntimeBlock(block, inst_src, field_ptr_src); break :result try block.addInst(.{ .tag = .field_parent_ptr, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(actual_parent_ptr_ty.toIntern()), .payload = try block.sema.addExtra(Air.FieldParentPtr{ .field_ptr = casted_field_ptr, .field_index = @intCast(field_index), }), } }, }); }; return sema.ptrCastFull(block, flags, inst_src, result, inst_src, parent_ptr_ty, "@fieldParentPtr"); } fn ptrSubtract(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, byte_subtract: u64, new_ty: Type) !Value { const pt = sema.pt; const zcu = pt.zcu; if (byte_subtract == 0) return pt.getCoerced(ptr_val, new_ty); var ptr = switch (zcu.intern_pool.indexToKey(ptr_val.toIntern())) { .undef => return sema.failWithUseOfUndef(block, src), .ptr => |ptr| ptr, else => unreachable, }; if (ptr.byte_offset < byte_subtract) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "pointer computation here causes undefined behavior", .{}); errdefer msg.destroy(sema.gpa); try 
sema.errNote(src, msg, "resulting pointer exceeds bounds of containing value which may trigger overflow", .{}); break :msg msg; }); } ptr.byte_offset -= byte_subtract; ptr.ty = new_ty.toIntern(); return Value.fromInterned(try pt.intern(.{ .ptr = ptr })); } fn zirMinMax( sema: *Sema, block: *Block, inst: Zir.Inst.Index, comptime air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const lhs_src = block.builtinCallArgSrc(inst_data.src_node, 0); const rhs_src = block.builtinCallArgSrc(inst_data.src_node, 1); const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs)); try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs)); return sema.analyzeMinMax(block, src, air_tag, &.{ lhs, rhs }, &.{ lhs_src, rhs_src }); } fn zirMinMaxMulti( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, comptime air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); const src_node = extra.data.src_node; const src = block.nodeOffset(src_node); const operands = sema.code.refSlice(extra.end, extended.small); const air_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len); const operand_srcs = try sema.arena.alloc(LazySrcLoc, operands.len); for (operands, air_refs, operand_srcs, 0..) |zir_ref, *air_ref, *op_src, i| { op_src.* = block.builtinCallArgSrc(src_node, @intCast(i)); air_ref.* = try sema.resolveInst(zir_ref); try sema.checkNumericType(block, op_src.*, sema.typeOf(air_ref.*)); } return sema.analyzeMinMax(block, src, air_tag, air_refs, operand_srcs); } fn analyzeMinMax( sema: *Sema, block: *Block, src: LazySrcLoc, comptime air_tag: Air.Inst.Tag, operands: []const Air.Inst.Ref, operand_srcs: []const LazySrcLoc, ) CompileError!Air.Inst.Ref { assert(operands.len == operand_srcs.len); assert(operands.len > 0); const pt = sema.pt; const mod = pt.zcu; if (operands.len == 1) return operands[0]; const opFunc = switch (air_tag) { .min => Value.numberMin, .max => Value.numberMax, else => @compileError("unreachable"), }; // The set of runtime-known operands. Set up in the loop below. var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len); // The current minmax value - initially this will always be comptime-known, then we'll add // runtime values into the mix later. var cur_minmax: ?Air.Inst.Ref = null; var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null // The current known scalar bounds of the value. var bounds_status: enum { unknown, // We've only seen undef comptime_ints so far, so do not know the bounds. defined, // We've seen only integers, so the bounds are defined. non_integral, // There are floats in the mix, so the bounds aren't defined. } = .unknown; var cur_min_scalar: Value = undefined; var cur_max_scalar: Value = undefined; // First, find all comptime-known arguments, and get their min/max for (operands, operand_srcs, 0..) 
|operand, operand_src, operand_idx| { // Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call // it in the runtime path anyway since the result type may have been refined const unresolved_uncoerced_val = try sema.resolveValue(operand) orelse continue; const uncoerced_val = try sema.resolveLazyValue(unresolved_uncoerced_val); runtime_known.unset(operand_idx); switch (bounds_status) { .unknown, .defined => refine_bounds: { const ty = sema.typeOf(operand); if (!ty.scalarType(mod).isInt(mod) and !ty.scalarType(mod).eql(Type.comptime_int, mod)) { bounds_status = .non_integral; break :refine_bounds; } const scalar_bounds: ?[2]Value = bounds: { if (!ty.isVector(mod)) break :bounds try uncoerced_val.intValueBounds(pt); var cur_bounds: [2]Value = try Value.intValueBounds(try uncoerced_val.elemValue(pt, 0), pt) orelse break :bounds null; const len = try sema.usizeCast(block, src, ty.vectorLen(mod)); for (1..len) |i| { const elem = try uncoerced_val.elemValue(pt, i); const elem_bounds = try elem.intValueBounds(pt) orelse break :bounds null; cur_bounds = .{ Value.numberMin(elem_bounds[0], cur_bounds[0], pt), Value.numberMax(elem_bounds[1], cur_bounds[1], pt), }; } break :bounds cur_bounds; }; if (scalar_bounds) |bounds| { if (bounds_status == .unknown) { cur_min_scalar = bounds[0]; cur_max_scalar = bounds[1]; bounds_status = .defined; } else { cur_min_scalar = opFunc(cur_min_scalar, bounds[0], pt); cur_max_scalar = opFunc(cur_max_scalar, bounds[1], pt); } } }, .non_integral => {}, } const cur = cur_minmax orelse { cur_minmax = operand; cur_minmax_src = operand_src; continue; }; const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src); const cur_val = try sema.resolveLazyValue(simd_op.lhs_val.?); // cur_minmax is comptime-known const operand_val = try sema.resolveLazyValue(simd_op.rhs_val.?); // we checked the operand was resolvable above const vec_len = simd_op.len orelse { const result_val = opFunc(cur_val, operand_val, pt); cur_minmax = Air.internedToRef(result_val.toIntern()); continue; }; const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { const lhs_elem_val = try cur_val.elemValue(pt, i); const rhs_elem_val = try operand_val.elemValue(pt, i); const uncoerced_elem = opFunc(lhs_elem_val, rhs_elem_val, pt); elem.* = (try pt.getCoerced(uncoerced_elem, simd_op.scalar_ty)).toIntern(); } cur_minmax = Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = simd_op.result_ty.toIntern(), .storage = .{ .elems = elems }, } }))); } const opt_runtime_idx = runtime_known.findFirstSet(); if (cur_minmax) |ct_minmax_ref| refine: { // Refine the comptime-known result type based on the bounds. This isn't strictly necessary // in the runtime case, since we'll refine the type again later, but keeping things as small // as possible will allow us to emit more optimal AIR (if all the runtime operands have // smaller types than the non-refined comptime type). 
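// E.g. (illustrative): in `@max(runtime_u8, @as(u32, 100))`, the comptime
// operand has scalar bounds [100, 100], so `intFittingRange` shrinks its
// type from u32 to u7 before any runtime `max` instruction is emitted.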
const val = (try sema.resolveValue(ct_minmax_ref)).?; const orig_ty = sema.typeOf(ct_minmax_ref); if (opt_runtime_idx == null and orig_ty.scalarType(mod).eql(Type.comptime_int, mod)) { // If all arguments were `comptime_int`, and there are no runtime args, we'll preserve that type break :refine; } // We can't refine float types if (orig_ty.scalarType(mod).isAnyFloat()) break :refine; assert(bounds_status == .defined); // there was a non-comptime-int integral comptime-known arg const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar); const refined_ty = if (orig_ty.isVector(mod)) try pt.vectorType(.{ .len = orig_ty.vectorLen(mod), .child = refined_scalar_ty.toIntern(), }) else refined_scalar_ty; // Apply the refined type to the current value if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } cur_minmax = try sema.coerceInMemory(val, refined_ty); } const runtime_idx = opt_runtime_idx orelse return cur_minmax.?; const runtime_src = operand_srcs[runtime_idx]; try sema.requireRuntimeBlock(block, src, runtime_src); // Now, iterate over runtime operands, emitting a min/max instruction for each. We'll refine the // type again at the end, based on the comptime-known bound. // If the comptime-known part is undef we can avoid emitting actual instructions later const known_undef = if (cur_minmax) |operand| blk: { const val = (try sema.resolveValue(operand)).?; break :blk val.isUndef(mod); } else false; if (cur_minmax == null) { // No comptime operands - use the first operand as the starting value assert(bounds_status == .unknown); assert(runtime_idx == 0); cur_minmax = operands[0]; cur_minmax_src = runtime_src; runtime_known.unset(0); // don't look at this operand in the loop below const scalar_ty = sema.typeOf(cur_minmax.?).scalarType(mod); if (scalar_ty.isInt(mod)) { cur_min_scalar = try scalar_ty.minInt(pt, scalar_ty); cur_max_scalar = try scalar_ty.maxInt(pt, scalar_ty); bounds_status = .defined; } else { bounds_status = .non_integral; } } var it = runtime_known.iterator(.{}); while (it.next()) |idx| { const lhs = cur_minmax.?; const lhs_src = cur_minmax_src; const rhs = operands[idx]; const rhs_src = operand_srcs[idx]; const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); if (known_undef) { cur_minmax = try pt.undefRef(simd_op.result_ty); } else { cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs); } // Compute the bounds of this type switch (bounds_status) { .unknown, .defined => refine_bounds: { const scalar_ty = sema.typeOf(rhs).scalarType(mod); if (scalar_ty.isAnyFloat()) { bounds_status = .non_integral; break :refine_bounds; } const scalar_min = try scalar_ty.minInt(pt, scalar_ty); const scalar_max = try scalar_ty.maxInt(pt, scalar_ty); if (bounds_status == .unknown) { cur_min_scalar = scalar_min; cur_max_scalar = scalar_max; bounds_status = .defined; } else { cur_min_scalar = opFunc(cur_min_scalar, scalar_min, pt); cur_max_scalar = opFunc(cur_max_scalar, scalar_max, pt); } }, .non_integral => {}, } } // Finally, refine the type based on the known bounds. const unrefined_ty = sema.typeOf(cur_minmax.?); if (unrefined_ty.scalarType(mod).isAnyFloat()) { // We can't refine floats, so we're done. 
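// (E.g. `@min` of a runtime f32 and a comptime float stays f32; there is
// no float analogue of `intFittingRange`.)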
return cur_minmax.?; } assert(bounds_status == .defined); // there were integral runtime operands const refined_scalar_ty = try pt.intFittingRange(cur_min_scalar, cur_max_scalar); const refined_ty = if (unrefined_ty.isVector(mod)) try pt.vectorType(.{ .len = unrefined_ty.vectorLen(mod), .child = refined_scalar_ty.toIntern(), }) else refined_scalar_ty; if (!refined_ty.eql(unrefined_ty, mod)) { // We've reduced the type - cast the result down return block.addTyOp(.intcast, refined_ty, cur_minmax.?); } return cur_minmax.?; } fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ptr_ty = sema.typeOf(ptr); const info = ptr_ty.ptrInfo(mod); if (info.flags.size == .One) { // Already an array pointer. return ptr; } const new_ty = try pt.ptrTypeSema(.{ .child = (try pt.arrayType(.{ .len = len, .sentinel = info.sentinel, .child = info.child, })).toIntern(), .flags = .{ .alignment = info.flags.alignment, .is_const = info.flags.is_const, .is_volatile = info.flags.is_volatile, .is_allowzero = info.flags.is_allowzero, .address_space = info.flags.address_space, }, }); const non_slice_ptr = if (info.flags.size == .Slice) try block.addTyOp(.slice_ptr, ptr_ty.slicePtrFieldType(mod), ptr) else ptr; return block.addBitCast(new_ty, non_slice_ptr); } fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const dest_src = block.builtinCallArgSrc(inst_data.src_node, 0); const src_src = block.builtinCallArgSrc(inst_data.src_node, 1); const dest_ptr = try sema.resolveInst(extra.lhs); const src_ptr = try sema.resolveInst(extra.rhs); const dest_ty = sema.typeOf(dest_ptr); const src_ty = sema.typeOf(src_ptr); const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr); const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr); const pt = sema.pt; const mod = pt.zcu; const target = mod.getTarget(); if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); } if (dest_len == .none and src_len == .none) { const msg = msg: { const msg = try sema.errMsg(src, "unknown @memcpy length", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_src, msg, "destination type '{}' provides no length", .{ dest_ty.fmt(pt), }); try sema.errNote(src_src, msg, "source type '{}' provides no length", .{ src_ty.fmt(pt), }); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } var len_val: ?Value = null; if (dest_len != .none and src_len != .none) check: { // If we can check at compile-time, no need for runtime safety. 
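// For example (illustrative): in `@memcpy(dest[0..4], src[0..4])` both lengths
// are comptime-known and equal, so the mismatch is ruled out here and no
// `.memcpy_len_mismatch` runtime check needs to be emitted below.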
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| { len_val = dest_len_val; if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| { if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) { const msg = msg: { const msg = try sema.errMsg(src, "non-matching @memcpy lengths", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_src, msg, "length {} here", .{ dest_len_val.fmtValueSema(pt, sema), }); try sema.errNote(src_src, msg, "length {} here", .{ src_len_val.fmtValueSema(pt, sema), }); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } break :check; } } else if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| { len_val = src_len_val; } if (block.wantSafety()) { const ok = try block.addBinOp(.cmp_eq, dest_len, src_len); try sema.addSafetyCheck(block, src, ok, .memcpy_len_mismatch); } } else if (dest_len != .none) { if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| { len_val = dest_len_val; } } else if (src_len != .none) { if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| { len_val = src_len_val; } } const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { const len_u64 = (try len_val.?.getUnsignedIntAdvanced(pt, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try pt.intRef(Type.usize, i); const dest_elem_ptr = try sema.elemPtrOneLayerOnly( block, src, dest_ptr, elem_index, src, true, // init false, // oob_safety ); const src_elem_ptr = try sema.elemPtrOneLayerOnly( block, src, src_ptr, elem_index, src, false, // init false, // oob_safety ); const uncoerced_elem = try sema.analyzeLoad(block, src, src_elem_ptr, src_src); try sema.storePtr2( block, src, dest_elem_ptr, dest_src, uncoerced_elem, src_src, .store, ); } return; } else break :rs src_src; } else dest_src; // If in-memory coercion is not allowed, explode this memcpy call into a // for loop that copies element-wise. // Likewise if this is an iterable rather than a pointer, do the same // lowering. The AIR instruction requires pointers with element types of // equal ABI size. if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{}); } const dest_elem_ty = dest_ty.elemType2(mod); const src_elem_ty = src_ty.elemType2(mod); if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src, null)) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{}); } // If the length is comptime-known, then upgrade src and destination types // into pointer-to-array. At this point we know they are both pointers // already. var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { const len = try val.toUnsignedIntSema(pt); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. return; } new_dest_ptr = try upgradeToArrayPtr(sema, block, dest_ptr, len); new_src_ptr = try upgradeToArrayPtr(sema, block, src_ptr, len); } if (dest_len != .none) { // Change the src from slice to a many pointer, to avoid multiple ptr // slice extractions in AIR instructions. 
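// (In this branch the dest operand already carries the length, so keeping the
// src as a slice would only add redundant `slice_ptr` extractions.)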
const new_src_ptr_ty = sema.typeOf(new_src_ptr); if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } else if (dest_len == .none and len_val == null) { // Change the dest to a slice, since its type must have the length. const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr); new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, LazySrcLoc.unneeded, dest_src, dest_src, dest_src, false); const new_src_ptr_ty = sema.typeOf(new_src_ptr); if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } try sema.requireRuntimeBlock(block, src, runtime_src); // Aliasing safety check. if (block.wantSafety()) { const len = if (len_val) |v| Air.internedToRef(v.toIntern()) else if (dest_len != .none) dest_len else src_len; // Extract raw pointers from the dest and src slices. The AIR instructions could operate on slices directly, but // that would cause redundant machine code instructions. const new_dest_ptr_ty = sema.typeOf(new_dest_ptr); const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: { var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type; assert(dest_manyptr_ty_key.flags.size == .One); dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); dest_manyptr_ty_key.flags.size = .Many; break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src); } else new_dest_ptr; const new_src_ptr_ty = sema.typeOf(new_src_ptr); const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty) else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: { var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type; assert(src_manyptr_ty_key.flags.size == .One); src_manyptr_ty_key.child = src_elem_ty.toIntern(); src_manyptr_ty_key.flags.size = .Many; break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src); } else new_src_ptr; // ok1: dest >= src + len // ok2: src >= dest + len const src_plus_len = try sema.analyzePtrArithmetic(block, src, raw_src_ptr, len, .ptr_add, src_src, src); const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src); const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len); const ok2 = try block.addBinOp(.cmp_gte, raw_src_ptr, dest_plus_len); const ok = try block.addBinOp(.bool_or, ok1, ok2); try sema.addSafetyCheck(block, src, ok, .memcpy_alias); } _ = try block.addInst(.{ .tag = .memcpy, .data = .{ .bin_op = .{ .lhs = new_dest_ptr, .rhs = new_src_ptr, } }, }); } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const dest_src = block.builtinCallArgSrc(inst_data.src_node, 0); const value_src = block.builtinCallArgSrc(inst_data.src_node, 1); const dest_ptr = try sema.resolveInst(extra.lhs); const uncoerced_elem = try sema.resolveInst(extra.rhs); const dest_ptr_ty = sema.typeOf(dest_ptr); try checkMemOperand(sema,
block, dest_src, dest_ptr_ty); if (dest_ptr_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } const dest_elem_ty: Type = dest_elem_ty: { const ptr_info = dest_ptr_ty.ptrInfo(mod); switch (ptr_info.flags.size) { .Slice => break :dest_elem_ty Type.fromInterned(ptr_info.child), .One => { if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) { break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(mod); } }, .Many, .C => {}, } return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "unknown @memset length", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_src, msg, "destination type '{}' provides no length", .{ dest_ptr_ty.fmt(pt), }); break :msg msg; }); }; const elem = try sema.coerce(block, dest_elem_ty, uncoerced_elem, value_src); const runtime_src = rs: { const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src; const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, pt.tid, "len", .no_embedded_nulls), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; const len_u64 = (try len_val.getUnsignedIntAdvanced(pt, .sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. return; } if (!sema.isComptimeMutablePtr(ptr_val)) break :rs dest_src; const elem_val = try sema.resolveValue(elem) orelse break :rs value_src; const array_ty = try pt.arrayType(.{ .child = dest_elem_ty.toIntern(), .len = len_u64, }); const array_val = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = array_ty.toIntern(), .storage = .{ .repeated_elem = elem_val.toIntern() }, } })); const array_ptr_ty = ty: { var info = dest_ptr_ty.ptrInfo(mod); info.flags.size = .One; info.child = array_ty.toIntern(); break :ty try pt.ptrType(info); }; const raw_ptr_val = if (dest_ptr_ty.isSlice(mod)) ptr_val.slicePtr(mod) else ptr_val; const array_ptr_val = try pt.getCoerced(raw_ptr_val, array_ptr_ty); return sema.storePtrVal(block, src, array_ptr_val, array_val, array_ty); }; try sema.requireRuntimeBlock(block, src, runtime_src); _ = try block.addInst(.{ .tag = if (block.wantSafety()) .memset_safe else .memset, .data = .{ .bin_op = .{ .lhs = dest_ptr, .rhs = elem, } }, }); } fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); return sema.failWithUseOfAsync(block, src); } fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); return sema.failWithUseOfAsync(block, src); } fn zirAwait( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); return sema.failWithUseOfAsync(block, src); } fn zirAwaitNosuspend( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.nodeOffset(extra.node); return sema.failWithUseOfAsync(block, src); } fn zirVarExtended( sema: *Sema, block: *Block, 
extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src = block.src(.{ .node_offset_var_decl_ty = 0 }); const init_src = block.src(.{ .node_offset_var_decl_init = 0 }); const small: Zir.Inst.ExtendedVar.Small = @bitCast(extended.small); var extra_index: usize = extra.end; const lib_name = if (small.has_lib_name) lib_name: { const lib_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); const lib_name = sema.code.nullTerminatedString(lib_name_index); extra_index += 1; try sema.handleExternLibName(block, ty_src, lib_name); break :lib_name lib_name; } else null; // ZIR supports encoding this information but it is not used; the information // is encoded via the Decl entry. assert(!small.has_align); const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: { const init_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(init_ref); } else .none; const have_ty = extra.data.var_type != .none; const var_ty = if (have_ty) try sema.resolveType(block, ty_src, extra.data.var_type) else sema.typeOf(uncasted_init); const init_val = if (uncasted_init != .none) blk: { const init = if (have_ty) try sema.coerce(block, var_ty, uncasted_init, init_src) else uncasted_init; break :blk ((try sema.resolveValue(init)) orelse { return sema.failWithNeededComptime(block, init_src, .{ .needed_comptime_reason = "container level variable initializers must be comptime-known", }); }).toIntern(); } else .none; try sema.validateVarType(block, ty_src, var_ty, small.is_extern); if (small.is_extern) { // We need to resolve the alignment and addrspace early. // Keep in sync with logic in `Zcu.PerThread.semaCau`. 
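// For example (hypothetical declaration): `extern threadlocal var errno: c_int;`
// takes this path, with any `align`/`addrspace` clauses resolved below from the
// bodies of the surrounding `declaration` instruction.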
const align_src = block.src(.{ .node_offset_var_decl_align = 0 }); const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 }); const decl_inst, const decl_bodies = decl: { const decl_inst = sema.getOwnerCauDeclInst().resolve(ip) orelse return error.AnalysisFail; const zir_decl, const extra_end = sema.code.getDeclaration(decl_inst); break :decl .{ decl_inst, zir_decl.getBodies(extra_end, sema.code) }; }; const alignment: InternPool.Alignment = a: { const align_body = decl_bodies.align_body orelse break :a .none; const align_ref = try sema.resolveInlineBody(block, align_body, decl_inst); break :a try sema.analyzeAsAlign(block, align_src, align_ref); }; const @"addrspace": std.builtin.AddressSpace = as: { const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(var_ty.toIntern())) { .func_type => .function, else => .variable, }; const target = zcu.getTarget(); const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) { .function => target_util.defaultAddressSpace(target, .function), .variable => target_util.defaultAddressSpace(target, .global_mutable), .constant => target_util.defaultAddressSpace(target, .global_constant), else => unreachable, }; const addrspace_ref = try sema.resolveInlineBody(block, addrspace_body, decl_inst); break :as try sema.analyzeAsAddressSpace(block, addrspace_src, addrspace_ref, addrspace_ctx); }; return Air.internedToRef(try pt.getExtern(.{ .name = sema.getOwnerCauNavName(), .ty = var_ty.toIntern(), .lib_name = try ip.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), .is_const = small.is_const, .is_threadlocal = small.is_threadlocal, .is_weak_linkage = false, .alignment = alignment, .@"addrspace" = @"addrspace", .zir_index = sema.getOwnerCauDeclInst(), // `declaration` instruction .owner_nav = undefined, // ignored by `getExtern` })); } assert(!small.is_const); // a `const` non-extern variable is not legal return Air.internedToRef(try pt.intern(.{ .variable = .{ .ty = var_ty.toIntern(), .init = init_val, .owner_nav = sema.getOwnerCauNav(), .lib_name = try ip.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), .is_threadlocal = small.is_threadlocal, .is_weak_linkage = false, } })); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const mod = pt.zcu; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); const target = mod.getTarget(); const align_src = block.src(.{ .node_offset_fn_type_align = inst_data.src_node }); const addrspace_src = block.src(.{ .node_offset_fn_type_addrspace = inst_data.src_node }); const section_src = block.src(.{ .node_offset_fn_type_section = inst_data.src_node }); const cc_src = block.src(.{ .node_offset_fn_type_cc = inst_data.src_node }); const ret_src = block.src(.{ .node_offset_fn_type_ret_ty = inst_data.src_node }); const has_body = extra.data.body_len != 0; var extra_index: usize = extra.end; const lib_name: ?[]const u8 = if (extra.data.bits.has_lib_name) blk: { const lib_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]); const lib_name = sema.code.nullTerminatedString(lib_name_index); extra_index += 1; break :blk lib_name; } else null; if (has_body and (extra.data.bits.has_align_body or extra.data.bits.has_align_ref) and !target_util.supportsFunctionAlignment(target)) { return sema.fail(block,
align_src, "target does not support function alignment", .{}); } const @"align": ?Alignment = if (extra.data.bits.has_align_body) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, .{ .needed_comptime_reason = "alignment must be comptime-known", }); if (val.isGenericPoison()) { break :blk null; } const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(pt)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else if (extra.data.bits.has_align_ref) blk: { const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const uncoerced_align = sema.resolveInst(align_ref) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const coerced_align = sema.coerce(block, Type.u29, uncoerced_align, align_src) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const align_val = sema.resolveConstDefinedValue(block, align_src, coerced_align, .{ .needed_comptime_reason = "alignment must be comptime-known", }) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(pt)); const default = target_util.defaultFunctionAlignment(target); break :blk if (alignment == default) .none else alignment; } else .none; const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; const addrspace_ty = try pt.getBuiltinType("AddressSpace"); const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, .{ .needed_comptime_reason = "addrspace must be comptime-known", }); if (val.isGenericPoison()) { break :blk null; } break :blk mod.toEnum(std.builtin.AddressSpace, val); } else if (extra.data.bits.has_addrspace_ref) blk: { const addrspace_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const addrspace_ty = try pt.getBuiltinType("AddressSpace"); const uncoerced_addrspace = sema.resolveInst(addrspace_ref) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const coerced_addrspace = sema.coerce(block, addrspace_ty, uncoerced_addrspace, addrspace_src) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const addrspace_val = sema.resolveConstDefinedValue(block, addrspace_src, coerced_addrspace, .{ .needed_comptime_reason = "addrspace must be comptime-known", }) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_val); } else target_util.defaultAddressSpace(target, .function); const section: Section = if (extra.data.bits.has_section_body) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; const ty = Type.slice_const_u8; const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, .{ .needed_comptime_reason = "linksection must be comptime-known", }); if (val.isGenericPoison()) { 
break :blk .generic; } break :blk .{ .explicit = try sema.sliceToIpString(block, section_src, val, .{ .needed_comptime_reason = "linksection must be comptime-known", }) }; } else if (extra.data.bits.has_section_ref) blk: { const section_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, .{ .needed_comptime_reason = "linksection must be comptime-known", }) catch |err| switch (err) { error.GenericPoison => { break :blk .generic; }, else => |e| return e, }; break :blk .{ .explicit = section_name }; } else .default; const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; const cc_ty = try pt.getBuiltinType("CallingConvention"); const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{ .needed_comptime_reason = "calling convention must be comptime-known", }); if (val.isGenericPoison()) { break :blk null; } break :blk mod.toEnum(std.builtin.CallingConvention, val); } else if (extra.data.bits.has_cc_ref) blk: { const cc_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const cc_ty = try pt.getBuiltinType("CallingConvention"); const uncoerced_cc = sema.resolveInst(cc_ref) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const coerced_cc = sema.coerce(block, cc_ty, uncoerced_cc, cc_src) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; const cc_val = sema.resolveConstDefinedValue(block, cc_src, coerced_cc, .{ .needed_comptime_reason = "calling convention must be comptime-known", }) catch |err| switch (err) { error.GenericPoison => break :blk null, else => |e| return e, }; break :blk mod.toEnum(std.builtin.CallingConvention, cc_val); } else cc: { if (has_body) { const decl_inst = if (sema.generic_owner != .none) decl_inst: { // Generic instance -- use the original function declaration to // look for the `export` syntax. 
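// For example (illustrative): `export fn foo() void {}` has no explicit
// callconv, so the `is_export` flag checked below defaults it to `.C`, while a
// plain non-exported `fn` falls through to `.Unspecified`.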
const nav = mod.intern_pool.getNav(mod.funcInfo(sema.generic_owner).owner_nav); const cau = mod.intern_pool.getCau(nav.analysis_owner.unwrap().?); break :decl_inst cau.zir_index; } else sema.getOwnerCauDeclInst(); // not an instantiation so we're analyzing a function declaration Cau const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool) orelse return error.AnalysisFail)[0]; if (zir_decl.flags.is_export) { break :cc .C; } } break :cc .Unspecified; }; const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.bodySlice(extra_index, body_len); extra_index += body.len; const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, .{ .needed_comptime_reason = "return type must be comptime-known", }); const ty = val.toType(); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]); extra_index += 1; const ret_ty_val = sema.resolveInstConst(block, ret_src, ret_ty_ref, .{ .needed_comptime_reason = "return type must be comptime-known", }) catch |err| switch (err) { error.GenericPoison => { break :blk Type.generic_poison; }, else => |e| return e, }; break :blk ret_ty_val.toType(); } else Type.void; const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: { const x = sema.code.extra[extra_index]; extra_index += 1; break :blk x; } else 0; var src_locs: Zir.Inst.Func.SrcLocs = undefined; if (has_body) { extra_index += extra.data.body_len; src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } const is_var_args = extra.data.bits.is_var_args; const is_inferred_error = extra.data.bits.is_inferred_error; const is_extern = extra.data.bits.is_extern; const is_noinline = extra.data.bits.is_noinline; return sema.funcCommon( block, inst_data.src_node, inst, @"align", @"addrspace", section, cc, ret_ty, is_var_args, is_inferred_error, is_extern, has_body, src_locs, lib_name, noalias_bits, is_noinline, ); } fn zirCUndef( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.builtinCallArgSrc(extra.node, 0); const name = try sema.resolveConstString(block, src, extra.operand, .{ .needed_comptime_reason = "name of macro being undefined must be comptime-known", }); try block.c_import_buf.?.writer().print("#undef {s}\n", .{name}); return .void_value; } fn zirCInclude( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = block.builtinCallArgSrc(extra.node, 0); const name = try sema.resolveConstString(block, src, extra.operand, .{ .needed_comptime_reason = "path being included must be comptime-known", }); try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name}); return .void_value; } fn zirCDefine( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const name_src = block.builtinCallArgSrc(extra.node, 0); const val_src = block.builtinCallArgSrc(extra.node, 1); const name = try sema.resolveConstString(block, name_src, extra.lhs, .{ .needed_comptime_reason = "name of macro being defined must be comptime-known", }); const rhs = try
sema.resolveInst(extra.rhs); if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) { const value = try sema.resolveConstString(block, val_src, extra.rhs, .{ .needed_comptime_reason = "value of macro being defined must be comptime-known", }); try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value }); } else { try block.c_import_buf.?.writer().print("#define {s}\n", .{name}); } return .void_value; } fn zirWasmMemorySize( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const index_src = block.builtinCallArgSrc(extra.node, 0); const builtin_src = block.nodeOffset(extra.node); const target = sema.pt.zcu.getTarget(); if (!target.isWasm()) { return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, .{ .needed_comptime_reason = "wasm memory size index must be comptime-known", })); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ .tag = .wasm_memory_size, .data = .{ .pl_op = .{ .operand = .none, .payload = index, } }, }); } fn zirWasmMemoryGrow( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const builtin_src = block.nodeOffset(extra.node); const index_src = block.builtinCallArgSrc(extra.node, 0); const delta_src = block.builtinCallArgSrc(extra.node, 1); const target = sema.pt.zcu.getTarget(); if (!target.isWasm()) { return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)}); } const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, .{ .needed_comptime_reason = "wasm memory grow index must be comptime-known", })); const delta = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.rhs), delta_src); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ .tag = .wasm_memory_grow, .data = .{ .pl_op = .{ .operand = delta, .payload = index, } }, }); } fn resolvePrefetchOptions( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const options_ty = try pt.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const locality_src = block.src(.{ .init_field_locality = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const cache_src = block.src(.{ .init_field_cache = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "rw", .no_embedded_nulls), rw_src); const rw_val = try sema.resolveConstDefinedValue(block, rw_src, rw, .{ .needed_comptime_reason = "prefetch read/write must be comptime-known", }); const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "locality", .no_embedded_nulls), locality_src); const locality_val = try sema.resolveConstDefinedValue(block, locality_src, locality, .{
.needed_comptime_reason = "prefetch locality must be comptime-known", }); const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "cache", .no_embedded_nulls), cache_src); const cache_val = try sema.resolveConstDefinedValue(block, cache_src, cache, .{ .needed_comptime_reason = "prefetch cache must be comptime-known", }); return std.builtin.PrefetchOptions{ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), .locality = @intCast(try locality_val.toUnsignedIntSema(pt)), .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } fn zirPrefetch( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ptr_src = block.builtinCallArgSrc(extra.node, 0); const opts_src = block.builtinCallArgSrc(extra.node, 1); const ptr = try sema.resolveInst(extra.lhs); try sema.checkPtrOperand(block, ptr_src, sema.typeOf(ptr)); const options = try sema.resolvePrefetchOptions(block, opts_src, extra.rhs); if (!block.is_comptime) { _ = try block.addInst(.{ .tag = .prefetch, .data = .{ .prefetch = .{ .ptr = ptr, .rw = options.rw, .locality = options.locality, .cache = options.cache, } }, }); } return .void_value; } fn resolveExternOptions( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!struct { name: InternPool.NullTerminatedString, library_name: InternPool.OptionalNullTerminatedString = .none, linkage: std.builtin.GlobalLinkage = .strong, is_thread_local: bool = false, } { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); const extern_options_ty = try pt.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const library_src = block.src(.{ .init_field_library = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const linkage_src = block.src(.{ .init_field_linkage = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const thread_local_src = block.src(.{ .init_field_thread_local = src.offset.node_offset_builtin_call_arg.builtin_call_node }); const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "name", .no_embedded_nulls), name_src); const name = try sema.toConstString(block, name_src, name_ref, .{ .needed_comptime_reason = "name of the extern symbol must be comptime-known", }); const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "library_name", .no_embedded_nulls), library_src); const library_name_val = try sema.resolveConstDefinedValue(block, library_src, library_name_inst, .{ .needed_comptime_reason = "library in which extern symbol is must be comptime-known", }); const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "linkage", .no_embedded_nulls), linkage_src); const linkage_val = try sema.resolveConstDefinedValue(block, linkage_src, linkage_ref, .{ .needed_comptime_reason = "linkage of the extern symbol must be comptime-known", }); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, pt.tid, "is_thread_local", .no_embedded_nulls), thread_local_src); const is_thread_local_val = try 
sema.resolveConstDefinedValue(block, thread_local_src, is_thread_local, .{ .needed_comptime_reason = "thread-locality of the extern symbol must be comptime-known", }); const library_name = if (library_name_val.optionalValue(mod)) |library_name_payload| library_name: { const library_name = try sema.toConstString(block, library_src, Air.internedToRef(library_name_payload.toIntern()), .{ .needed_comptime_reason = "library in which the extern symbol is located must be comptime-known", }); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } try sema.handleExternLibName(block, library_src, library_name); break :library_name library_name; } else null; if (name.len == 0) { return sema.fail(block, name_src, "extern symbol name cannot be empty", .{}); } if (linkage != .weak and linkage != .strong) { return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{}); } return .{ .name = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls), .library_name = try ip.getOrPutStringOpt(gpa, pt.tid, library_name, .no_embedded_nulls), .linkage = linkage, .is_thread_local = is_thread_local_val.toBool(), }; } fn zirBuiltinExtern( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ty_src = block.builtinCallArgSrc(extra.node, 0); const options_src = block.builtinCallArgSrc(extra.node, 1); var ty = try sema.resolveType(block, ty_src, extra.lhs); if (!ty.isPtrAtRuntime(mod)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } if (!try sema.validateExternType(ty, .other)) { const msg = msg: { const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const options = try sema.resolveExternOptions(block, options_src, extra.rhs); // TODO: error for threadlocal functions, non-const functions, etc if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) { ty = try pt.optionalType(ty.toIntern()); } const ptr_info = ty.ptrInfo(mod); const extern_val = try pt.getExtern(.{ .name = options.name, .ty = ptr_info.child, .lib_name = options.library_name, .is_const = ptr_info.flags.is_const, .is_threadlocal = options.is_thread_local, .is_weak_linkage = options.linkage == .weak, .alignment = ptr_info.flags.alignment, .@"addrspace" = ptr_info.flags.address_space, // This instruction is just for source locations. // `builtin_extern` doesn't provide enough information, and isn't currently tracked. // So, for now, just use our containing `declaration`.
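// Illustrative use (hypothetical user code):
//     const puts = @extern(*const fn ([*:0]const u8) callconv(.C) c_int, .{ .name = "puts" });
// resolves through this path; the interned extern value is wrapped in a pointer
// below (an optional pointer when `.linkage = .weak` was requested).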
.zir_index = switch (sema.owner.unwrap()) { .cau => sema.getOwnerCauDeclInst(), .func => sema.getOwnerFuncDeclInst(), }, .owner_nav = undefined, // ignored by `getExtern` }); const extern_nav = ip.indexToKey(extern_val).@"extern".owner_nav; return Air.internedToRef((try pt.getCoerced(Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = switch (ip.indexToKey(ty.toIntern())) { .ptr_type => ty.toIntern(), .opt_type => |child_type| child_type, else => unreachable, }, .base_addr = .{ .nav = extern_nav }, .byte_offset = 0, } })), ty)).toIntern()); } fn zirWorkItem( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, zir_tag: Zir.Inst.Extended, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const dimension_src = block.builtinCallArgSrc(extra.node, 0); const builtin_src = block.nodeOffset(extra.node); const target = sema.pt.zcu.getTarget(); switch (target.cpu.arch) { // TODO: Allow for other GPU targets. .amdgcn, .spirv, .spirv64, .spirv32, .nvptx, .nvptx64 => {}, else => { return sema.fail(block, builtin_src, "builtin only available on GPU targets; targeted architecture is {s}", .{@tagName(target.cpu.arch)}); }, } const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, .{ .needed_comptime_reason = "dimension must be comptime-known", })); try sema.requireRuntimeBlock(block, builtin_src, null); return block.addInst(.{ .tag = switch (zir_tag) { .work_item_id => .work_item_id, .work_group_size => .work_group_size, .work_group_id => .work_group_id, else => unreachable, }, .data = .{ .pl_op = .{ .operand = .none, .payload = dimension, } }, }); } fn zirInComptime( sema: *Sema, block: *Block, ) CompileError!Air.Inst.Ref { _ = sema; return if (block.is_comptime) .bool_true else .bool_false; } fn zirBuiltinValue(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const value: Zir.Inst.BuiltinValue = @enumFromInt(extended.small); const type_name = switch (value) { .atomic_order => "AtomicOrder", .atomic_rmw_op => "AtomicRmwOp", .calling_convention => "CallingConvention", .address_space => "AddressSpace", .float_mode => "FloatMode", .reduce_op => "ReduceOp", .call_modifier => "CallModifier", .prefetch_options => "PrefetchOptions", .export_options => "ExportOptions", .extern_options => "ExternOptions", .type_info => "Type", // Values are handled here. 
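// The arms above name a type in `std.builtin` which is returned at the end of
// this function; the two arms below instead produce enum *values* of
// `std.builtin.CallingConvention`, so they return directly.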
.calling_convention_c => { const callconv_ty = try pt.getBuiltinType("CallingConvention"); comptime assert(@intFromEnum(std.builtin.CallingConvention.C) == 1); const val = try pt.intern(.{ .enum_tag = .{ .ty = callconv_ty.toIntern(), .int = .one_u8, } }); return Air.internedToRef(val); }, .calling_convention_inline => { const callconv_ty = try pt.getBuiltinType("CallingConvention"); comptime assert(@intFromEnum(std.builtin.CallingConvention.Inline) == 4); const val = try pt.intern(.{ .enum_tag = .{ .ty = callconv_ty.toIntern(), .int = .four_u8, } }); return Air.internedToRef(val); }, }; const ty = try pt.getBuiltinType(type_name); return Air.internedToRef(ty.toIntern()); } fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc, runtime_src: ?LazySrcLoc) !void { if (block.is_comptime) { const msg = msg: { const msg = try sema.errMsg(src, "unable to evaluate comptime expression", .{}); errdefer msg.destroy(sema.gpa); if (runtime_src) |some| { try sema.errNote(some, msg, "operation is runtime due to this operand", .{}); } if (block.comptime_reason) |some| { try some.explain(sema, msg); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } /// Emit a compile error if type cannot be used for a runtime variable. fn validateVarType( sema: *Sema, block: *Block, src: LazySrcLoc, var_ty: Type, is_extern: bool, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; if (is_extern) { if (!try sema.validateExternType(var_ty, .other)) { const msg = msg: { const msg = try sema.errMsg(src, "extern variable cannot have type '{}'", .{var_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, src, var_ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } else { if (var_ty.zigTypeTag(mod) == .Opaque) { return sema.fail( block, src, "non-extern variable with opaque type '{}'", .{var_ty.fmt(pt)}, ); } } if (!try sema.typeRequiresComptime(var_ty)) return; const msg = msg: { const msg = try sema.errMsg(src, "variable of type '{}' must be const or comptime", .{var_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, src, var_ty); if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void); fn explainWhyTypeIsComptime( sema: *Sema, msg: *Module.ErrorMsg, src_loc: LazySrcLoc, ty: Type, ) CompileError!void { var type_set = TypeSet{}; defer type_set.deinit(sema.gpa); try ty.resolveFully(sema.pt); return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set); } fn explainWhyTypeIsComptimeInner( sema: *Sema, msg: *Module.ErrorMsg, src_loc: LazySrcLoc, ty: Type, type_set: *TypeSet, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, .ErrorSet, .Enum, .Frame, .AnyFrame, .Void, => return, .Fn => { try sema.errNote(src_loc, msg, "use '*const {}' for a function pointer type", .{ty.fmt(pt)}); }, .Type => { try sema.errNote(src_loc, msg, "types are not available at runtime", .{}); }, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .NoReturn, .Undefined, .Null, => return, .Opaque => { try sema.errNote(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(pt)}); }, .Array, .Vector => { try 
sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Pointer => { const elem_ty = ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Fn) { const fn_info = mod.typeToFunc(elem_ty).?; if (fn_info.is_generic) { try sema.errNote(src_loc, msg, "function is generic", .{}); } switch (fn_info.cc) { .Inline => try sema.errNote(src_loc, msg, "function has inline calling convention", .{}), else => {}, } if (Type.fromInterned(fn_info.return_type).comptimeOnly(pt)) { try sema.errNote(src_loc, msg, "function has a comptime-only return type", .{}); } return; } try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Optional => { try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set); }, .ErrorUnion => { try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set); }, .Struct => { if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; if (mod.typeToStruct(ty)) |struct_type| { for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); const field_src: LazySrcLoc = .{ .base_node_inst = struct_type.zir_index.unwrap().?, .offset = .{ .container_field_type = @intCast(i) }, }; if (try sema.typeRequiresComptime(field_ty)) { try sema.errNote(field_src, msg, "struct requires comptime because of this field", .{}); try sema.explainWhyTypeIsComptimeInner(msg, field_src, field_ty, type_set); } } } // TODO tuples }, .Union => { if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; if (mod.typeToUnion(ty)) |union_obj| { for (0..union_obj.field_types.len) |i| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]); const field_src: LazySrcLoc = .{ .base_node_inst = union_obj.zir_index, .offset = .{ .container_field_type = @intCast(i) }, }; if (try sema.typeRequiresComptime(field_ty)) { try sema.errNote(field_src, msg, "union requires comptime because of this field", .{}); try sema.explainWhyTypeIsComptimeInner(msg, field_src, field_ty, type_set); } } } }, } } const ExternPosition = enum { ret_ty, param_ty, union_field, struct_field, element, other, }; /// Returns true if `ty` is allowed in extern types. /// Does *NOT* require `ty` to be resolved in any way. /// Calls `resolveLayout` for packed containers. fn validateExternType( sema: *Sema, ty: Type, position: ExternPosition, ) !bool { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Undefined, .Null, .ErrorUnion, .ErrorSet, .Frame, => return false, .Void => return position == .union_field or position == .ret_ty or position == .struct_field or position == .element, .NoReturn => return position == .ret_ty, .Opaque, .Bool, .Float, .AnyFrame, => return true, .Pointer => { if (ty.childType(mod).zigTypeTag(mod) == .Fn) { return ty.isConstPtr(mod) and try sema.validateExternType(ty.childType(mod), .other); } return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)); }, .Int => switch (ty.intInfo(mod).bits) { 0, 8, 16, 32, 64, 128 => return true, else => return false, }, .Fn => { if (position != .other) return false; const target = mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
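// For example (hypothetical): a kernel such as
//     fn add(a: []const f32, out: []f32) callconv(.Kernel) void { ... }
// is treated as extern-compatible on nvptx targets by the check below, even
// though slices have no guaranteed C ABI.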
if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { return true; } return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { return sema.validateExternType(ty.intTagType(mod), position); }, .Struct, .Union => switch (ty.containerLayout(mod)) { .@"extern" => return true, .@"packed" => { const bit_size = try ty.bitSizeAdvanced(pt, .sema); switch (bit_size) { 0, 8, 16, 32, 64, 128 => return true, else => return false, } }, .auto => return !(try sema.typeHasRuntimeBits(ty)), }, .Array => { if (position == .ret_ty or position == .param_ty) return false; return sema.validateExternType(ty.elemType2(mod), .element); }, .Vector => return sema.validateExternType(ty.elemType2(mod), .element), .Optional => return ty.isPtrLikeOptional(mod), } } fn explainWhyTypeIsNotExtern( sema: *Sema, msg: *Module.ErrorMsg, src_loc: LazySrcLoc, ty: Type, position: ExternPosition, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Opaque, .Bool, .Float, .AnyFrame, => return, .Type, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Undefined, .Null, .ErrorUnion, .ErrorSet, .Frame, => return, .Pointer => { if (ty.isSlice(mod)) { try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { const pointee_ty = ty.childType(mod); if (!ty.isConstPtr(mod) and pointee_ty.zigTypeTag(mod) == .Fn) { try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{}); } else if (try sema.typeRequiresComptime(ty)) { try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(pt)}); try sema.explainWhyTypeIsComptime(msg, src_loc, ty); } try sema.explainWhyTypeIsNotExtern(msg, src_loc, pointee_ty, .other); } }, .Void => try sema.errNote(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}), .NoReturn => try sema.errNote(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) { try sema.errNote(src_loc, msg, "only integers with 0 or power of two bits are extern compatible", .{}); } else { try sema.errNote(src_loc, msg, "only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible", .{}); }, .Fn => { if (position != .other) { try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}); try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{}); return; } switch (ty.fnCallingConvention(mod)) { .Unspecified => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}), .Async => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}), .Inline => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}), else => return, } }, .Enum => { const tag_ty = ty.intTagType(mod); try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(pt)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, .Struct => try sema.errNote(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}), .Union => try sema.errNote(src_loc, msg, "only extern unions and ABI sized packed unions are extern compatible", .{}), .Array => { if (position == .ret_ty) { return sema.errNote(src_loc, msg, "arrays are not allowed as a return type", .{}); } else if (position == .param_ty) { return sema.errNote(src_loc, msg, "arrays are not allowed as a parameter type", .{}); } try 
sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element); }, .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element), .Optional => try sema.errNote(src_loc, msg, "only pointer-like optionals are extern compatible", .{}), } } /// Returns true if `ty` is allowed in packed types. /// Does not require `ty` to be resolved in any way, but may resolve whether it is comptime-only. fn validatePackedType(sema: *Sema, ty: Type) !bool { const pt = sema.pt; const zcu = pt.zcu; return switch (ty.zigTypeTag(zcu)) { .Type, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Undefined, .Null, .ErrorUnion, .ErrorSet, .Frame, .NoReturn, .Opaque, .AnyFrame, .Fn, .Array, => false, .Optional => return ty.isPtrLikeOptional(zcu), .Void, .Bool, .Float, .Int, .Vector, => true, .Enum => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).tag_mode) { .auto => false, .explicit, .nonexhaustive => true, }, .Pointer => !ty.isSlice(zcu) and !try sema.typeRequiresComptime(ty), .Struct, .Union => ty.containerLayout(zcu) == .@"packed", }; } fn explainWhyTypeIsNotPacked( sema: *Sema, msg: *Module.ErrorMsg, src_loc: LazySrcLoc, ty: Type, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; switch (ty.zigTypeTag(mod)) { .Void, .Bool, .Float, .Int, .Vector, .Enum, => return, .Type, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Undefined, .Null, .Frame, .NoReturn, .Opaque, .ErrorUnion, .ErrorSet, .AnyFrame, .Optional, .Array, => try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}), .Pointer => if (ty.isSlice(mod)) { try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { try sema.errNote(src_loc, msg, "comptime-only pointer has no guaranteed in-memory representation", .{}); try sema.explainWhyTypeIsComptime(msg, src_loc, ty); }, .Fn => { try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}); try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{}); }, .Struct => try sema.errNote(src_loc, msg, "only structs with packed layout are allowed in packed types", .{}), .Union => try sema.errNote(src_loc, msg, "only unions with packed layout are allowed in packed types", .{}), } } fn prepareSimplePanic(sema: *Sema, block: *Block, src: LazySrcLoc) !void { const pt = sema.pt; const mod = pt.zcu; if (mod.panic_func_index == .none) { const fn_ref = try sema.analyzeNavVal(block, src, try pt.getBuiltinNav("panic")); const fn_val = try sema.resolveConstValue(block, src, fn_ref, .{ .needed_comptime_reason = "panic handler must be comptime-known", }); assert(fn_val.typeOf(mod).zigTypeTag(mod) == .Fn); assert(try sema.fnHasRuntimeBits(fn_val.typeOf(mod))); try mod.ensureFuncBodyAnalysisQueued(fn_val.toIntern()); mod.panic_func_index = fn_val.toIntern(); } if (mod.null_stack_trace == .none) { const stack_trace_ty = try pt.getBuiltinType("StackTrace"); try stack_trace_ty.resolveFields(pt); const target = mod.getTarget(); const ptr_stack_trace_ty = try pt.ptrTypeSema(.{ .child = stack_trace_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), }, }); const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern()); mod.null_stack_trace = try pt.intern(.{ .opt = .{ .ty = opt_ptr_stack_trace_ty.toIntern(), .val = .none, } }); } } /// Backends depend on panic decls being available when lowering safety-checked /// instructions.
This function ensures the panic function will be available to /// be called during that time. fn preparePanicId(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) !InternPool.Nav.Index { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; try sema.prepareSimplePanic(block, src); const panic_messages_ty = try pt.getBuiltinType("panic_messages"); const msg_nav_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, panic_messages_ty.getNamespaceIndex(mod), try mod.intern_pool.getOrPutString(gpa, pt.tid, @tagName(panic_id), .no_embedded_nulls), ) catch |err| switch (err) { error.AnalysisFail => @panic("std.builtin.panic_messages is corrupt"), error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, error.OutOfMemory => |e| return e, }).?; try sema.ensureNavResolved(src, msg_nav_index); mod.panic_messages[@intFromEnum(panic_id)] = msg_nav_index.toOptional(); return msg_nav_index; } fn addSafetyCheck( sema: *Sema, parent_block: *Block, src: LazySrcLoc, ok: Air.Inst.Ref, panic_id: Module.PanicId, ) !void { const gpa = sema.gpa; assert(!parent_block.is_comptime); var fail_block: Block = .{ .parent = parent_block, .sema = sema, .namespace = parent_block.namespace, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = false, .src_base_inst = parent_block.src_base_inst, .type_name_ctx = parent_block.type_name_ctx, }; defer fail_block.instructions.deinit(gpa); try sema.safetyPanic(&fail_block, src, panic_id); try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); } fn addSafetyCheckExtra( sema: *Sema, parent_block: *Block, ok: Air.Inst.Ref, fail_block: *Block, ) !void { const gpa = sema.gpa; try parent_block.instructions.ensureUnusedCapacity(gpa, 1); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 1 + // The main block only needs space for the cond_br. @typeInfo(Air.CondBr).Struct.fields.len + 1 + // The ok branch of the cond_br only needs space for the br. 
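// The fail branch needs space for each instruction in `fail_block`.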
fail_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 3); const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); const cond_br_inst: Air.Inst.Index = @enumFromInt(@intFromEnum(block_inst) + 1); const br_inst: Air.Inst.Index = @enumFromInt(@intFromEnum(cond_br_inst) + 1); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .void_type, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1, }), } }, }); sema.air_extra.appendAssumeCapacity(@intFromEnum(cond_br_inst)); sema.air_instructions.appendAssumeCapacity(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = ok, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = 1, .else_body_len = @intCast(fail_block.instructions.items.len), }), } }, }); sema.air_extra.appendAssumeCapacity(@intFromEnum(br_inst)); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(fail_block.instructions.items)); sema.air_instructions.appendAssumeCapacity(.{ .tag = .br, .data = .{ .br = .{ .block_inst = block_inst, .operand = .void_value, } }, }); parent_block.instructions.appendAssumeCapacity(block_inst); } fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, operation: CallOperation) !void { const pt = sema.pt; const mod = pt.zcu; if (!mod.backendSupportsFeature(.panic_fn)) { _ = try block.addNoOp(.trap); return; } try sema.prepareSimplePanic(block, src); const panic_func = mod.funcInfo(mod.panic_func_index); const panic_fn = try sema.analyzeNavVal(block, src, panic_func.owner_nav); const null_stack_trace = Air.internedToRef(mod.null_stack_trace); const opt_usize_ty = try pt.optionalType(.usize_type); const null_ret_addr = Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = opt_usize_ty.toIntern(), .val = .none, } }))); try sema.callBuiltin(block, src, panic_fn, .auto, &.{ msg_inst, null_stack_trace, null_ret_addr }, operation); } fn panicUnwrapError( sema: *Sema, parent_block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, unwrap_err_tag: Air.Inst.Tag, is_non_err_tag: Air.Inst.Tag, ) !void { const pt = sema.pt; assert(!parent_block.is_comptime); const ok = try parent_block.addUnOp(is_non_err_tag, operand); if (!pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .unwrap_error); } const gpa = sema.gpa; var fail_block: Block = .{ .parent = parent_block, .sema = sema, .namespace = parent_block.namespace, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = false, .src_base_inst = parent_block.src_base_inst, .type_name_ctx = parent_block.type_name_ctx, }; defer fail_block.instructions.deinit(gpa); { if (!pt.zcu.backendSupportsFeature(.panic_unwrap_error)) { _ = try fail_block.addNoOp(.trap); } else { const panic_fn = try sema.pt.getBuiltin("panicUnwrapError"); const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); const err_return_trace = try sema.getErrorReturnTrace(&fail_block); const args: [2]Air.Inst.Ref = .{ err_return_trace, err }; try sema.callBuiltin(&fail_block, src, panic_fn, .auto, &args, .@"safety check"); } } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); } fn panicIndexOutOfBounds( sema: *Sema, parent_block: *Block, src: LazySrcLoc, index: Air.Inst.Ref, len: Air.Inst.Ref, cmp_op: Air.Inst.Tag, ) !void { assert(!parent_block.is_comptime); const ok = try parent_block.addBinOp(cmp_op, index, len); if (!sema.pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .index_out_of_bounds); } try 
sema.safetyCheckFormatted(parent_block, src, ok, "panicOutOfBounds", &.{ index, len }); } fn panicInactiveUnionField( sema: *Sema, parent_block: *Block, src: LazySrcLoc, active_tag: Air.Inst.Ref, wanted_tag: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); const ok = try parent_block.addBinOp(.cmp_eq, active_tag, wanted_tag); if (!sema.pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .inactive_union_field); } try sema.safetyCheckFormatted(parent_block, src, ok, "panicInactiveUnionField", &.{ active_tag, wanted_tag }); } fn panicSentinelMismatch( sema: *Sema, parent_block: *Block, src: LazySrcLoc, maybe_sentinel: ?Value, sentinel_ty: Type, ptr: Air.Inst.Ref, sentinel_index: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); const pt = sema.pt; const mod = pt.zcu; const expected_sentinel_val = maybe_sentinel orelse return; const expected_sentinel = Air.internedToRef(expected_sentinel_val.toIntern()); const ptr_ty = sema.typeOf(ptr); const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { const elem_ptr_ty = try ptr_ty.elemPtrType(null, pt); const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty); break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq); break :ok try parent_block.addInst(.{ .tag = .reduce, .data = .{ .reduce = .{ .operand = eql, .operation = .And, } }, }); } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { const panic_fn = try pt.getBuiltin("checkNonScalarSentinel"); const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel }; try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check"); return; }; if (!pt.zcu.comp.formatted_panics) { return sema.addSafetyCheck(parent_block, src, ok, .sentinel_mismatch); } try sema.safetyCheckFormatted(parent_block, src, ok, "panicSentinelMismatch", &.{ expected_sentinel, actual_sentinel }); } fn safetyCheckFormatted( sema: *Sema, parent_block: *Block, src: LazySrcLoc, ok: Air.Inst.Ref, func: []const u8, args: []const Air.Inst.Ref, ) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; assert(zcu.comp.formatted_panics); const gpa = sema.gpa; var fail_block: Block = .{ .parent = parent_block, .sema = sema, .namespace = parent_block.namespace, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = false, .src_base_inst = parent_block.src_base_inst, .type_name_ctx = parent_block.type_name_ctx, }; defer fail_block.instructions.deinit(gpa); if (!zcu.backendSupportsFeature(.safety_check_formatted)) { _ = try fail_block.addNoOp(.trap); } else { const panic_fn = try pt.getBuiltin(func); try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check"); } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); } fn safetyPanic(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) CompileError!void { const msg_nav_index = try sema.preparePanicId(block, src, panic_id); const msg_inst = try sema.analyzeNavVal(block, src, msg_nav_index); try sema.panicWithMsg(block, src, msg_inst, .@"safety check"); } fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void { sema.branch_count += 1; if (sema.branch_count > sema.branch_quota) { const msg = try sema.errMsg( src, 
"evaluation exceeded {d} backwards branches", .{sema.branch_quota}, ); try sema.errNote( src, msg, "use @setEvalBranchQuota() to raise the branch limit from {d}", .{sema.branch_quota}, ); return sema.failWithOwnedErrorMsg(block, msg); } } fn fieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; switch (inner_ty.zigTypeTag(mod)) { .Array => { if (field_name.eqlSlice("len", ip)) { return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); const result_ty = try pt.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ .size = .Many, .alignment = ptr_info.flags.alignment, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, .vector_index = ptr_info.flags.vector_index, }, .packed_offset = ptr_info.packed_offset, }); return sema.coerce(block, result_ty, object, src); } else { return sema.fail( block, field_name_src, "no member named '{}' in '{}'", .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } }, .Pointer => { const ptr_info = inner_ty.ptrInfo(mod); if (ptr_info.flags.size == .Slice) { if (field_name.eqlSlice("ptr", ip)) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSlicePtr(block, object_src, slice, inner_ty); } else if (field_name.eqlSlice("len", ip)) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSliceLen(block, src, slice); } else { return sema.fail( block, field_name_src, "no member named '{}' in '{}'", .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } } }, .Type => { const dereffed_type = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; const child_type = val.toType(); switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, field_name) != null) break :blk; return sema.fail(block, src, "no error named '{}' in '{}'", .{ field_name.fmt(ip), child_type.fmt(pt), }); }, .inferred_error_set_type => { return sema.fail(block, src, "TODO handle inferred error sets here", .{}); }, .simple_type => |t| { assert(t == .anyerror); _ = try pt.getErrorValue(field_name); }, else => unreachable, } const error_set_type = if (!child_type.isAnyError(mod)) child_type else try 
pt.singleErrorSetType(field_name); return Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = field_name, } }))); }, .Union => { if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } try child_type.resolveFields(pt); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index: u32 = @intCast(field_index_usize); return Air.internedToRef((try pt.enumValueFieldIndex(enum_ty, field_index)).toIntern()); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index: u32 = @intCast(field_index_usize); const enum_val = try pt.enumValueFieldIndex(child_type, field_index); return Air.internedToRef(enum_val.toIntern()); }, .Struct, .Opaque => { switch (child_type.toIntern()) { .empty_struct_type, .anyopaque_type => {}, // no namespace else => if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; }, } return sema.failWithBadMemberAccess(block, child_type, src, field_name); }, else => return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)}); errdefer msg.destroy(sema.gpa); if (child_type.isSlice(mod)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{}); if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{}); break :msg msg; }), } }, .Struct => if (is_pointer_to) { // Avoid loading the entire struct by fetching a pointer and loading that const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false); return sema.analyzeLoad(block, src, field_ptr, object_src); } else { return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty); }, .Union => if (is_pointer_to) { // Avoid loading the entire union by fetching a pointer and loading that const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false); return sema.analyzeLoad(block, src, field_ptr, object_src); } else { return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty); }, else => {}, } return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name); } fn fieldPtr( sema: *Sema, block: *Block, src: LazySrcLoc, object_ptr: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, initializing: bool, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { .Pointer => object_ptr_ty.childType(mod), else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(pt)}), }; // Zig allows dereferencing a single pointer during field lookup. 
Note that // we don't actually need to generate the dereference for some field lookups, like the // length of arrays and other comptime operations. const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; switch (inner_ty.zigTypeTag(mod)) { .Array => { if (field_name.eqlSlice("len", ip)) { const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(mod)); return uavRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); const new_ptr_ty = try pt.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ .size = .Many, .alignment = ptr_info.flags.alignment, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, .vector_index = ptr_info.flags.vector_index, }, .packed_offset = ptr_info.packed_offset, }); const ptr_ptr_info = object_ptr_ty.ptrInfo(mod); const result_ty = try pt.ptrTypeSema(.{ .child = new_ptr_ty.toIntern(), .sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ .alignment = ptr_ptr_info.flags.alignment, .is_const = ptr_ptr_info.flags.is_const, .is_volatile = ptr_ptr_info.flags.is_volatile, .is_allowzero = ptr_ptr_info.flags.is_allowzero, .address_space = ptr_ptr_info.flags.address_space, .vector_index = ptr_ptr_info.flags.vector_index, }, .packed_offset = ptr_ptr_info.packed_offset, }); return sema.bitCast(block, result_ty, object_ptr, src, null); } else { return sema.fail( block, field_name_src, "no member named '{}' in '{}'", .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } }, .Pointer => if (inner_ty.isSlice(mod)) { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty; if (field_name.eqlSlice("ptr", ip)) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); const result_ty = try pt.ptrTypeSema(.{ .child = slice_ptr_ty.toIntern(), .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), .is_volatile = attr_ptr_ty.isVolatilePtr(mod), .address_space = attr_ptr_ty.ptrAddressSpace(mod), }, }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, pt)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); const field_ptr = try block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else if (field_name.eqlSlice("len", ip)) { const result_ty = try pt.ptrTypeSema(.{ .child = .usize_type, .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), .is_volatile = attr_ptr_ty.isVolatilePtr(mod), .address_space = attr_ptr_ty.ptrAddressSpace(mod), }, }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { return Air.internedToRef((try val.ptrField(Value.slice_len_index, pt)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); const field_ptr = try block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr); try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else { return sema.fail( block, field_name_src, "no member named '{}' in '{}'", .{ field_name.fmt(ip), object_ty.fmt(pt) }, ); } }, .Type => { _ = try
sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, object_ptr, undefined); const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src); const inner = if (is_pointer_to) try sema.analyzeLoad(block, src, result, object_ptr_src) else result; const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; const child_type = val.toType(); switch (child_type.zigTypeTag(mod)) { .ErrorSet => { switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, field_name) != null) { break :blk; } return sema.fail(block, src, "no error named '{}' in '{}'", .{ field_name.fmt(ip), child_type.fmt(pt), }); }, .inferred_error_set_type => { return sema.fail(block, src, "TODO handle inferred error sets here", .{}); }, .simple_type => |t| { assert(t == .anyerror); _ = try pt.getErrorValue(field_name); }, else => unreachable, } const error_set_type = if (!child_type.isAnyError(mod)) child_type else try pt.singleErrorSetType(field_name); return uavRef(sema, try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = field_name, } })); }, .Union => { if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } try child_type.resolveFields(pt); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); const idx_val = try pt.enumValueFieldIndex(enum_ty, field_index_u32); return uavRef(sema, idx_val.toIntern()); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32: u32 = @intCast(field_index); const idx_val = try pt.enumValueFieldIndex(child_type, field_index_u32); return uavRef(sema, idx_val.toIntern()); }, .Struct, .Opaque => { if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(pt)}), } }, .Struct => { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; const field_ptr = try sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing); try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; }, .Union => { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; const field_ptr = try sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty, initializing); try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; }, else => {}, } return sema.failWithInvalidFieldAccess(block, src, object_ty, field_name); } const ResolvedFieldCallee = union(enum) { /// The LHS of the call was an actual field with this value. direct: Air.Inst.Ref, /// This is a method call, with the function and first argument given. 
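/// Illustrative example (hypothetical names): for `list.append(item)`, where
/// `append` is declared as `fn append(self: *List, item: Item) void`, this
/// resolves to `.{ .method = .{ .func_inst = <append>, .arg0_inst = <&list> } }`,
/// and the call is then lowered as `append(&list, item)`.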
method: struct { func_inst: Air.Inst.Ref, arg0_inst: Air.Inst.Ref, }, }; fn fieldCallBind( sema: *Sema, block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!ResolvedFieldCallee { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a `ResolvedFieldCallee`. const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); const inner_ty = if (raw_ptr_ty.zigTypeTag(zcu) == .Pointer and (raw_ptr_ty.ptrSize(zcu) == .One or raw_ptr_ty.ptrSize(zcu) == .C)) raw_ptr_ty.childType(zcu) else return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(pt)}); // Optionally dereference a second pointer to get the concrete type. const is_double_ptr = inner_ty.zigTypeTag(zcu) == .Pointer and inner_ty.ptrSize(zcu) == .One; const concrete_ty = if (is_double_ptr) inner_ty.childType(zcu) else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) try sema.analyzeLoad(block, src, raw_ptr, src) else raw_ptr; find_field: { switch (concrete_ty.zigTypeTag(zcu)) { .Struct => { try concrete_ty.resolveFields(pt); if (zcu.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr); } else if (concrete_ty.isTuple(zcu)) { if (field_name.eqlSlice("len", ip)) { return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(zcu)) }; } if (field_name.toUnsigned(ip)) |field_index| { if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field; return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, zcu), field_index, object_ptr); } } else { const max = concrete_ty.structFieldCount(zcu); for (0..max) |i_usize| { const i: u32 = @intCast(i_usize); if (field_name == concrete_ty.structFieldName(i, zcu).unwrap().?) { return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, zcu), i, object_ptr); } } } }, .Union => { try concrete_ty.resolveFields(pt); const union_obj = zcu.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); return .{ .direct = try sema.analyzeLoad(block, src, field_ptr, src) }; }, .Type => { const namespace = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .direct = try sema.fieldVal(block, src, namespace, field_name, field_name_src) }; }, else => {}, } } // If we get here, we need to look for a decl in the struct type instead.
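// Sketch of the first-parameter shapes accepted by the lookup below
// (commentary only; assumes the object has type `T` and the call is `v.f(...)`):
//   fn f(self: *T) / fn f(self: anytype) => bound to the object pointer
//   fn f(self: T)                        => bound to the dereferenced value
//   fn f(self: ?T) / fn f(self: ?*T)     => value or pointer, per the child type
//   fn f(self: E!T)                      => bound to the dereferenced value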
const found_nav = found_nav: { const namespace = concrete_ty.getNamespace(zcu).unwrap() orelse break :found_nav null; const nav_index = try sema.namespaceLookup(block, src, namespace, field_name) orelse break :found_nav null; const decl_val = try sema.analyzeNavVal(block, src, nav_index); const decl_type = sema.typeOf(decl_val); if (zcu.typeToFunc(decl_type)) |func_type| f: { if (func_type.param_types.len == 0) break :f; const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]); if (first_param_type.isGenericPoison() or (first_param_type.zigTypeTag(zcu) == .Pointer and (first_param_type.ptrSize(zcu) == .One or first_param_type.ptrSize(zcu) == .C) and first_param_type.childType(zcu).eql(concrete_ty, zcu))) { // Note that if the param type is generic poison, we know that it must // specifically be `anytype` since it's the first parameter, meaning we // can safely assume it can be a pointer. // TODO: bound fn calls on rvalues should probably // generate a by-value argument somehow. return .{ .method = .{ .func_inst = decl_val, .arg0_inst = object_ptr, } }; } else if (first_param_type.eql(concrete_ty, zcu)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; } else if (first_param_type.zigTypeTag(zcu) == .Optional) { const child = first_param_type.optionalChild(zcu); if (child.eql(concrete_ty, zcu)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; } else if (child.zigTypeTag(zcu) == .Pointer and child.ptrSize(zcu) == .One and child.childType(zcu).eql(concrete_ty, zcu)) { return .{ .method = .{ .func_inst = decl_val, .arg0_inst = object_ptr, } }; } } else if (first_param_type.zigTypeTag(zcu) == .ErrorUnion and first_param_type.errorUnionPayload(zcu).eql(concrete_ty, zcu)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; } } break :found_nav nav_index; }; const msg = msg: { const msg = try sema.errMsg(src, "no field or member function named '{}' in '{}'", .{ field_name.fmt(ip), concrete_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_nav) |nav_index| { try sema.errNote( zcu.navSrcLoc(nav_index), msg, "'{}' is not a member function", .{field_name.fmt(ip)}, ); } if (concrete_ty.zigTypeTag(zcu) == .ErrorUnion) { try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{}); } if (is_double_ptr) { try sema.errNote(src, msg, "method invocation only supports up to one level of implicit pointer dereferencing", .{}); try sema.errNote(src, msg, "use '.*' to dereference pointer", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn finishFieldCallBind( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_ty: Type, field_ty: Type, field_index: u32, object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { const pt = sema.pt; const mod = pt.zcu; const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !ptr_ty.ptrIsMutable(mod), .address_space = ptr_ty.ptrAddressSpace(mod), }, }); const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldIsComptime(field_index, mod)) { try container_ty.resolveStructFieldInits(pt); const default_val = (try container_ty.structFieldValueComptime(pt, field_index)).?; return .{ .direct = 
Air.internedToRef(default_val.toIntern()) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { const ptr_val = try struct_ptr_val.ptrField(field_index, pt); const pointer = Air.internedToRef(ptr_val.toIntern()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } try sema.requireRuntimeBlock(block, src, null); const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty); return .{ .direct = try sema.analyzeLoad(block, src, ptr_inst, src) }; } fn namespaceLookup( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: InternPool.NamespaceIndex, decl_name: InternPool.NullTerminatedString, ) CompileError!?InternPool.Nav.Index { const pt = sema.pt; const zcu = pt.zcu; const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| { if (!lookup.accessible) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "'{}' is not marked 'pub'", .{ decl_name.fmt(&zcu.intern_pool), }); errdefer msg.destroy(gpa); try sema.errNote(zcu.navSrcLoc(lookup.nav), msg, "declared here", .{}); break :msg msg; }); } return lookup.nav; } return null; } fn namespaceLookupRef( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: InternPool.NamespaceIndex, decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const nav = try sema.namespaceLookup(block, src, namespace, decl_name) orelse return null; return try sema.analyzeNavRef(src, nav); } fn namespaceLookupVal( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: InternPool.NamespaceIndex, decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const nav = try sema.namespaceLookup(block, src, namespace, decl_name) orelse return null; return try sema.analyzeNavVal(block, src, nav); } fn structFieldPtr( sema: *Sema, block: *Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); try struct_ty.resolveFields(pt); try struct_ty.resolveLayout(pt); if (struct_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } else if (struct_ty.isAnonStruct(mod)) { const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } const struct_type = mod.typeToStruct(struct_ty).?; const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing); } fn structFieldPtrByIndex( sema: *Sema, block: *Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, field_index: u32, field_src: LazySrcLoc, struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, 
field_index, initializing); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { const val = try struct_ptr_val.ptrField(field_index, pt); return Air.internedToRef(val.toIntern()); } const struct_type = mod.typeToStruct(struct_ty).?; const field_ty = struct_type.field_types.get(ip)[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); var ptr_ty_data: InternPool.Key.PtrType = .{ .child = field_ty, .flags = .{ .is_const = struct_ptr_ty_info.flags.is_const, .is_volatile = struct_ptr_ty_info.flags.is_volatile, .address_space = struct_ptr_ty_info.flags.address_space, }, }; const parent_align = if (struct_ptr_ty_info.flags.alignment != .none) struct_ptr_ty_info.flags.alignment else try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child)); if (struct_type.layout == .@"packed") { switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) { .bit_ptr => |packed_offset| { ptr_ty_data.flags.alignment = parent_align; ptr_ty_data.packed_offset = packed_offset; }, .byte_ptr => |ptr_info| { ptr_ty_data.flags.alignment = ptr_info.alignment; }, } } else if (struct_type.layout == .@"extern") { // For extern structs, field alignment might be bigger than type's // natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the // second field is aligned as u32. const field_offset = struct_ty.structFieldOffset(field_index, pt); ptr_ty_data.flags.alignment = if (parent_align == .none) .none else @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. const field_align = try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), Type.fromInterned(field_ty), struct_type.layout, .sema, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align else field_align.min(parent_align); } const ptr_field_ty = try pt.ptrTypeSema(ptr_ty_data); if (struct_type.fieldIsComptime(ip, field_index)) { try struct_ty.resolveStructFieldInits(pt); const val = try pt.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, .byte_offset = 0, } }); return Air.internedToRef(val); } try sema.requireRuntimeBlock(block, src, null); return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty); } fn structFieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, struct_byval: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, struct_ty: Type, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); try struct_ty.resolveFields(pt); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { const struct_type = ip.loadStructType(struct_ty.toIntern()); if (struct_type.isTuple(ip)) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.fieldIsComptime(ip, field_index)) { try struct_ty.resolveStructFieldInits(pt); return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); if (try sema.typeHasOnePossibleValue(field_ty)) |field_val| return Air.internedToRef(field_val.toIntern()); if (try 
sema.resolveValue(struct_byval)) |struct_val| { if (struct_val.isUndef(mod)) return pt.undefRef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } return Air.internedToRef((try struct_val.fieldValue(pt, field_index)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); try field_ty.resolveLayout(pt); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { if (anon_struct.names.len == 0) { return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); } else { const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); } }, else => unreachable, } } fn tupleFieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, tuple_byval: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; if (field_name.eqlSlice("len", &mod.intern_pool)) { return pt.intRef(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty); } /// Asserts that `field_name` is not "len". fn tupleFieldIndex( sema: *Sema, block: *Block, tuple_ty: Type, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { const pt = sema.pt; const ip = &pt.zcu.intern_pool; assert(!field_name.eqlSlice("len", ip)); if (field_name.toUnsigned(ip)) |field_index| { if (field_index < tuple_ty.structFieldCount(pt.zcu)) return field_index; return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{ field_name.fmt(ip), tuple_ty.fmt(pt), }); } return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{ field_name.fmt(ip), tuple_ty.fmt(pt), }); } fn tupleFieldValByIndex( sema: *Sema, block: *Block, src: LazySrcLoc, tuple_byval: Air.Inst.Ref, field_index: u32, tuple_ty: Type, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) try tuple_ty.resolveStructFieldInits(pt); if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); } if (try sema.resolveValue(tuple_byval)) |tuple_val| { if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) { .undef => pt.undefRef(field_ty), .aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) { .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &mod.intern_pool)), .elems => |elems| Value.fromInterned(elems[field_index]), .repeated_elem => |elem| Value.fromInterned(elem), }.toIntern()), else => unreachable, }; } try sema.requireRuntimeBlock(block, src, null); try field_ty.resolveLayout(pt); return block.addStructFieldVal(tuple_byval, field_index, field_ty); } fn unionFieldPtr( sema: *Sema, block: *Block, src: LazySrcLoc, union_ptr: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, union_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; 
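// Commentary (non-normative summary of what follows): for `&u.field`, this
// computes a suitably aligned `*Field` pointer type, verifies (or, when
// initializing, sets) the active tag of an auto-layout union at comptime,
// and emits the equivalent runtime safety check otherwise.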
assert(union_ty.zigTypeTag(mod) == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ptr_info = union_ptr_ty.ptrInfo(mod); try union_ty.resolveFields(pt); const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = union_ptr_info.flags.is_const, .is_volatile = union_ptr_info.flags.is_volatile, .address_space = union_ptr_info.flags.address_space, .alignment = if (union_obj.flagsUnordered(ip).layout == .auto) blk: { const union_align = if (union_ptr_info.flags.alignment != .none) union_ptr_info.flags.alignment else try sema.typeAbiAlignment(union_ty); const field_align = try pt.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, .packed_offset = union_ptr_info.packed_offset, }); const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?); if (initializing and field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: { switch (union_obj.flagsUnordered(ip).layout) { .auto => if (initializing) { // Store to the union to initialize the tag. 
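// E.g. (illustrative): initializing `var u: U = undefined;` through `&u.a`
// overwrites the whole union with the new tag and an undefined payload, so
// subsequent stores through the field pointer are well-defined.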
const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); const new_union_val = try pt.unionValue(union_ty, field_tag, try pt.undefValue(payload_ty)); try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty); } else { const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse break :ct; if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?; const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod); const msg = try sema.errMsg(src, "access of union field '{}' while field '{}' is active", .{ field_name.fmt(ip), active_field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } }, .@"packed", .@"extern" => {}, } const field_ptr_val = try union_ptr_val.ptrField(field_index, pt); return Air.internedToRef(field_ptr_val.toIntern()); } try sema.requireRuntimeBlock(block, src, null); if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1) { const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern()); // TODO would it be better if get_union_tag supported pointers to unions? 
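// The emitted check is roughly (AIR sketch; spelling not exact):
//   %u      = load(U, %union_ptr)
//   %active = get_union_tag(%u)
//   panic path is taken when %active != %wanted_tag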
const union_val = try block.addTyOp(.load, union_ty, union_ptr); const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val); try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag); } if (field_ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return .unreachable_value; } return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty); } fn unionFieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, union_byval: Air.Inst.Ref, field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, union_ty: Type, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .Union); try union_ty.resolveFields(pt); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?); if (try sema.resolveValue(union_byval)) |union_val| { if (union_val.isUndef(zcu)) return pt.undefRef(field_ty); const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.flagsUnordered(ip).layout) { .auto => { if (tag_matches) { return Air.internedToRef(un.val); } else { const msg = msg: { const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), zcu).?; const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, zcu); const msg = try sema.errMsg(src, "access of union field '{}' while field '{}' is active", .{ field_name.fmt(ip), active_field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } }, .@"extern" => if (tag_matches) { // Fast path - no need to use bitcast logic. return Air.internedToRef(un.val); } else if (try sema.bitCastVal(union_val, field_ty, 0, 0, 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, .@"packed" => if (tag_matches) { // Fast path - no need to use bitcast logic. 
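// (Reading an inactive field of a packed union is not a safety violation;
// the bits are reinterpreted, which the `bitCastVal` fallback below models.)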
return Air.internedToRef(un.val); } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(pt, .sema), 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, } } try sema.requireRuntimeBlock(block, src, null); if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1) { const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern()); const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval); try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag); } if (field_ty.zigTypeTag(zcu) == .NoReturn) { _ = try block.addNoOp(.unreach); return .unreachable_value; } try field_ty.resolveLayout(pt); return block.addStructFieldVal(union_byval, field_index, field_ty); } fn elemPtr( sema: *Sema, block: *Block, src: LazySrcLoc, indexable_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const indexable_ptr_src = src; // TODO better source location const indexable_ptr_ty = sema.typeOf(indexable_ptr); const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { .Pointer => indexable_ptr_ty.childType(mod), else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}), }; try checkIndexable(sema, block, src, indexable_ty); const elem_ptr = switch (indexable_ty.zigTypeTag(mod)) { .Array, .Vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), .Struct => blk: { // Tuple field access. const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt)); break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { const indexable = try sema.analyzeLoad(block, indexable_ptr_src, indexable_ptr, indexable_ptr_src); return elemPtrOneLayerOnly(sema, block, src, indexable, elem_index, elem_index_src, init, oob_safety); }, }; try sema.checkKnownAllocPtr(block, indexable_ptr, elem_ptr); return elem_ptr; } /// Asserts that the type of indexable is pointer. 
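/// For example (illustrative): `many_ptr[i]` produces a `ptr_elem_ptr` here
/// (or a comptime-folded pointer), while a `*[N]T` operand is routed through
/// the array-element logic; only a single pointer layer is peeled, as the
/// name suggests.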
fn elemPtrOneLayerOnly( sema: *Sema, block: *Block, src: LazySrcLoc, indexable: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); const pt = sema.pt; const mod = pt.zcu; try checkIndexable(sema, block, src, indexable_ty); switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable); const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); const elem_ptr = try ptr_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr.toIntern()); }; const result_ty = try indexable_ty.elemPtrType(null, pt); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { const child_ty = indexable_ty.childType(mod); const elem_ptr = switch (child_ty.zigTypeTag(mod)) { .Array, .Vector => try sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety), .Struct => blk: { assert(child_ty.isTuple(mod)); const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt)); break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable }; try sema.checkKnownAllocPtr(block, indexable, elem_ptr); return elem_ptr; }, } } fn elemVal( sema: *Sema, block: *Block, src: LazySrcLoc, indexable: Air.Inst.Ref, elem_index_uncasted: Air.Inst.Ref, elem_index_src: LazySrcLoc, oob_safety: bool, ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); const pt = sema.pt; const mod = pt.zcu; try checkIndexable(sema, block, src, indexable_ty); // TODO in case of a vector of pointers, we need to detect whether the element // index is a scalar or vector instead of unconditionally casting to usize. 
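// For now the index is coerced unconditionally, so e.g. (illustrative)
// `slice[@as(u8, i)]` type-checks because `u8` coerces to `usize` here.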
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); switch (indexable_ty.zigTypeTag(mod)) { .Pointer => switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable); const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try pt.manyConstPtrType(elem_ty); const many_ptr_val = try pt.getCoerced(indexable_val, many_ptr_ty); const elem_ptr_ty = try pt.singleConstPtrType(elem_ty); const elem_ptr_val = try many_ptr_val.ptrElem(index, pt); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef((try pt.getCoerced(elem_val, elem_ty)).toIntern()); } break :rs indexable_src; }; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { arr_sent: { const inner_ty = indexable_ty.childType(mod); if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent; const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent; const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent; const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(pt)); if (index != inner_ty.arrayLen(mod)) break :arr_sent; return Air.internedToRef(sentinel.toIntern()); } const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, }, .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Vector => { // TODO: If the index is a vector, the result should be a vector. return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety); }, .Struct => { // Tuple field access. 
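// E.g. (illustrative): `tup[0]` is accepted, but `tup[runtime_i]` is a
// compile error: each tuple element may have a different type, so the
// index below must be comptime-known.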
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); const index: u32 = @intCast(try index_val.toUnsignedIntSema(pt)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, } } fn validateRuntimeElemAccess( sema: *Sema, block: *Block, elem_index_src: LazySrcLoc, elem_ty: Type, parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { if (try sema.typeRequiresComptime(elem_ty)) { const msg = msg: { const msg = try sema.errMsg( elem_index_src, "values of type '{}' must be comptime-known, but index value is runtime-known", .{parent_ty.fmt(sema.pt)}, ); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsComptime(msg, parent_src, parent_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } fn tupleFieldPtr( sema: *Sema, block: *Block, tuple_ptr_src: LazySrcLoc, tuple_ptr: Air.Inst.Ref, field_index_src: LazySrcLoc, field_index: u32, init: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); try tuple_ty.resolveFields(pt); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{}); } if (field_index >= field_count) { return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{ field_index, field_count, }); } const field_ty = tuple_ty.structFieldType(field_index, mod); const ptr_field_ty = try pt.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !tuple_ptr_ty.ptrIsMutable(mod), .is_volatile = tuple_ptr_ty.isVolatilePtr(mod), .address_space = tuple_ptr_ty.ptrAddressSpace(mod), }, }); if (tuple_ty.structFieldIsComptime(field_index, mod)) try tuple_ty.resolveStructFieldInits(pt); if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_val| { return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = default_val.toIntern() }, .byte_offset = 0, } }))); } if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| { const field_ptr_val = try tuple_ptr_val.ptrField(field_index, pt); return Air.internedToRef(field_ptr_val.toIntern()); } if (!init) { try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_ptr_src); } try sema.requireRuntimeBlock(block, tuple_ptr_src, null); return block.addStructFieldPtr(tuple_ptr, field_index, ptr_field_ty); } fn tupleField( sema: *Sema, block: *Block, tuple_src: LazySrcLoc, tuple: Air.Inst.Ref, field_index_src: LazySrcLoc, field_index: u32, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const tuple_ty = sema.typeOf(tuple); try tuple_ty.resolveFields(pt); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{}); } if (field_index >= field_count) { return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{ field_index, field_count, }); } const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) try tuple_ty.resolveStructFieldInits(pt); if (try tuple_ty.structFieldValueComptime(pt, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); // comptime field } if (try sema.resolveValue(tuple)) 
|tuple_val| { if (tuple_val.isUndef(mod)) return pt.undefRef(field_ty); return Air.internedToRef((try tuple_val.fieldValue(pt, field_index)).toIntern()); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); try sema.requireRuntimeBlock(block, tuple_src, null); try field_ty.resolveLayout(pt); return block.addStructFieldVal(tuple, field_index, field_ty); } fn elemValArray( sema: *Sema, block: *Block, src: LazySrcLoc, array_src: LazySrcLoc, array: Air.Inst.Ref, elem_index_src: LazySrcLoc, elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const array_ty = sema.typeOf(array); const array_sent = array_ty.sentinel(mod); const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @intFromBool(array_sent != null); const elem_ty = array_ty.childType(mod); if (array_len_s == 0) { return sema.fail(block, array_src, "indexing into empty array is not allowed", .{}); } const maybe_undef_array_val = try sema.resolveValue(array); // index must be defined since it can access out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); if (array_sent) |s| { if (index == array_len) { return Air.internedToRef(s.toIntern()); } } if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent != null) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); } } if (maybe_undef_array_val) |array_val| { if (array_val.isUndef(mod)) { return pt.undefRef(elem_ty); } if (maybe_index_val) |index_val| { const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); const elem_val = try array_val.elemValue(pt, index); return Air.internedToRef(elem_val.toIntern()); } } try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, array_ty, array_src); const runtime_src = if (maybe_undef_array_val != null) elem_index_src else array_src; if (oob_safety and block.wantSafety()) { // Runtime check is only needed if unable to comptime check if (maybe_index_val == null) { const len_inst = try pt.intRef(Type.usize, array_len); const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } } if (try sema.typeHasOnePossibleValue(elem_ty)) |elem_val| return Air.internedToRef(elem_val.toIntern()); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addBinOp(.array_elem_val, array, elem_index); } fn elemPtrArray( sema: *Sema, block: *Block, src: LazySrcLoc, array_ptr_src: LazySrcLoc, array_ptr: Air.Inst.Ref, elem_index_src: LazySrcLoc, elem_index: Air.Inst.Ref, init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const array_ptr_ty = sema.typeOf(array_ptr); const array_ty = array_ptr_ty.childType(mod); const array_sent = array_ty.sentinel(mod) != null; const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @intFromBool(array_sent); if (array_len_s == 0) { return sema.fail(block, array_ptr_src, "indexing into empty array is not allowed", .{}); } const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr); // The index must not be undefined since it can be out of bounds. 
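// E.g. (illustrative): `&arr[undefined]` with a comptime-known undefined
// index is reported as a use of undefined rather than being assigned an
// arbitrary offset.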
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(pt)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); } break :o index; } else null; const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, pt); if (maybe_undef_array_ptr_val) |array_ptr_val| { if (array_ptr_val.isUndef(mod)) { return pt.undefRef(elem_ptr_ty); } if (offset) |index| { const elem_ptr = try array_ptr_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr.toIntern()); } } if (!init) { try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src); } const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src; try sema.requireRuntimeBlock(block, src, runtime_src); // Runtime check is only needed if unable to comptime check. if (oob_safety and block.wantSafety() and offset == null) { const len_inst = try pt.intRef(Type.usize, array_len); const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } return block.addPtrElemPtr(array_ptr, elem_index, elem_ptr_ty); } fn elemValSlice( sema: *Sema, block: *Block, src: LazySrcLoc, slice_src: LazySrcLoc, slice: Air.Inst.Ref, elem_index_src: LazySrcLoc, elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel(mod) != null; const elem_ty = slice_ty.elemType2(mod); var runtime_src = slice_src; // slice must be defined since it could be dereferenced as null const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice); // index must be defined since it can index out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; const slice_len = try slice_val.sliceLen(pt); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { const index: usize = @intCast(try index_val.toUnsignedIntSema(pt)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } const elem_ptr_ty = try slice_ty.elemPtrType(index, pt); const elem_ptr_val = try slice_val.ptrElem(index, pt); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } runtime_src = slice_src; } } try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ty, slice_ty, slice_src); try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| try pt.intRef(Type.usize, try slice_val.sliceLen(pt)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } return block.addBinOp(.slice_elem_val, slice,
elem_index); } fn elemPtrSlice( sema: *Sema, block: *Block, src: LazySrcLoc, slice_src: LazySrcLoc, slice: Air.Inst.Ref, elem_index_src: LazySrcLoc, elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel(mod) != null; const maybe_undef_slice_val = try sema.resolveValue(slice); // The index must not be undefined since it can be out of bounds. const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(pt)); break :o index; } else null; const elem_ptr_ty = try slice_ty.elemPtrType(offset, pt); if (maybe_undef_slice_val) |slice_val| { if (slice_val.isUndef(mod)) { return pt.undefRef(elem_ptr_ty); } const slice_len = try slice_val.sliceLen(pt); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (offset) |index| { if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } const elem_ptr_val = try slice_val.ptrElem(index, pt); return Air.internedToRef(elem_ptr_val.toIntern()); } } try sema.validateRuntimeElemAccess(block, elem_index_src, elem_ptr_ty, slice_ty, slice_src); const runtime_src = if (maybe_undef_slice_val != null) elem_index_src else slice_src; try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(mod)) break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } return block.addSliceElemPtr(slice, elem_index, elem_ptr_ty); } fn coerce( sema: *Sema, block: *Block, dest_ty_unresolved: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { return sema.coerceExtra(block, dest_ty_unresolved, inst, inst_src, .{}) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }; } const CoersionError = CompileError || error{ /// When coerce is called recursively, this error should be returned instead of using `fail` /// to ensure correct types in compile errors. NotCoercible, }; const CoerceOpts = struct { /// Should coerceExtra emit error messages. report_err: bool = true, /// Ignored if `report_err == false`. is_ret: bool = false, /// Should coercion to comptime_int emit an error message. 
no_cast_to_comptime_int: bool = false, param_src: struct { func_inst: Air.Inst.Ref = .none, param_i: u32 = undefined, fn get(info: @This(), sema: *Sema) !?LazySrcLoc { if (info.func_inst == .none) return null; const func_inst = try sema.funcDeclSrcInst(info.func_inst) orelse return null; return .{ .base_node_inst = func_inst, .offset = .{ .fn_proto_param_type = .{ .fn_proto_node_offset = 0, .param_index = info.param_i, } }, }; } } = .{ .func_inst = .none, .param_i = undefined }, }; fn coerceExtra( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { if (dest_ty.isGenericPoison()) return inst; const pt = sema.pt; const zcu = pt.zcu; const dest_ty_src = inst_src; // TODO better source location try dest_ty.resolveFields(pt); const inst_ty = sema.typeOf(inst); try inst_ty.resolveFields(pt); const target = zcu.getTarget(); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty, zcu)) return inst; const maybe_inst_val = try sema.resolveValue(inst); var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src, maybe_inst_val); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { return sema.coerceInMemory(val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); const new_val = try block.addBitCast(dest_ty, inst); try sema.checkKnownAllocPtr(block, inst, new_val); return new_val; } switch (dest_ty.zigTypeTag(zcu)) { .Optional => optional: { if (maybe_inst_val) |val| { // undefined sets the optional bit also to undefined. if (val.toIntern() == .undef) { return pt.undefRef(dest_ty); } // null to ?T if (val.toIntern() == .null_value) { return Air.internedToRef((try pt.intern(.{ .opt = .{ .ty = dest_ty.toIntern(), .val = .none, } }))); } } // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer if (dest_ty.isPtrLikeOptional(zcu) and dest_ty.elemType2(zcu).toIntern() == .anyopaque_type and inst_ty.isPtrAtRuntime(zcu)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; const elem_ty = inst_ty.elemType2(zcu); if (elem_ty.zigTypeTag(zcu) == .Pointer or elem_ty.isPtrLikeOptional(zcu)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, } }; break :optional; } // Let the logic below handle wrapping the optional now that // it has been checked to correctly coerce. if (!inst_ty.isPtrLikeOptional(zcu)) break :anyopaque_check; return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } // T to ?T const child_type = dest_ty.optionalChild(zcu); const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => { if (in_memory_result == .no_match) { // Try to give more useful notes in_memory_result = try sema.coerceInMemoryAllowed(block, child_type, inst_ty, false, target, dest_ty_src, inst_src, maybe_inst_val); } break :optional; }, else => |e| return e, }; return try sema.wrapOptional(block, dest_ty, intermediate, inst_src); }, .Pointer => pointer: { const dest_info = dest_ty.ptrInfo(zcu); // Function body to function pointer. 
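// For example (illustrative): with `fn foo() void {}` in scope, the coercion
// `const p: *const fn () void = foo;` takes this path; the function value is
// resolved to its owner nav, re-analyzed as a pointer to it, then coerced again.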
if (inst_ty.zigTypeTag(zcu) == .Fn) { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_nav = switch (zcu.intern_pool.indexToKey(fn_val.toIntern())) { .func => |f| f.owner_nav, .@"extern" => |e| e.owner_nav, else => unreachable, }; const inst_as_ptr = try sema.analyzeNavRef(inst_src, fn_nav); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } // *T to *[1]T single_item: { if (dest_info.flags.size != .One) break :single_item; if (!inst_ty.isSinglePointer(zcu)) break :single_item; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const ptr_elem_ty = inst_ty.childType(zcu); const array_ty = Type.fromInterned(dest_info.child); if (array_ty.zigTypeTag(zcu) != .Array) break :single_item; const array_elem_ty = array_ty.childType(zcu); if (array_ty.arrayLen(zcu) != 1) break :single_item; const dest_is_mut = !dest_info.flags.is_const; switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val)) { .ok => {}, else => break :single_item, } return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } // Coercions where the source is a single pointer to an array. src_array_ptr: { if (!inst_ty.isSinglePointer(zcu)) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const array_ty = inst_ty.childType(zcu); if (array_ty.zigTypeTag(zcu) != .Array) break :src_array_ptr; const array_elem_type = array_ty.childType(zcu); const dest_is_mut = !dest_info.flags.is_const; const dst_elem_type = Type.fromInterned(dest_info.child); const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val); switch (elem_res) { .ok => {}, else => { in_memory_result = .{ .ptr_child = .{ .child = try elem_res.dupe(sema.arena), .actual = array_elem_type, .wanted = dst_elem_type, } }; break :src_array_ptr; }, } if (dest_info.sentinel != .none) { if (array_ty.sentinel(zcu)) |inst_sent| { if (Air.internedToRef(dest_info.sentinel) != try sema.coerceInMemory(inst_sent, dst_elem_type)) { in_memory_result = .{ .ptr_sentinel = .{ .actual = inst_sent, .wanted = Value.fromInterned(dest_info.sentinel), .ty = dst_elem_type, } }; break :src_array_ptr; } } else { in_memory_result = .{ .ptr_sentinel = .{ .actual = Value.@"unreachable", .wanted = Value.fromInterned(dest_info.sentinel), .ty = dst_elem_type, } }; break :src_array_ptr; } } switch (dest_info.flags.size) { .Slice => { // *[N]T to []T return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src); }, .C => { // *[N]T to [*c]T return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); }, .Many => { // *[N]T to [*]T return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); }, .One => {}, } } // coercion from C pointer if (inst_ty.isCPtr(zcu)) src_c_ptr: { if (dest_info.flags.size == .Slice) break :src_c_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr; // In this case we must add a safety check because the C pointer // could be null. 
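// Illustrative (a sketch):
//
//     var byte: u8 = 0;
//     var c_ptr: [*c]u8 = &byte; // C pointers may be null
//     const p: *u8 = c_ptr; // allowed; safe builds check for null here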
const src_elem_ty = inst_ty.childType(zcu); const dest_is_mut = !dest_info.flags.is_const; const dst_elem_type = Type.fromInterned(dest_info.child); switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val)) { .ok => {}, else => break :src_c_ptr, } return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(zcu) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const elem_ty = inst_ty.elemType2(zcu); if (elem_ty.zigTypeTag(zcu) == .Pointer or elem_ty.isPtrLikeOptional(zcu)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, } }; break :pointer; } if (dest_ty.isSlice(zcu)) break :to_anyopaque; if (inst_ty.isSlice(zcu)) { in_memory_result = .{ .slice_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, } }; break :pointer; } return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } switch (dest_info.flags.size) { // coercion to C pointer .C => switch (inst_ty.zigTypeTag(zcu)) { .Null => return Air.internedToRef(try pt.intern(.{ .ptr = .{ .ty = dest_ty.toIntern(), .base_addr = .int, .byte_offset = 0, } })), .ComptimeInt => { const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => break :pointer, else => |e| return e, }; return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Int => { const ptr_size_ty = switch (inst_ty.intInfo(zcu).signedness) { .signed => Type.isize, .unsigned => Type.usize, }; const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => { // Try to give more useful notes in_memory_result = try sema.coerceInMemoryAllowed(block, ptr_size_ty, inst_ty, false, target, dest_ty_src, inst_src, maybe_inst_val); break :pointer; }, else => |e| return e, }; return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Pointer => p: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; const inst_info = inst_ty.ptrInfo(zcu); switch (try sema.coerceInMemoryAllowed( block, Type.fromInterned(dest_info.child), Type.fromInterned(inst_info.child), !dest_info.flags.is_const, target, dest_ty_src, inst_src, maybe_inst_val, )) { .ok => {}, else => break :p, } if (inst_info.flags.size == .Slice) { assert(dest_info.sentinel == .none); if (inst_info.sentinel == .none or inst_info.sentinel != (try pt.intValue(Type.fromInterned(inst_info.child), 0)).toIntern()) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src); } return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); }, else => {}, }, .One => switch (Type.fromInterned(dest_info.child).zigTypeTag(zcu)) { .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer(zcu) and inst_ty.childType(zcu).isAnonStruct(zcu) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src); } }, .Struct => { // pointer to anonymous struct to pointer to struct if (inst_ty.isSinglePointer(zcu) and inst_ty.childType(zcu).isAnonStruct(zcu) and 
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) { error.NotCoercible => break :pointer, else => |e| return e, }; } }, .Array => { // pointer to tuple to pointer to array if (inst_ty.isSinglePointer(zcu) and inst_ty.childType(zcu).isTuple(zcu) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src); } }, else => {}, }, .Slice => to_slice: { if (inst_ty.zigTypeTag(zcu) == .Array) { return sema.fail( block, inst_src, "array literal requires address-of operator (&) to coerce to slice type '{}'", .{dest_ty.fmt(pt)}, ); } if (!inst_ty.isSinglePointer(zcu)) break :to_slice; const inst_child_ty = inst_ty.childType(zcu); if (!inst_child_ty.isTuple(zcu)) break :to_slice; // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. if (inst_child_ty.structFieldCount(zcu) == 0) { const align_val = try dest_ty.ptrAlignmentAdvanced(pt, .sema); return Air.internedToRef(try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = try pt.intern(.{ .ptr = .{ .ty = dest_ty.slicePtrFieldType(zcu).toIntern(), .base_addr = .int, .byte_offset = align_val.toByteUnits().?, } }), .len = .zero_usize, } })); } // pointer to tuple to slice if (!dest_info.flags.is_const) { const err_msg = err_msg: { const err_msg = try sema.errMsg(inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(pt)}); errdefer err_msg.destroy(sema.gpa); try sema.errNote(dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{}); break :err_msg err_msg; }; return sema.failWithOwnedErrorMsg(block, err_msg); } return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src); }, .Many => p: { if (!inst_ty.isSlice(zcu)) break :p; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; const inst_info = inst_ty.ptrInfo(zcu); switch (try sema.coerceInMemoryAllowed( block, Type.fromInterned(dest_info.child), Type.fromInterned(inst_info.child), !dest_info.flags.is_const, target, dest_ty_src, inst_src, maybe_inst_val, )) { .ok => {}, else => break :p, } if (dest_info.sentinel == .none or inst_info.sentinel == .none or Air.internedToRef(dest_info.sentinel) != try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), Type.fromInterned(dest_info.child))) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src); }, } }, .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(zcu)) { .Float, .ComptimeFloat => float: { const val = maybe_inst_val orelse { if (dest_ty.zigTypeTag(zcu) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, .{ .needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known", }); } break :float; }; const result_val = try sema.intFromFloat(block, inst_src, val, inst_ty, dest_ty, .exact); return Air.internedToRef(result_val.toIntern()); }, .Int, .ComptimeInt => { if (maybe_inst_val) |val| { // comptime-known integer to other number if (!(try sema.intFitsInType(val, dest_ty, null))) { if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(pt), val.fmtValueSema(pt, sema) }); } return switch (zcu.intern_pool.indexToKey(val.toIntern())) { .undef => try 
pt.undefRef(dest_ty), .int => |int| Air.internedToRef( try zcu.intern_pool.getCoercedInts(zcu.gpa, pt.tid, int, dest_ty.toIntern()), ), else => unreachable, }; } if (dest_ty.zigTypeTag(zcu) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; if (opts.no_cast_to_comptime_int) return inst; return sema.failWithNeededComptime(block, inst_src, .{ .needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known", }); } // integer widening const dst_info = dest_ty.intInfo(zcu); const src_info = inst_ty.intInfo(zcu); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) { try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.intcast, dest_ty, inst); } }, else => {}, }, .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(zcu)) { .ComptimeFloat => { const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const result_val = try val.floatCast(dest_ty, pt); return Air.internedToRef(result_val.toIntern()); }, .Float => { if (maybe_inst_val) |val| { const result_val = try val.floatCast(dest_ty, pt); if (!val.eql(try result_val.floatCast(inst_ty, pt), inst_ty, zcu)) { return sema.fail( block, inst_src, "type '{}' cannot represent float value '{}'", .{ dest_ty.fmt(pt), val.fmtValueSema(pt, sema) }, ); } return Air.internedToRef(result_val.toIntern()); } else if (dest_ty.zigTypeTag(zcu) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, .{ .needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known", }); } // float widening const src_bits = inst_ty.floatBits(target); const dst_bits = dest_ty.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.fpext, dest_ty, inst); } }, .Int, .ComptimeInt => int: { const val = maybe_inst_val orelse { if (dest_ty.zigTypeTag(zcu) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, .{ .needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known", }); } break :int; }; const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, pt, .sema); // TODO implement this compile error //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, zcu)) { // return sema.fail( // block, // inst_src, // "type '{}' cannot represent integer value '{}'", // .{ dest_ty.fmt(pt), val }, // ); //} return Air.internedToRef(result_val.toIntern()); }, else => {}, }, .Enum => switch (inst_ty.zigTypeTag(zcu)) { .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const string = zcu.intern_pool.indexToKey(val.toIntern()).enum_literal; const field_index = dest_ty.enumFieldIndex(string, zcu) orelse { return sema.fail(block, inst_src, "no field named '{}' in enum '{}'", .{ string.fmt(&zcu.intern_pool), dest_ty.fmt(pt), }); }; return Air.internedToRef((try pt.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern()); }, .Union => blk: { // union to its own tag type const union_tag_ty = inst_ty.unionTagType(zcu) orelse break :blk; if (union_tag_ty.eql(dest_ty, zcu)) { return sema.unionToTag(block, dest_ty, inst, inst_src); } }, 
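// Illustrative example of the union-to-tag prong above (a sketch):
//
//     const U = union(enum) { a: u8, b: bool };
//     const u: U = .{ .a = 1 };
//     const tag: std.meta.Tag(U) = u; // a union coerces to its own tag type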
else => {}, }, .ErrorUnion => switch (inst_ty.zigTypeTag(zcu)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { switch (inst_val.toIntern()) { .undef => return pt.undefRef(dest_ty), else => switch (zcu.intern_pool.indexToKey(inst_val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| { const error_set_ty = inst_ty.errorUnionSet(zcu); const error_set_val = Air.internedToRef((try pt.intern(.{ .err = .{ .ty = error_set_ty.toIntern(), .name = err_name, } }))); return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); }, .payload => |payload| { const payload_val = Air.internedToRef(payload); return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) { error.NotCoercible => break :eu, else => |e| return e, }; }, }, else => unreachable, }, } } }, .ErrorSet => { // E to E!T return sema.wrapErrorUnionSet(block, dest_ty, inst, inst_src); }, else => eu: { // T to E!T return sema.wrapErrorUnionPayload(block, dest_ty, inst, inst_src) catch |err| switch (err) { error.NotCoercible => { if (in_memory_result == .no_match) { const payload_type = dest_ty.errorUnionPayload(zcu); // Try to give more useful notes in_memory_result = try sema.coerceInMemoryAllowed(block, payload_type, inst_ty, false, target, dest_ty_src, inst_src, maybe_inst_val); } break :eu; }, else => |e| return e, }; }, }, .Union => switch (inst_ty.zigTypeTag(zcu)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isAnonStruct(zcu)) { return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src); } }, else => {}, }, .Array => switch (inst_ty.zigTypeTag(zcu)) { .Array => array_to_array: { // Array coercions are allowed only if the child is IMC and the sentinel is unchanged or removed. if (.ok != try sema.coerceInMemoryAllowed( block, dest_ty.childType(zcu), inst_ty.childType(zcu), false, target, dest_ty_src, inst_src, maybe_inst_val, )) { break :array_to_array; } if (dest_ty.sentinel(zcu)) |dest_sent| { const src_sent = inst_ty.sentinel(zcu) orelse break :array_to_array; if (dest_sent.toIntern() != (try pt.getCoerced(src_sent, dest_ty.childType(zcu))).toIntern()) { break :array_to_array; } } return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src); }, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst == .empty_struct) { return sema.arrayInitEmpty(block, inst_src, dest_ty); } if (inst_ty.isTuple(zcu)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, else => {}, }, .Vector => switch (inst_ty.zigTypeTag(zcu)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isTuple(zcu)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, else => {}, }, .Struct => blk: { if (inst == .empty_struct) { return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src); } if (inst_ty.isTupleOrAnonStruct(zcu)) { return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) { error.NotCoercible => break :blk, else => |e| return e, }; } }, else => {}, } // undefined to anything. We do this after the big switch above so that // special logic has a chance to run first, such as `*[N]T` to `[]T` which // should initialize the length field of the slice. 
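// For instance (illustrative): an undefined `*[3]u8` coercing to `[]u8` is
// claimed by the pointer logic above, so the resulting slice still gets a
// known length of 3; only coercions with no special handling land here.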
if (maybe_inst_val) |val| if (val.toIntern() == .undef) return pt.undefRef(dest_ty); if (!opts.report_err) return error.NotCoercible; if (opts.is_ret and dest_ty.zigTypeTag(zcu) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "function declared 'noreturn' returns", .{}); errdefer msg.destroy(sema.gpa); const ret_ty_src: LazySrcLoc = .{ .base_node_inst = sema.getOwnerFuncDeclInst(), .offset = .{ .node_offset_fn_type_ret_ty = 0 }, }; try sema.errNote(ret_ty_src, msg, "'noreturn' declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), inst_ty.fmt(pt) }); errdefer msg.destroy(sema.gpa); // E!T to T if (inst_ty.zigTypeTag(zcu) == .ErrorUnion and (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(zcu), dest_ty, false, target, dest_ty_src, inst_src, maybe_inst_val)) == .ok) { try sema.errNote(inst_src, msg, "cannot convert error union to payload type", .{}); try sema.errNote(inst_src, msg, "consider using 'try', 'catch', or 'if'", .{}); } // ?T to T if (inst_ty.zigTypeTag(zcu) == .Optional and (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(zcu), dest_ty, false, target, dest_ty_src, inst_src, maybe_inst_val)) == .ok) { try sema.errNote(inst_src, msg, "cannot convert optional to payload type", .{}); try sema.errNote(inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{}); } try in_memory_result.report(sema, inst_src, msg); // Add notes about function return type if (opts.is_ret and !zcu.test_functions.contains(zcu.funcInfo(sema.owner.unwrap().func).owner_nav)) { const ret_ty_src: LazySrcLoc = .{ .base_node_inst = sema.getOwnerFuncDeclInst(), .offset = .{ .node_offset_fn_type_ret_ty = 0 }, }; if (inst_ty.isError(zcu) and !dest_ty.isError(zcu)) { try sema.errNote(ret_ty_src, msg, "function cannot return an error", .{}); } else { try sema.errNote(ret_ty_src, msg, "function return type declared here", .{}); } } if (try opts.param_src.get(sema)) |param_src| { try sema.errNote(param_src, msg, "parameter type declared here", .{}); } // TODO maybe add "cannot store an error in type '{}'" note break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn coerceInMemory( sema: *Sema, val: Value, dst_ty: Type, ) CompileError!Air.Inst.Ref { return Air.internedToRef((try sema.pt.getCoerced(val, dst_ty)).toIntern()); } const InMemoryCoercionResult = union(enum) { ok, no_match: Pair, int_not_coercible: Int, comptime_int_not_coercible: TypeValuePair, error_union_payload: PairAndChild, array_len: IntPair, array_sentinel: Sentinel, array_elem: PairAndChild, vector_len: IntPair, vector_elem: PairAndChild, optional_shape: Pair, optional_child: PairAndChild, from_anyerror, missing_error: []const InternPool.NullTerminatedString, /// true if wanted is var args fn_var_args: bool, /// true if wanted is generic fn_generic: bool, fn_param_count: IntPair, fn_param_noalias: IntPair, fn_param_comptime: ComptimeParam, fn_param: Param, fn_cc: CC, fn_return_type: PairAndChild, ptr_child: PairAndChild, ptr_addrspace: AddressSpace, ptr_sentinel: Sentinel, ptr_size: Size, ptr_qualifiers: Qualifiers, ptr_allowzero: Pair, ptr_bit_range: BitRange, ptr_alignment: AlignPair, double_ptr_to_anyopaque: Pair, slice_to_anyopaque: Pair, const Pair = struct { actual: Type, wanted: Type, }; const TypeValuePair = struct { actual: Value, wanted: Type, }; const PairAndChild = struct { child: *InMemoryCoercionResult, actual: Type, wanted: 
Type, }; const Param = struct { child: *InMemoryCoercionResult, actual: Type, wanted: Type, index: u64, }; const ComptimeParam = struct { index: u64, wanted: bool, }; const Sentinel = struct { // unreachable_value indicates no sentinel actual: Value, wanted: Value, ty: Type, }; const Int = struct { actual_signedness: std.builtin.Signedness, wanted_signedness: std.builtin.Signedness, actual_bits: u16, wanted_bits: u16, }; const IntPair = struct { actual: u64, wanted: u64, }; const AlignPair = struct { actual: Alignment, wanted: Alignment, }; const Size = struct { actual: std.builtin.Type.Pointer.Size, wanted: std.builtin.Type.Pointer.Size, }; const Qualifiers = struct { actual_const: bool, wanted_const: bool, actual_volatile: bool, wanted_volatile: bool, }; const AddressSpace = struct { actual: std.builtin.AddressSpace, wanted: std.builtin.AddressSpace, }; const CC = struct { actual: std.builtin.CallingConvention, wanted: std.builtin.CallingConvention, }; const BitRange = struct { actual_host: u16, wanted_host: u16, actual_offset: u16, wanted_offset: u16, }; fn dupe(child: *const InMemoryCoercionResult, arena: Allocator) !*InMemoryCoercionResult { const res = try arena.create(InMemoryCoercionResult); res.* = child.*; return res; } fn report(res: *const InMemoryCoercionResult, sema: *Sema, src: LazySrcLoc, msg: *Module.ErrorMsg) !void { const pt = sema.pt; var cur = res; while (true) switch (cur.*) { .ok => unreachable, .no_match => |types| { try sema.addDeclaredHereNote(msg, types.wanted); try sema.addDeclaredHereNote(msg, types.actual); break; }, .int_not_coercible => |int| { try sema.errNote(src, msg, "{s} {d}-bit int cannot represent all possible {s} {d}-bit values", .{ @tagName(int.wanted_signedness), int.wanted_bits, @tagName(int.actual_signedness), int.actual_bits, }); break; }, .comptime_int_not_coercible => |int| { try sema.errNote(src, msg, "type '{}' cannot represent value '{}'", .{ int.wanted.fmt(pt), int.actual.fmtValueSema(pt, sema), }); break; }, .error_union_payload => |pair| { try sema.errNote(src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .array_len => |lens| { try sema.errNote(src, msg, "array of length {d} cannot cast into an array of length {d}", .{ lens.actual, lens.wanted, }); break; }, .array_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ sentinel.actual.fmtValueSema(pt, sema), sentinel.wanted.fmtValueSema(pt, sema), }); } else { try sema.errNote(src, msg, "destination array requires '{}' sentinel", .{ sentinel.wanted.fmtValueSema(pt, sema), }); } break; }, .array_elem => |pair| { try sema.errNote(src, msg, "array element type '{}' cannot cast into array element type '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .vector_len => |lens| { try sema.errNote(src, msg, "vector of length {d} cannot cast into a vector of length {d}", .{ lens.actual, lens.wanted, }); break; }, .vector_elem => |pair| { try sema.errNote(src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .optional_shape => |pair| { try sema.errNote(src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ pair.actual.optionalChild(pt.zcu).fmt(pt), pair.wanted.optionalChild(pt.zcu).fmt(pt), }); break; }, .optional_child => |pair| { try 
sema.errNote(src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .from_anyerror => { try sema.errNote(src, msg, "global error set cannot cast into a smaller set", .{}); break; }, .missing_error => |missing_errors| { for (missing_errors) |err| { try sema.errNote(src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&pt.zcu.intern_pool)}); } break; }, .fn_var_args => |wanted_var_args| { if (wanted_var_args) { try sema.errNote(src, msg, "non-variadic function cannot cast into a variadic function", .{}); } else { try sema.errNote(src, msg, "variadic function cannot cast into a non-variadic function", .{}); } break; }, .fn_generic => |wanted_generic| { if (wanted_generic) { try sema.errNote(src, msg, "non-generic function cannot cast into a generic function", .{}); } else { try sema.errNote(src, msg, "generic function cannot cast into a non-generic function", .{}); } break; }, .fn_param_count => |lens| { try sema.errNote(src, msg, "function with {d} parameters cannot cast into a function with {d} parameters", .{ lens.actual, lens.wanted, }); break; }, .fn_param_noalias => |param| { var index: u6 = 0; var actual_noalias = false; while (true) : (index += 1) { const actual: u1 = @truncate(param.actual >> index); const wanted: u1 = @truncate(param.wanted >> index); if (actual != wanted) { actual_noalias = actual == 1; break; } } if (!actual_noalias) { try sema.errNote(src, msg, "regular parameter {d} cannot cast into a noalias parameter", .{index}); } else { try sema.errNote(src, msg, "noalias parameter {d} cannot cast into a regular parameter", .{index}); } break; }, .fn_param_comptime => |param| { if (param.wanted) { try sema.errNote(src, msg, "non-comptime parameter {d} cannot cast into a comptime parameter", .{param.index}); } else { try sema.errNote(src, msg, "comptime parameter {d} cannot cast into a non-comptime parameter", .{param.index}); } break; }, .fn_param => |param| { try sema.errNote(src, msg, "parameter {d} '{}' cannot cast into '{}'", .{ param.index, param.actual.fmt(pt), param.wanted.fmt(pt), }); cur = param.child; }, .fn_cc => |cc| { try sema.errNote(src, msg, "calling convention '{s}' cannot cast into calling convention '{s}'", .{ @tagName(cc.actual), @tagName(cc.wanted) }); break; }, .fn_return_type => |pair| { try sema.errNote(src, msg, "return type '{}' cannot cast into return type '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .ptr_child => |pair| { try sema.errNote(src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); cur = pair.child; }, .ptr_addrspace => |@"addrspace"| { try sema.errNote(src, msg, "address space '{s}' cannot cast into address space '{s}'", .{ @tagName(@"addrspace".actual), @tagName(@"addrspace".wanted) }); break; }, .ptr_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ sentinel.actual.fmtValueSema(pt, sema), sentinel.wanted.fmtValueSema(pt, sema), }); } else { try sema.errNote(src, msg, "destination pointer requires '{}' sentinel", .{ sentinel.wanted.fmtValueSema(pt, sema), }); } break; }, .ptr_size => |size| { try sema.errNote(src, msg, "a {s} pointer cannot cast into a {s} pointer", .{ pointerSizeString(size.actual), pointerSizeString(size.wanted) }); break; }, .ptr_qualifiers => |qualifiers| { const ok_const = 
!qualifiers.actual_const or qualifiers.wanted_const; const ok_volatile = !qualifiers.actual_volatile or qualifiers.wanted_volatile; if (!ok_const) { try sema.errNote(src, msg, "cast discards const qualifier", .{}); } else if (!ok_volatile) { try sema.errNote(src, msg, "cast discards volatile qualifier", .{}); } break; }, .ptr_allowzero => |pair| { const wanted_allow_zero = pair.wanted.ptrAllowsZero(pt.zcu); const actual_allow_zero = pair.actual.ptrAllowsZero(pt.zcu); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(src, msg, "'{}' could have null values which are illegal in type '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); } else { try sema.errNote(src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); } break; }, .ptr_bit_range => |bit_range| { if (bit_range.actual_host != bit_range.wanted_host) { try sema.errNote(src, msg, "pointer host size '{}' cannot cast into pointer host size '{}'", .{ bit_range.actual_host, bit_range.wanted_host, }); } if (bit_range.actual_offset != bit_range.wanted_offset) { try sema.errNote(src, msg, "pointer bit offset '{}' cannot cast into pointer bit offset '{}'", .{ bit_range.actual_offset, bit_range.wanted_offset, }); } break; }, .ptr_alignment => |pair| { try sema.errNote(src, msg, "pointer alignment '{d}' cannot cast into pointer alignment '{d}'", .{ pair.actual.toByteUnits() orelse 0, pair.wanted.toByteUnits() orelse 0, }); break; }, .double_ptr_to_anyopaque => |pair| { try sema.errNote(src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); break; }, .slice_to_anyopaque => |pair| { try sema.errNote(src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{ pair.actual.fmt(pt), pair.wanted.fmt(pt), }); try sema.errNote(src, msg, "consider using '.ptr'", .{}); break; }, }; } }; fn pointerSizeString(size: std.builtin.Type.Pointer.Size) []const u8 { return switch (size) { .One => "single", .Many => "many", .C => "C", .Slice => unreachable, }; } /// If pointers have the same representation in runtime memory, a bitcast AIR instruction /// may be used for the coercion. /// * `const` attribute can be gained /// * `volatile` attribute can be gained /// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut /// * alignment can be decreased /// * bit offset attributes must match exactly /// * `*`/`[*]` must match exactly, but `[*c]` matches either one /// * sentinel-terminated pointers can coerce into `[*]` pub fn coerceInMemoryAllowed( sema: *Sema, block: *Block, dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: std.Target, dest_src: LazySrcLoc, src_src: LazySrcLoc, src_val: ?Value, ) CompileError!InMemoryCoercionResult { const pt = sema.pt; const mod = pt.zcu; if (dest_ty.eql(src_ty, mod)) return .ok; const dest_tag = dest_ty.zigTypeTag(mod); const src_tag = src_ty.zigTypeTag(mod); // Differently-named integers with the same number of bits. 
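// For example (illustrative; target-dependent): on targets where `c_int` is
// 32 bits, the distinct types `c_int` and `i32` are in-memory coercible
// because both signedness and bit count match:
//
//     var x: i32 = -1;
//     const y: c_int = x; // ok on such targets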
if (dest_tag == .Int and src_tag == .Int) { const dest_info = dest_ty.intInfo(mod); const src_info = src_ty.intInfo(mod); if (dest_info.signedness == src_info.signedness and dest_info.bits == src_info.bits) { return .ok; } if ((src_info.signedness == dest_info.signedness and dest_info.bits < src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dest_info.signedness == .signed and src_info.signedness == .unsigned and dest_info.bits <= src_info.bits) or (dest_info.signedness == .unsigned and src_info.signedness == .signed)) { return InMemoryCoercionResult{ .int_not_coercible = .{ .actual_signedness = src_info.signedness, .wanted_signedness = dest_info.signedness, .actual_bits = src_info.bits, .wanted_bits = dest_info.bits, } }; } } // Comptime int to regular int. if (dest_tag == .Int and src_tag == .ComptimeInt) { if (src_val) |val| { if (!(try sema.intFitsInType(val, dest_ty, null))) { return .{ .comptime_int_not_coercible = .{ .wanted = dest_ty, .actual = val } }; } } } // Differently-named floats with the same number of bits. if (dest_tag == .Float and src_tag == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { return .ok; } } // Pointers / Pointer-like Optionals const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty); const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty); if (maybe_dest_ptr_ty) |dest_ptr_ty| { if (maybe_src_ptr_ty) |src_ptr_ty| { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src); } } // Slices if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } // Functions if (dest_tag == .Fn and src_tag == .Fn) { return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src); } // Error Unions if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) { const dest_payload = dest_ty.errorUnionPayload(mod); const src_payload = src_ty.errorUnionPayload(mod); const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src, null); if (child != .ok) { return InMemoryCoercionResult{ .error_union_payload = .{ .child = try child.dupe(sema.arena), .actual = src_payload, .wanted = dest_payload, } }; } return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src, null); } // Error Sets if (dest_tag == .ErrorSet and src_tag == .ErrorSet) { return try sema.coerceInMemoryAllowedErrorSets(block, dest_ty, src_ty, dest_src, src_src); } // Arrays if (dest_tag == .Array and src_tag == .Array) { const dest_info = dest_ty.arrayInfo(mod); const src_info = src_ty.arrayInfo(mod); if (dest_info.len != src_info.len) { return InMemoryCoercionResult{ .array_len = .{ .actual = src_info.len, .wanted = dest_info.len, } }; } const child = try sema.coerceInMemoryAllowed(block, dest_info.elem_type, src_info.elem_type, dest_is_mut, target, dest_src, src_src, null); switch (child) { .ok => {}, .no_match => return child, else => { return InMemoryCoercionResult{ .array_elem = .{ .child = try child.dupe(sema.arena), .actual = src_info.elem_type, .wanted = dest_info.elem_type, } }; }, } const ok_sent = (dest_info.sentinel == null and src_info.sentinel == null) or (src_info.sentinel != null and dest_info.sentinel != null and 
dest_info.sentinel.?.eql( try pt.getCoerced(src_info.sentinel.?, dest_info.elem_type), dest_info.elem_type, mod, )); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ .actual = src_info.sentinel orelse Value.@"unreachable", .wanted = dest_info.sentinel orelse Value.@"unreachable", .ty = dest_info.elem_type, } }; } return .ok; } // Vectors if (dest_tag == .Vector and src_tag == .Vector) { const dest_len = dest_ty.vectorLen(mod); const src_len = src_ty.vectorLen(mod); if (dest_len != src_len) { return InMemoryCoercionResult{ .vector_len = .{ .actual = src_len, .wanted = dest_len, } }; } const dest_elem_ty = dest_ty.scalarType(mod); const src_elem_ty = src_ty.scalarType(mod); const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src, null); if (child != .ok) { return InMemoryCoercionResult{ .vector_elem = .{ .child = try child.dupe(sema.arena), .actual = src_elem_ty, .wanted = dest_elem_ty, } }; } return .ok; } // Arrays <-> Vectors if ((dest_tag == .Vector and src_tag == .Array) or (dest_tag == .Array and src_tag == .Vector)) { const dest_len = dest_ty.arrayLen(mod); const src_len = src_ty.arrayLen(mod); if (dest_len != src_len) { return InMemoryCoercionResult{ .array_len = .{ .actual = src_len, .wanted = dest_len, } }; } const dest_elem_ty = dest_ty.childType(mod); const src_elem_ty = src_ty.childType(mod); const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src, null); if (child != .ok) { return InMemoryCoercionResult{ .array_elem = .{ .child = try child.dupe(sema.arena), .actual = src_elem_ty, .wanted = dest_elem_ty, } }; } if (dest_tag == .Array) { const dest_info = dest_ty.arrayInfo(mod); if (dest_info.sentinel != null) { return InMemoryCoercionResult{ .array_sentinel = .{ .actual = Value.@"unreachable", .wanted = dest_info.sentinel.?, .ty = dest_info.elem_type, } }; } } // The memory layout of @Vector(N, iM) is the same as the integer type i(N*M), // that is to say, the padding bits are not in the same place as the array [N]iM. // If there's no padding, the bitcast is possible. 
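// For example (illustrative): `[4]u8` <-> `@Vector(4, u8)` is allowed since
// `u8` has no padding (bit size 8 == ABI size of 1 byte * 8), while
// `[4]u7` <-> `@Vector(4, u7)` is not (bit size 7 != 8).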
const elem_bit_size = dest_elem_ty.bitSize(pt); const elem_abi_byte_size = dest_elem_ty.abiSize(pt); if (elem_abi_byte_size * 8 == elem_bit_size) return .ok; } // Optionals if (dest_tag == .Optional and src_tag == .Optional) { if ((maybe_dest_ptr_ty != null) != (maybe_src_ptr_ty != null)) { return InMemoryCoercionResult{ .optional_shape = .{ .actual = src_ty, .wanted = dest_ty, } }; } const dest_child_type = dest_ty.optionalChild(mod); const src_child_type = src_ty.optionalChild(mod); const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src, null); if (child != .ok) { return InMemoryCoercionResult{ .optional_child = .{ .child = try child.dupe(sema.arena), .actual = src_child_type, .wanted = dest_child_type, } }; } return .ok; } // Tuples (with in-memory-coercible fields) if (dest_ty.isTuple(mod) and src_ty.isTuple(mod)) tuple: { if (dest_ty.containerLayout(mod) != src_ty.containerLayout(mod)) break :tuple; if (dest_ty.structFieldCount(mod) != src_ty.structFieldCount(mod)) break :tuple; const field_count = dest_ty.structFieldCount(mod); for (0..field_count) |field_idx| { if (dest_ty.structFieldIsComptime(field_idx, mod) != src_ty.structFieldIsComptime(field_idx, mod)) break :tuple; if (dest_ty.structFieldAlign(field_idx, pt) != src_ty.structFieldAlign(field_idx, pt)) break :tuple; const dest_field_ty = dest_ty.structFieldType(field_idx, mod); const src_field_ty = src_ty.structFieldType(field_idx, mod); const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src, null); if (field != .ok) break :tuple; } return .ok; } return InMemoryCoercionResult{ .no_match = .{ .actual = dest_ty, .wanted = src_ty, } }; } fn coerceInMemoryAllowedErrorSets( sema: *Sema, block: *Block, dest_ty: Type, src_ty: Type, dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; // Coercion to `anyerror`. Note that this check can return false negatives // in case the error sets did not get resolved. if (dest_ty.isAnyError(mod)) { return .ok; } if (dest_ty.toIntern() == .adhoc_inferred_error_set_type) { // We are trying to coerce an error set to the current function's // inferred error set. const dst_ies = sema.fn_ret_ty_ies.?; try dst_ies.addErrorSet(src_ty, ip, sema.arena); return .ok; } if (ip.isInferredErrorSetType(dest_ty.toIntern())) { const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern()); if (sema.fn_ret_ty_ies) |dst_ies| { if (dst_ies.func == dst_ies_func_index) { // We are trying to coerce an error set to the current function's // inferred error set. try dst_ies.addErrorSet(src_ty, ip, sema.arena); return .ok; } } switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) { // isAnyError might have changed from a false negative to a true // positive after resolution. 
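// (Illustrative: an inferred error set that resolved to `anyerror` is
// accepted by the prong below.) A sketch of the surrounding rules:
//     const E = error{ A, B };
//     const a: anyerror = @as(E, error.A); // any error set coerces to anyerror
//     const b: E = @as(anyerror, error.A); // rejected; reported as `.from_anyerror`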
.anyerror_type => return .ok, else => {}, } } var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa); defer missing_error_buf.deinit(); switch (src_ty.toIntern()) { .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) { .simple_type => unreachable, // filtered out above .error_set_type, .inferred_error_set_type => return .from_anyerror, else => unreachable, }, else => switch (ip.indexToKey(src_ty.toIntern())) { .inferred_error_set_type => { const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern()); // src anyerror status might have changed after the resolution. if (resolved_src_ty == .anyerror_type) { // dest_ty.isAnyError(mod) == true is already checked for at this point. return .from_anyerror; } for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| { if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) { try missing_error_buf.append(key); } } if (missing_error_buf.items.len != 0) { return InMemoryCoercionResult{ .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items), }; } return .ok; }, .error_set_type => |error_set_type| { for (error_set_type.names.get(ip)) |name| { if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) { try missing_error_buf.append(name); } } if (missing_error_buf.items.len != 0) { return InMemoryCoercionResult{ .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items), }; } return .ok; }, else => unreachable, }, } } fn coerceInMemoryAllowedFns( sema: *Sema, block: *Block, dest_ty: Type, src_ty: Type, target: std.Target, dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const dest_info = mod.typeToFunc(dest_ty).?; const src_info = mod.typeToFunc(src_ty).?; { if (dest_info.is_var_args != src_info.is_var_args) { return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args }; } if (dest_info.is_generic != src_info.is_generic) { return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic }; } if (dest_info.cc != src_info.cc) { return InMemoryCoercionResult{ .fn_cc = .{ .actual = src_info.cc, .wanted = dest_info.cc, } }; } switch (src_info.return_type) { .noreturn_type, .generic_poison_type => {}, else => { const dest_return_type = Type.fromInterned(dest_info.return_type); const src_return_type = Type.fromInterned(src_info.return_type); const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src, null); if (rt != .ok) { return InMemoryCoercionResult{ .fn_return_type = .{ .child = try rt.dupe(sema.arena), .actual = src_return_type, .wanted = dest_return_type, } }; } }, } } const params_len = params_len: { if (dest_info.param_types.len != src_info.param_types.len) { return InMemoryCoercionResult{ .fn_param_count = .{ .actual = src_info.param_types.len, .wanted = dest_info.param_types.len, } }; } if (dest_info.noalias_bits != src_info.noalias_bits) { return InMemoryCoercionResult{ .fn_param_noalias = .{ .actual = src_info.noalias_bits, .wanted = dest_info.noalias_bits, } }; } break :params_len dest_info.param_types.len; }; for (0..params_len) |param_i| { const dest_param_ty = Type.fromInterned(dest_info.param_types.get(ip)[param_i]); const src_param_ty = Type.fromInterned(src_info.param_types.get(ip)[param_i]); const param_i_small: u5 = @intCast(param_i); if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) { 
return InMemoryCoercionResult{ .fn_param_comptime = .{ .index = param_i, .wanted = dest_info.paramIsComptime(param_i_small), } }; } switch (src_param_ty.toIntern()) { .generic_poison_type => {}, else => { // Note: Cast direction is reversed here. const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src, null); if (param != .ok) { return InMemoryCoercionResult{ .fn_param = .{ .child = try param.dupe(sema.arena), .actual = src_param_ty, .wanted = dest_param_ty, .index = param_i, } }; } }, } } return .ok; } fn coerceInMemoryAllowedPtrs( sema: *Sema, block: *Block, dest_ty: Type, src_ty: Type, dest_ptr_ty: Type, src_ptr_ty: Type, dest_is_mut: bool, target: std.Target, dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { const pt = sema.pt; const zcu = pt.zcu; const dest_info = dest_ptr_ty.ptrInfo(zcu); const src_info = src_ptr_ty.ptrInfo(zcu); const ok_ptr_size = src_info.flags.size == dest_info.flags.size or src_info.flags.size == .C or dest_info.flags.size == .C; if (!ok_ptr_size) { return InMemoryCoercionResult{ .ptr_size = .{ .actual = src_info.flags.size, .wanted = dest_info.flags.size, } }; } const ok_cv_qualifiers = (!src_info.flags.is_const or dest_info.flags.is_const) and (!src_info.flags.is_volatile or dest_info.flags.is_volatile); if (!ok_cv_qualifiers) { return InMemoryCoercionResult{ .ptr_qualifiers = .{ .actual_const = src_info.flags.is_const, .wanted_const = dest_info.flags.is_const, .actual_volatile = src_info.flags.is_volatile, .wanted_volatile = dest_info.flags.is_volatile, } }; } if (dest_info.flags.address_space != src_info.flags.address_space) { return InMemoryCoercionResult{ .ptr_addrspace = .{ .actual = src_info.flags.address_space, .wanted = dest_info.flags.address_space, } }; } const dest_child = Type.fromInterned(dest_info.child); const src_child = Type.fromInterned(src_info.child); const child = try sema.coerceInMemoryAllowed(block, dest_child, src_child, !dest_info.flags.is_const, target, dest_src, src_src, null); if (child != .ok) allow: { // As a special case, we also allow coercing `*[n:s]T` to `*[n]T`, akin to dropping the sentinel from a slice. // `*[n:s]T` cannot coerce in memory to `*[n]T` since they have different sizes. 
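// Illustrative (a sketch):
//
//     var a: [2:0]u8 = .{ 1, 2 };
//     const p: *[2]u8 = &a; // ok: the sentinel is dropped
//     // the reverse, `*[2]u8` -> `*[2:0]u8`, reports `.ptr_child`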
if (src_child.zigTypeTag(zcu) == .Array and dest_child.zigTypeTag(zcu) == .Array and src_child.sentinel(zcu) != null and dest_child.sentinel(zcu) == null and .ok == try sema.coerceInMemoryAllowed(block, dest_child.childType(zcu), src_child.childType(zcu), !dest_info.flags.is_const, target, dest_src, src_src, null)) { break :allow; } return InMemoryCoercionResult{ .ptr_child = .{ .child = try child.dupe(sema.arena), .actual = Type.fromInterned(src_info.child), .wanted = Type.fromInterned(dest_info.child), } }; } const dest_allow_zero = dest_ty.ptrAllowsZero(zcu); const src_allow_zero = src_ty.ptrAllowsZero(zcu); const ok_allows_zero = (dest_allow_zero and (src_allow_zero or !dest_is_mut)) or (!dest_allow_zero and !src_allow_zero); if (!ok_allows_zero) { return InMemoryCoercionResult{ .ptr_allowzero = .{ .actual = src_ty, .wanted = dest_ty, } }; } if (src_info.packed_offset.host_size != dest_info.packed_offset.host_size or src_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset) { return InMemoryCoercionResult{ .ptr_bit_range = .{ .actual_host = src_info.packed_offset.host_size, .wanted_host = dest_info.packed_offset.host_size, .actual_offset = src_info.packed_offset.bit_offset, .wanted_offset = dest_info.packed_offset.bit_offset, } }; } const ok_sent = dest_info.sentinel == .none or src_info.flags.size == .C or (src_info.sentinel != .none and dest_info.sentinel == try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_info.sentinel, dest_info.child)); if (!ok_sent) { return InMemoryCoercionResult{ .ptr_sentinel = .{ .actual = switch (src_info.sentinel) { .none => Value.@"unreachable", else => Value.fromInterned(src_info.sentinel), }, .wanted = switch (dest_info.sentinel) { .none => Value.@"unreachable", else => Value.fromInterned(dest_info.sentinel), }, .ty = Type.fromInterned(dest_info.child), } }; } // If both pointers have alignment 0, it means they both want ABI alignment. // In this case, if they share the same child type, no need to resolve // pointee type alignment. Otherwise both pointee types must have their alignment // resolved and we compare the alignment numerically. 
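// Illustrative (a sketch): alignment may decrease, never increase.
//
//     var x: u32 align(8) = 0;
//     const p: *align(4) u32 = &x; // ok: 4 <= 8
//     // `*align(4) u32` -> `*align(8) u32` reports `.ptr_alignment`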
if (src_info.flags.alignment != .none or dest_info.flags.alignment != .none or dest_info.child != src_info.child) { const src_align = if (src_info.flags.alignment != .none) src_info.flags.alignment else try sema.typeAbiAlignment(Type.fromInterned(src_info.child)); const dest_align = if (dest_info.flags.alignment != .none) dest_info.flags.alignment else try sema.typeAbiAlignment(Type.fromInterned(dest_info.child)); if (dest_align.compare(.gt, src_align)) { return InMemoryCoercionResult{ .ptr_alignment = .{ .actual = src_align, .wanted = dest_align, } }; } } return .ok; } fn coerceVarArgParam( sema: *Sema, block: *Block, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (block.is_typeof) return inst; const pt = sema.pt; const zcu = pt.zcu; const uncasted_ty = sema.typeOf(inst); const coerced = switch (uncasted_ty.zigTypeTag(zcu)) { // TODO consider casting to c_int/f64 if they fit .ComptimeInt, .ComptimeFloat => return sema.fail( block, inst_src, "integer and float literals passed to variadic function must be casted to a fixed-size number type", .{}, ), .Fn => fn_ptr: { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_nav = zcu.funcInfo(fn_val.toIntern()).owner_nav; break :fn_ptr try sema.analyzeNavRef(inst_src, fn_nav); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .Float => float: { const target = zcu.getTarget(); const double_bits = target.cTypeBitSize(.double); const inst_bits = uncasted_ty.floatBits(target); if (inst_bits >= double_bits) break :float inst; switch (double_bits) { 32 => break :float try sema.coerce(block, Type.f32, inst, inst_src), 64 => break :float try sema.coerce(block, Type.f64, inst, inst_src), else => unreachable, } }, else => if (uncasted_ty.isAbiInt(zcu)) int: { if (!try sema.validateExternType(uncasted_ty, .param_ty)) break :int inst; const target = zcu.getTarget(); const uncasted_info = uncasted_ty.intInfo(zcu); if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) { .signed => .int, .unsigned => .uint, })) break :int try sema.coerce(block, switch (uncasted_info.signedness) { .signed => Type.c_int, .unsigned => Type.c_uint, }, inst, inst_src); if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) { .signed => .long, .unsigned => .ulong, })) break :int try sema.coerce(block, switch (uncasted_info.signedness) { .signed => Type.c_long, .unsigned => Type.c_ulong, }, inst, inst_src); if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) { .signed => .longlong, .unsigned => .ulonglong, })) break :int try sema.coerce(block, switch (uncasted_info.signedness) { .signed => Type.c_longlong, .unsigned => Type.c_ulonglong, }, inst, inst_src); break :int inst; } else inst, }; const coerced_ty = sema.typeOf(coerced); if (!try sema.validateExternType(coerced_ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot pass '{}' to variadic function", .{coerced_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, inst_src, coerced_ty, .param_ty); try sema.addDeclaredHereNote(msg, coerced_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } return coerced; } // TODO migrate callsites to use storePtr2 instead. 
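// A sketch of what `storePtr` lowers (illustrative): an assignment through a
// pointer such as
//
//     var x: u32 = 0;
//     const p = &x;
//     p.* = 42;
//
// becomes a `.store_safe` AIR instruction when the block wants safety, and a
// plain `.store` otherwise.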
fn storePtr( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, uncasted_operand: Air.Inst.Ref, ) CompileError!void { const air_tag: Air.Inst.Tag = if (block.wantSafety()) .store_safe else .store; return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, air_tag); } fn storePtr2( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, uncasted_operand: Air.Inst.Ref, operand_src: LazySrcLoc, air_tag: Air.Inst.Tag, ) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ptr_ty = sema.typeOf(ptr); if (ptr_ty.isConstPtr(mod)) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); const elem_ty = ptr_ty.childType(mod); // To generate better code for tuples, we detect a tuple operand here, and // analyze field loads and stores directly. This avoids an extra allocation + memcpy // which would occur if we used `coerce`. // However, we avoid this mechanism if the destination element type is a tuple, // because the regular store will be better for this case. // If the destination type is a struct we don't want this mechanism to trigger, because // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. const operand_ty = sema.typeOf(uncasted_operand); if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) { const field_count = operand_ty.structFieldCount(mod); var i: u32 = 0; while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i); const elem_index = try pt.intRef(Type.usize, i); const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true); try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store); } return; } // TODO do the same thing for anon structs as for tuples above. // However, beware of the need to handle missing/extra fields. const is_ret = air_tag == .ret_ptr; // Detect if we are storing an array operand to a bitcasted vector pointer. // If so, we instead reach through the bitcasted pointer to the vector pointer, // bitcast the array operand to a vector, and then lower this as a store of // a vector value to a vector pointer. 
This generally results in better code, // as well as working around an LLVM bug: // https://github.com/ziglang/zig/issues/11154 if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| { const vector_ty = sema.typeOf(vector_ptr).childType(mod); const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }; try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store); return; } const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, }; const maybe_operand_val = try sema.resolveValue(operand); const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { const operand_val = maybe_operand_val orelse { try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; if (sema.isComptimeMutablePtr(ptr_val)) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; } else ptr_src; // We do this after the possible comptime store above, for the case of field_ptr stores // to unions because we want the comptime tag to be set, even if the field type is void. if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) { return; } try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) { const ptr_inst = ptr.toIndex().?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) { const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl; const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data; _ = try block.addInst(.{ .tag = .vector_store_elem, .data = .{ .vector_store_elem = .{ .vector_ptr = bin_op.lhs, .payload = try block.sema.addExtra(Air.Bin{ .lhs = bin_op.rhs, .rhs = operand, }), } }, }); return; } return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{ ptr_ty.fmt(pt), }); } const store_inst = if (is_ret) try block.addBinOp(.store, ptr, operand) else try block.addBinOp(air_tag, ptr, operand); try sema.checkComptimeKnownStore(block, store_inst, operand_src); return; } /// Given an AIR store instruction, checks whether we are performing a /// comptime-known store to a local alloc, and updates `maybe_comptime_allocs` /// accordingly. /// Handles calling `validateRuntimeValue` if the store is runtime for any reason. fn checkComptimeKnownStore(sema: *Sema, block: *Block, store_inst_ref: Air.Inst.Ref, store_src: LazySrcLoc) !void { const store_inst = store_inst_ref.toIndex().?; const inst_data = sema.air_instructions.items(.data)[@intFromEnum(store_inst)].bin_op; const ptr = inst_data.lhs.toIndex() orelse return; const operand = inst_data.rhs; known: { const maybe_base_alloc = sema.base_allocs.get(ptr) orelse break :known; const maybe_comptime_alloc = sema.maybe_comptime_allocs.getPtr(maybe_base_alloc) orelse break :known; if ((try sema.resolveValue(operand)) != null and block.runtime_index == maybe_comptime_alloc.runtime_index) { try maybe_comptime_alloc.stores.append(sema.arena, .{ .inst = store_inst, .src = store_src, }); return; } // We're newly discovering that this alloc is runtime-known. 
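// This happens when the operand is not comptime-known, or when the store
// executes under a runtime condition (its `runtime_index` differs from the
// alloc's), e.g. a store inside an `if` on a runtime value.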
try sema.markMaybeComptimeAllocRuntime(block, maybe_base_alloc); } try sema.validateRuntimeValue(block, store_src, operand); } /// Given an AIR instruction transforming a pointer (struct_field_ptr, /// ptr_elem_ptr, bitcast, etc), checks whether the base pointer refers to a /// local alloc, and updates `base_allocs` accordingly. fn checkKnownAllocPtr(sema: *Sema, block: *Block, base_ptr: Air.Inst.Ref, new_ptr: Air.Inst.Ref) !void { const base_ptr_inst = base_ptr.toIndex() orelse return; const new_ptr_inst = new_ptr.toIndex() orelse return; const alloc_inst = sema.base_allocs.get(base_ptr_inst) orelse return; try sema.base_allocs.put(sema.gpa, new_ptr_inst, alloc_inst); switch (sema.air_instructions.items(.tag)[@intFromEnum(new_ptr_inst)]) { .optional_payload_ptr_set, .errunion_payload_ptr_set => { const maybe_comptime_alloc = sema.maybe_comptime_allocs.getPtr(alloc_inst) orelse return; // This is functionally a store, since it writes the optional payload bit. // Thus, if it is behind a runtime condition, we must mark the alloc as runtime appropriately. if (block.runtime_index != maybe_comptime_alloc.runtime_index) { return sema.markMaybeComptimeAllocRuntime(block, alloc_inst); } try maybe_comptime_alloc.stores.append(sema.arena, .{ .inst = new_ptr_inst, .src = LazySrcLoc.unneeded, }); }, .ptr_elem_ptr => { const tmp_air = sema.getTmpAir(); const pl_idx = tmp_air.instructions.items(.data)[@intFromEnum(new_ptr_inst)].ty_pl.payload; const bin = tmp_air.extraData(Air.Bin, pl_idx).data; const index_ref = bin.rhs; // If the index value is runtime-known, this pointer is also runtime-known, so // we must in turn make the alloc value runtime-known. if (null == try sema.resolveValue(index_ref)) { try sema.markMaybeComptimeAllocRuntime(block, alloc_inst); } }, else => {}, } } fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Inst.Index) CompileError!void { const maybe_comptime_alloc = (sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return).value; // Since the alloc has been determined to be runtime, we must check that // all other stores to it are permitted to be runtime values. const slice = maybe_comptime_alloc.stores.slice(); for (slice.items(.inst), slice.items(.src)) |other_inst, other_src| { if (other_src.offset == .unneeded) { switch (sema.air_instructions.items(.tag)[@intFromEnum(other_inst)]) { .set_union_tag, .optional_payload_ptr_set, .errunion_payload_ptr_set => continue, else => unreachable, // assertion failure } } const other_data = sema.air_instructions.items(.data)[@intFromEnum(other_inst)].bin_op; const other_operand = other_data.rhs; if (!sema.checkRuntimeValue(other_operand)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(other_src, "runtime value contains reference to comptime var", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(other_src, msg, "comptime var pointers are not available at runtime", .{}); break :msg msg; }); } } } /// Traverse an arbitrary number of bitcasted pointers and return the underlying vector /// pointer, but only if the final element type matches the vector element type and the /// lengths match.
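/// Illustrative example (not from the original source): when storing an array
/// through `@as(*[4]f32, @ptrCast(vec_ptr))` with `vec_ptr: *@Vector(4, f32)`,
/// this walks back through the `bitcast` and yields `vec_ptr`.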
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const array_ty = sema.typeOf(ptr).childType(mod); if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_ref = ptr; var ptr_inst = ptr_ref.toIndex() orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); const vector_ty = while (air_tags[@intFromEnum(ptr_inst)] == .bitcast) { ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand; if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null; const child_ty = sema.typeOf(ptr_ref).childType(mod); if (child_ty.zigTypeTag(mod) == .Vector) break child_ty; ptr_inst = ptr_ref.toIndex() orelse return null; } else return null; // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. if (array_ty.childType(mod).eql(vector_ty.childType(mod), mod) and array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { return ptr_ref; } else { return null; } } /// Call when you have Value objects rather than Air instructions, and you want to /// assert the store must be done at comptime. fn storePtrVal( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, operand_val: Value, operand_ty: Type, ) !void { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; // TODO: audit use sites to eliminate this coercion const coerced_operand_val = try pt.getCoerced(operand_val, operand_ty); // TODO: audit use sites to eliminate this coercion const ptr_ty = try pt.ptrType(info: { var info = ptr_val.typeOf(zcu).ptrInfo(zcu); info.child = operand_ty.toIntern(); break :info info; }); const coerced_ptr_val = try pt.getCoerced(ptr_val, ptr_ty); switch (try sema.storeComptimePtr(block, src, coerced_ptr_val, coerced_operand_val)) { .success => {}, .runtime_store => unreachable, // use sites check this // TODO use failWithInvalidComptimeFieldStore .comptime_field_mismatch => return sema.fail( block, src, "value stored in comptime field does not match the default value of the field", .{}, ), .undef => return sema.failWithUseOfUndef(block, src), .err_payload => |err_name| return sema.fail(block, src, "attempt to unwrap error: {}", .{err_name.fmt(ip)}), .null_payload => return sema.fail(block, src, "attempt to use null value", .{}), .inactive_union_field => return sema.fail(block, src, "access of inactive union field", .{}), .needed_well_defined => |ty| return sema.fail( block, src, "comptime dereference requires '{}' to have a well-defined layout", .{ty.fmt(pt)}, ), .out_of_bounds => |ty| return sema.fail( block, src, "dereference of '{}' exceeds bounds of containing decl of type '{}'", .{ ptr_ty.fmt(pt), ty.fmt(pt) }, ), .exceeds_host_size => return sema.fail(block, src, "bit-pointer target exceeds host size", .{}), } } fn bitCast( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; try dest_ty.resolveLayout(pt); const old_ty = sema.typeOf(inst); try old_ty.resolveLayout(pt); const dest_bits = dest_ty.bitSize(pt); const old_bits = old_ty.bitSize(pt); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ dest_ty.fmt(pt), dest_bits, old_ty.fmt(pt), old_bits, }); } if (try sema.resolveValue(inst)) |val| { if (val.isUndef(zcu)) return pt.undefRef(dest_ty); if (old_ty.zigTypeTag(zcu) == .ErrorSet 
and dest_ty.zigTypeTag(zcu) == .ErrorSet) { // Special case: we sometimes call `bitCast` on error set values, but they // don't have a well-defined layout, so we can't use `bitCastVal` on them. return Air.internedToRef((try pt.getCoerced(val, dest_ty)).toIntern()); } if (try sema.bitCastVal(val, dest_ty, 0, 0, 0)) |result_val| { return Air.internedToRef(result_val.toIntern()); } } try sema.requireRuntimeBlock(block, inst_src, operand_src); try sema.validateRuntimeValue(block, inst_src, inst); return block.addBitCast(dest_ty, inst); } fn coerceArrayPtrToSlice( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; if (try sema.resolveValue(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); const array_ty = ptr_array_ty.childType(mod); const slice_ptr_ty = dest_ty.slicePtrFieldType(mod); const slice_ptr = try pt.getCoerced(val, slice_ptr_ty); const slice_val = try pt.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = slice_ptr.toIntern(), .len = (try pt.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), } }); return Air.internedToRef(slice_val); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.array_to_slice, dest_ty, inst); } fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { const pt = sema.pt; const mod = pt.zcu; const dest_info = dest_ty.ptrInfo(mod); const inst_info = inst_ty.ptrInfo(mod); const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(mod) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(mod) == 0 or (Type.fromInterned(inst_info.child).arrayLen(mod) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or (Type.fromInterned(inst_info.child).isTuple(mod) and Type.fromInterned(inst_info.child).structFieldCount(mod) == 0); const ok_cv_qualifiers = ((!inst_info.flags.is_const or dest_info.flags.is_const) or len0) and (!inst_info.flags.is_volatile or dest_info.flags.is_volatile); if (!ok_cv_qualifiers) { in_memory_result.* = .{ .ptr_qualifiers = .{ .actual_const = inst_info.flags.is_const, .wanted_const = dest_info.flags.is_const, .actual_volatile = inst_info.flags.is_volatile, .wanted_volatile = dest_info.flags.is_volatile, } }; return false; } if (dest_info.flags.address_space != inst_info.flags.address_space) { in_memory_result.* = .{ .ptr_addrspace = .{ .actual = inst_info.flags.address_space, .wanted = dest_info.flags.address_space, } }; return false; } if (inst_info.flags.alignment == .none and dest_info.flags.alignment == .none) return true; if (len0) return true; const inst_align = if (inst_info.flags.alignment != .none) inst_info.flags.alignment else Type.fromInterned(inst_info.child).abiAlignment(pt); const dest_align = if (dest_info.flags.alignment != .none) dest_info.flags.alignment else Type.fromInterned(dest_info.child).abiAlignment(pt); if (dest_align.compare(.gt, inst_align)) { in_memory_result.* = .{ .ptr_alignment = .{ .actual = inst_align, .wanted = dest_align, } }; return false; } return true; } fn coerceCompatiblePtrs( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_ty = sema.typeOf(inst); if (try sema.resolveValue(inst)) |val| { if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer cast to type
'{}'", .{dest_ty.fmt(pt)}); } // The comptime Value representation is compatible with both types. return Air.internedToRef( (try pt.getCoerced(val, dest_ty)).toIntern(), ); } try sema.requireRuntimeBlock(block, inst_src, null); const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const actual_ptr = if (inst_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) else inst; const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); const ok = if (inst_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, inst_src, inst); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bool_or, len_zero, is_non_zero); } else is_non_zero; try sema.addSafetyCheck(block, inst_src, ok, .cast_to_null); } const new_ptr = try sema.bitCast(block, dest_ty, inst, inst_src, null); try sema.checkKnownAllocPtr(block, inst, new_ptr); return new_ptr; } fn coerceEnumToUnion( sema: *Sema, block: *Block, union_ty: Type, union_ty_src: LazySrcLoc, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ union_ty.fmt(pt), inst_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(union_ty_src, msg, "cannot coerce enum to untagged union", .{}); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src); if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| { const field_index = union_ty.unionTagFieldIndex(val, pt.zcu) orelse { return sema.fail(block, inst_src, "union '{}' has no tag with value '{}'", .{ union_ty.fmt(pt), val.fmtValueSema(pt, sema), }); }; const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); try field_ty.resolveFields(pt); if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index]; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse { const msg = msg: { const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index]; const msg = try sema.errMsg(inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{ inst_ty.fmt(pt), union_ty.fmt(pt), field_ty.fmt(pt), field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; return Air.internedToRef((try pt.unionValue(union_ty, val, opv)).toIntern()); } try 
sema.requireRuntimeBlock(block, inst_src, null); if (tag_ty.isNonexhaustiveEnum(mod)) { const msg = msg: { const msg = try sema.errMsg(inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{ union_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const union_obj = mod.typeToUnion(union_ty).?; { var msg: ?*Module.ErrorMsg = null; errdefer if (msg) |some| some.destroy(sema.gpa); for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| { if (Type.fromInterned(field_ty).zigTypeTag(mod) == .NoReturn) { const err_msg = msg orelse try sema.errMsg( inst_src, "runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field", .{ tag_ty.fmt(pt), union_ty.fmt(pt) }, ); msg = err_msg; try sema.addFieldErrNote(union_ty, field_index, err_msg, "'noreturn' field here", .{}); } } if (msg) |some| { msg = null; try sema.addDeclaredHereNote(some, union_ty); return sema.failWithOwnedErrorMsg(block, some); } } // If the union has all fields 0 bits, the union value is just the enum value. if (union_ty.unionHasAllZeroBitFieldTypes(pt)) { return block.addBitCast(union_ty, enum_tag); } const msg = msg: { const msg = try sema.errMsg( inst_src, "runtime coercion from enum '{}' to union '{}' which has non-void fields", .{ tag_ty.fmt(pt), union_ty.fmt(pt) }, ); errdefer msg.destroy(sema.gpa); for (0..union_obj.field_types.len) |field_index| { const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index]; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); if (!(try sema.typeHasRuntimeBits(field_ty))) continue; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{ field_name.fmt(ip), field_ty.fmt(pt), }); } try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } fn coerceAnonStructToUnion( sema: *Sema, block: *Block, union_ty: Type, union_ty_src: LazySrcLoc, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const field_info: union(enum) { name: InternPool.NullTerminatedString, count: usize, } = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) .{ .name = anon_struct_type.names.get(ip)[0] } else .{ .count = anon_struct_type.names.len }, .struct_type => name: { const field_names = ip.loadStructType(inst_ty.toIntern()).field_names.get(ip); break :name if (field_names.len == 1) .{ .name = field_names[0] } else .{ .count = field_names.len }; }, else => unreachable, }; switch (field_info) { .name => |field_name| { const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); }, .count => |field_count| { assert(field_count != 1); const msg = msg: { const msg = if (field_count > 1) try sema.errMsg( inst_src, "cannot initialize multiple union fields at once; unions can only have one active field", .{}, ) else try sema.errMsg( inst_src, "union initializer must initialize one field", .{}, ); errdefer msg.destroy(sema.gpa); // TODO add notes for where the anon struct was created to point out // the extra fields. 
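// e.g. `@as(U, .{ .a = 1, .b = 2 })` reaches the `field_count > 1` message
// above, while `@as(U, .{})` reaches the zero-field message.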
try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }, } } fn coerceAnonStructToUnionPtrs( sema: *Sema, block: *Block, ptr_union_ty: Type, union_ty_src: LazySrcLoc, ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const union_ty = ptr_union_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); return sema.analyzeRef(block, union_ty_src, union_inst); } fn coerceAnonStructToStructPtrs( sema: *Sema, block: *Block, ptr_struct_ty: Type, struct_ty_src: LazySrcLoc, ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const struct_ty = ptr_struct_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); return sema.analyzeRef(block, struct_ty_src, struct_inst); } /// If the lengths match, coerces element-wise. fn coerceArrayLike( sema: *Sema, block: *Block, dest_ty: Type, dest_ty_src: LazySrcLoc, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_ty = sema.typeOf(inst); const target = mod.getTarget(); // try coercion of the whole array const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src, null); if (in_memory_result == .ok) { if (try sema.resolveValue(inst)) |inst_val| { // These types share the same comptime value representation. 
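// e.g. a comptime-known `[4]u8` value coercing to `@Vector(4, u8)`: the
// in-memory check above already succeeded, so no per-element work is needed.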
return sema.coerceInMemory(inst_val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); } // otherwise, try element by element const inst_len = inst_ty.arrayLen(mod); const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod)); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), inst_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_ty_src, msg, "destination has length {d}", .{dest_len}); try sema.errNote(inst_src, msg, "source has length {d}", .{inst_len}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const dest_elem_ty = dest_ty.childType(mod); if (dest_ty.isVector(mod) and inst_ty.isVector(mod) and (try sema.resolveValue(inst)) == null) { const inst_elem_ty = inst_ty.childType(mod); switch (dest_elem_ty.zigTypeTag(mod)) { .Int => if (inst_elem_ty.isInt(mod)) { // integer widening const dst_info = dest_elem_ty.intInfo(mod); const src_info = inst_elem_ty.intInfo(mod); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) { try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.intcast, dest_ty, inst); } }, .Float => if (inst_elem_ty.isRuntimeFloat()) { // float widening const src_bits = inst_elem_ty.floatBits(target); const dst_bits = dest_elem_ty.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.fpext, dest_ty, inst); } }, else => {}, } } const element_vals = try sema.arena.alloc(InternPool.Index, dest_len); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len); var runtime_src: ?LazySrcLoc = null; for (element_vals, element_refs, 0..) |*val, *ref, i| { const index_ref = Air.internedToRef((try pt.intValue(Type.usize, i)).toIntern()); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); ref.* = coerced; if (runtime_src == null) { if (try sema.resolveValue(coerced)) |elem_val| { val.* = elem_val.toIntern(); } else { runtime_src = elem_src; } } } if (runtime_src) |rs| { try sema.requireRuntimeBlock(block, inst_src, rs); return block.addAggregateInit(dest_ty, element_refs); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = element_vals }, } }))); } /// If the lengths match, coerces element-wise. 
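/// For example (illustrative), `.{ 1, 2, 3 }` coerces to `[3]u32` by coercing
/// each field to `u32`; if every element ends up comptime-known, the result is
/// an interned aggregate value instead of a runtime `aggregate_init`.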
fn coerceTupleToArray( sema: *Sema, block: *Block, dest_ty: Type, dest_ty_src: LazySrcLoc, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const inst_ty = sema.typeOf(inst); const inst_len = inst_ty.arrayLen(mod); const dest_len = dest_ty.arrayLen(mod); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), inst_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); try sema.errNote(dest_ty_src, msg, "destination has length {d}", .{dest_len}); try sema.errNote(inst_src, msg, "source has length {d}", .{inst_len}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len); const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; for (element_vals, element_refs, 0..) |*val, *ref, i_usize| { const i: u32 = @intCast(i_usize); if (i_usize == inst_len) { const sentinel_val = dest_ty.sentinel(mod).?; val.* = sentinel_val.toIntern(); ref.* = Air.internedToRef(sentinel_val.toIntern()); break; } const elem_src = inst_src; // TODO better source location const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); ref.* = coerced; if (runtime_src == null) { if (try sema.resolveValue(coerced)) |elem_val| { val.* = elem_val.toIntern(); } else { runtime_src = elem_src; } } } if (runtime_src) |rs| { try sema.requireRuntimeBlock(block, inst_src, rs); return block.addAggregateInit(dest_ty, element_refs); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .elems = element_vals }, } }))); } /// If the lengths match, coerces element-wise. fn coerceTupleToSlicePtrs( sema: *Sema, block: *Block, slice_ty: Type, slice_ty_src: LazySrcLoc, ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const slice_info = slice_ty.ptrInfo(mod); const array_ty = try pt.arrayType(.{ .len = tuple_ty.structFieldCount(mod), .sentinel = slice_info.sentinel, .child = slice_info.child, }); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.flags.alignment != .none) { return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{}); } const ptr_array = try sema.analyzeRef(block, slice_ty_src, array_inst); return sema.coerceArrayPtrToSlice(block, slice_ty, ptr_array, slice_ty_src); } /// If the lengths match, coerces element-wise. 
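/// Loads the tuple through `ptr_tuple`, coerces the loaded value to the
/// pointee array type via `coerceTupleToArray`, and returns a reference to
/// the result.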
fn coerceTupleToArrayPtrs( sema: *Sema, block: *Block, ptr_array_ty: Type, array_ty_src: LazySrcLoc, ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const ptr_info = ptr_array_ty.ptrInfo(mod); const array_ty = Type.fromInterned(ptr_info.child); const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src); if (ptr_info.flags.alignment != .none) { return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{}); } const ptr_array = try sema.analyzeRef(block, array_ty_src, array_inst); return ptr_array; } /// Handles both tuples and anon struct literals. Coerces field-wise. Reports /// errors for both extra fields and missing fields. fn coerceTupleToStruct( sema: *Sema, block: *Block, struct_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; try struct_ty.resolveFields(pt); try struct_ty.resolveStructFieldInits(pt); if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } const struct_type = mod.typeToStruct(struct_ty).?; const field_vals = try sema.arena.alloc(InternPool.Index, struct_type.field_types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; const field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len, else => unreachable, }; for (0..field_count) |tuple_field_index| { const field_src = inst_src; // TODO better source location const field_name = inst_ty.structFieldName(tuple_field_index, mod).unwrap() orelse try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{tuple_field_index}, .no_embedded_nulls); const struct_field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); const struct_field_ty = Type.fromInterned(struct_type.field_types.get(ip)[struct_field_index]); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, @intCast(tuple_field_index)); const coerced = try sema.coerce(block, struct_field_ty, elem_ref, field_src); field_refs[struct_field_index] = coerced; if (struct_type.fieldIsComptime(ip, struct_field_index)) { const init_val = (try sema.resolveValue(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", }); }; const field_init = Value.fromInterned(struct_type.field_inits.get(ip)[struct_field_index]); if (!init_val.eql(field_init, struct_field_ty, pt.zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, tuple_field_index); } } if (runtime_src == null) { if (try sema.resolveValue(coerced)) |field_val| { field_vals[struct_field_index] = field_val.toIntern(); } else { runtime_src = field_src; } } } // Populate default field values and report errors for missing fields. var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); for (field_refs, 0..) 
|*field_ref, i| { if (field_ref.* != .none) continue; const field_name = struct_type.field_names.get(ip)[i]; const field_default_val = struct_type.fieldInit(ip, i); const field_src = inst_src; // TODO better source location if (field_default_val == .none) { const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(field_src, msg, template, args); } else { root_msg = try sema.errMsg(field_src, template, args); } continue; } if (runtime_src == null) { field_vals[i] = field_default_val; } else { field_ref.* = Air.internedToRef(field_default_val); } } if (root_msg) |msg| { try sema.addDeclaredHereNote(msg, struct_ty); root_msg = null; return sema.failWithOwnedErrorMsg(block, msg); } if (runtime_src) |rs| { try sema.requireRuntimeBlock(block, inst_src, rs); return block.addAggregateInit(struct_ty, field_refs); } const struct_val = try pt.intern(.{ .aggregate = .{ .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); // TODO: figure out InternPool removals for incremental compilation //errdefer ip.remove(struct_val); return Air.internedToRef(struct_val); } fn coerceTupleToTuple( sema: *Sema, block: *Block, tuple_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.len, else => unreachable, }; const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len, else => unreachable, }; if (src_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; for (0..dest_field_count) |field_index_usize| { const field_i: u32 = @intCast(field_index_usize); const field_src = inst_src; // TODO better source location const field_name = inst_ty.structFieldName(field_index_usize, mod).unwrap() orelse try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index_usize}, .no_embedded_nulls); if (field_name.eqlSlice("len", ip)) return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize], .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.get(ip)[field_index_usize], else => unreachable, }; const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize], .struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, field_index_usize), else => unreachable, }; const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src); field_refs[field_index] = coerced; if (default_val != .none) { const init_val = (try sema.resolveValue(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, .{ 
.needed_comptime_reason = "value stored in comptime field must be comptime-known", }); }; if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), pt.zcu)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveValue(coerced)) |field_val| { field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } } } // Populate default field values and report errors for missing fields. var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); for (field_refs, 0..) |*field_ref, i_usize| { const i: u32 = @intCast(i_usize); if (field_ref.* != .none) continue; const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i], .struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, i), else => unreachable, }; const field_src = inst_src; // TODO better source location if (default_val == .none) { const field_name = tuple_ty.structFieldName(i, mod).unwrap() orelse { const template = "missing tuple field: {d}"; if (root_msg) |msg| { try sema.errNote(field_src, msg, template, .{i}); } else { root_msg = try sema.errMsg(field_src, template, .{i}); } continue; }; const template = "missing struct field: {}"; const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(field_src, msg, template, args); } else { root_msg = try sema.errMsg(field_src, template, args); } continue; } if (runtime_src == null) { field_vals[i] = default_val; } else { field_ref.* = Air.internedToRef(default_val); } } if (root_msg) |msg| { try sema.addDeclaredHereNote(msg, tuple_ty); root_msg = null; return sema.failWithOwnedErrorMsg(block, msg); } if (runtime_src) |rs| { try sema.requireRuntimeBlock(block, inst_src, rs); return block.addAggregateInit(tuple_ty, field_refs); } return Air.internedToRef((try pt.intern(.{ .aggregate = .{ .ty = tuple_ty.toIntern(), .storage = .{ .elems = field_vals }, } }))); } fn analyzeNavVal( sema: *Sema, block: *Block, src: LazySrcLoc, nav_index: InternPool.Nav.Index, ) CompileError!Air.Inst.Ref { const ref = try sema.analyzeNavRefInner(src, nav_index, false); return sema.analyzeLoad(block, src, ref, src); } fn addReferenceEntry( sema: *Sema, src: LazySrcLoc, referenced_unit: AnalUnit, ) !void { const zcu = sema.pt.zcu; if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); if (gop.found_existing) return; // TODO: we need to figure out how to model inline calls here. // They aren't references in the analysis sense, but ought to show up in the reference trace! // Would representing inline calls in the reference table cause excessive memory usage? 
try zcu.addUnitReference(sema.owner, referenced_unit, src); } fn addTypeReferenceEntry( sema: *Sema, src: LazySrcLoc, referenced_type: InternPool.Index, ) !void { const zcu = sema.pt.zcu; if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return; const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type); if (gop.found_existing) return; try zcu.addTypeReference(sema.owner, referenced_type, src); } pub fn ensureNavResolved(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const nav = ip.getNav(nav_index); const cau_index = nav.analysis_owner.unwrap() orelse { assert(nav.status == .resolved); return; }; // Note that even if `nav.status == .resolved`, we must still trigger `ensureCauAnalyzed` // to make sure the value is up-to-date on incremental updates. assert(ip.getCau(cau_index).owner.unwrap().nav == nav_index); const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); try sema.addReferenceEntry(src, anal_unit); if (zcu.analysis_in_progress.contains(anal_unit)) { return sema.failWithOwnedErrorMsg(null, try sema.errMsg(.{ .base_node_inst = ip.getCau(cau_index).zir_index, .offset = LazySrcLoc.Offset.nodeOffset(0), }, "dependency loop detected", .{})); } return pt.ensureCauAnalyzed(cau_index); } fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { const pt = sema.pt; const ptr_anyopaque_ty = try pt.singleConstPtrType(Type.anyopaque); return Value.fromInterned(try pt.intern(.{ .opt = .{ .ty = (try pt.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), .val = if (opt_val) |val| (try pt.getCoerced( Value.fromInterned(try sema.refValue(val.toIntern())), ptr_anyopaque_ty, )).toIntern() else .none, } })); } fn analyzeNavRef(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!Air.Inst.Ref { return sema.analyzeNavRefInner(src, nav_index, true); } /// Analyze a reference to the `Nav` at the given index. Ensures the underlying `Nav` is analyzed, but /// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeNavRef` wraps /// this function with `analyze_fn_body` set to true. 
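/// The returned pointer type is const unless the `Nav` resolves to a
/// `variable` (or to a non-const `extern`), as determined below from the
/// interned value.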
fn analyzeNavRefInner(sema: *Sema, src: LazySrcLoc, orig_nav_index: InternPool.Nav.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; // TODO: if this is a `decl_ref` of a non-variable Nav, only depend on Nav type try sema.declareDependency(.{ .nav_val = orig_nav_index }); try sema.ensureNavResolved(src, orig_nav_index); const nav_val = zcu.navValue(orig_nav_index); const nav_index, const is_const = switch (ip.indexToKey(nav_val.toIntern())) { .variable => |v| .{ v.owner_nav, false }, .func => |f| .{ f.owner_nav, true }, .@"extern" => |e| .{ e.owner_nav, e.is_const }, else => .{ orig_nav_index, true }, }; const nav_info = ip.getNav(nav_index).status.resolved; const ptr_ty = try pt.ptrTypeSema(.{ .child = nav_val.typeOf(zcu).toIntern(), .flags = .{ .alignment = nav_info.alignment, .is_const = is_const, .address_space = nav_info.@"addrspace", }, }); if (analyze_fn_body) { try sema.maybeQueueFuncBodyAnalysis(src, nav_index); } return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), .base_addr = .{ .nav = nav_index }, .byte_offset = 0, } }))); } fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) !void { const zcu = sema.pt.zcu; const ip = &zcu.intern_pool; const nav_val = zcu.navValue(nav_index); if (!ip.isFuncBody(nav_val.toIntern())) return; if (!try sema.fnHasRuntimeBits(nav_val.typeOf(zcu))) return; try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = nav_val.toIntern() })); try zcu.ensureFuncBodyAnalysisQueued(nav_val.toIntern()); } fn analyzeRef( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const operand_ty = sema.typeOf(operand); if (try sema.resolveValue(operand)) |val| { switch (mod.intern_pool.indexToKey(val.toIntern())) { .@"extern" => |e| return sema.analyzeNavRef(src, e.owner_nav), .func => |f| return sema.analyzeNavRef(src, f.owner_nav), else => return uavRef(sema, val.toIntern()), } } try sema.requireRuntimeBlock(block, src, null); const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local); const ptr_type = try pt.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .is_const = true, .address_space = address_space, }, }); const mut_ptr_type = try pt.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .address_space = address_space }, }); const alloc = try block.addTy(.alloc, mut_ptr_type); try sema.storePtr(block, src, alloc, operand); // TODO: Replace with sema.coerce when that supports adding pointer constness. 
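// The alloc uses the mutable pointer type so that the store above is allowed;
// the bitcast below merely reinterprets it as the const pointer type.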
return sema.bitCast(block, ptr_type, alloc, src, null); } fn analyzeLoad( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ptr_ty = sema.typeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}), }; if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(pt)}); } if (try sema.typeHasOnePossibleValue(elem_ty)) |opv| { return Air.internedToRef(opv.toIntern()); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } } if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) { const ptr_inst = ptr.toIndex().?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) { const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl; const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data; return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs); } return sema.fail(block, ptr_src, "unable to determine vector element index of type '{}'", .{ ptr_ty.fmt(pt), }); } return block.addTyOp(.load, elem_ty, ptr); } fn analyzeSlicePtr( sema: *Sema, block: *Block, slice_src: LazySrcLoc, slice: Air.Inst.Ref, slice_ty: Type, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const result_ty = slice_ty.slicePtrFieldType(mod); if (try sema.resolveValue(slice)) |val| { if (val.isUndef(mod)) return pt.undefRef(result_ty); return Air.internedToRef(val.slicePtr(mod).toIntern()); } try sema.requireRuntimeBlock(block, slice_src, null); return block.addTyOp(.slice_ptr, result_ty, slice); } fn analyzeOptionalSlicePtr( sema: *Sema, block: *Block, opt_slice_src: LazySrcLoc, opt_slice: Air.Inst.Ref, opt_slice_ty: Type, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const result_ty = opt_slice_ty.optionalChild(mod).slicePtrFieldType(mod); if (try sema.resolveValue(opt_slice)) |opt_val| { if (opt_val.isUndef(mod)) return pt.undefRef(result_ty); const slice_ptr: InternPool.Index = if (opt_val.optionalValue(mod)) |val| val.slicePtr(mod).toIntern() else .null_value; return Air.internedToRef(slice_ptr); } try sema.requireRuntimeBlock(block, opt_slice_src, null); const slice = try block.addTyOp(.optional_payload, opt_slice_ty, opt_slice); return block.addTyOp(.slice_ptr, result_ty, slice); } fn analyzeSliceLen( sema: *Sema, block: *Block, src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; if (try sema.resolveValue(slice_inst)) |slice_val| { if (slice_val.isUndef(mod)) { return pt.undefRef(Type.usize); } return pt.intRef(Type.usize, try slice_val.sliceLen(pt)); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.slice_len, Type.usize, slice_inst); } fn analyzeIsNull( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const result_ty = Type.bool; if (try sema.resolveValue(operand)) |opt_val| { if (opt_val.isUndef(mod)) { return pt.undefRef(result_ty); } const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; return if (bool_value) .bool_true else 
.bool_false; } const inverted_non_null_res: Air.Inst.Ref = if (invert_logic) .bool_true else .bool_false; const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { return inverted_non_null_res; } try sema.requireRuntimeBlock(block, src, null); const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null; return block.addUnOp(air_tag, operand); } fn analyzePtrIsNonErrComptimeOnly( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ptr_ty = sema.typeOf(operand); assert(ptr_ty.zigTypeTag(mod) == .Pointer); const child_ty = ptr_ty.childType(mod); const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return .bool_true; if (child_tag == .ErrorSet) return .bool_false; assert(child_tag == .ErrorUnion); _ = block; _ = src; return .none; } fn analyzeIsNonErrComptimeOnly( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const operand_ty = sema.typeOf(operand); const ot = operand_ty.zigTypeTag(mod); if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true; if (ot == .ErrorSet) return .bool_false; assert(ot == .ErrorUnion); const payload_ty = operand_ty.errorUnionPayload(mod); if (payload_ty.zigTypeTag(mod) == .NoReturn) { return .bool_false; } if (operand.toIndex()) |operand_inst| { switch (sema.air_instructions.items(.tag)[@intFromEnum(operand_inst)]) { .wrap_errunion_payload => return .bool_true, .wrap_errunion_err => return .bool_false, else => {}, } } else if (operand == .undef) { return pt.undefRef(Type.bool); } else if (@intFromEnum(operand) < InternPool.static_len) { // None of the ref tags can be errors. return .bool_true; } const maybe_operand_val = try sema.resolveValue(operand); // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. const set_ty = ip.errorUnionSet(operand_ty.toIntern()); switch (set_ty) { .anyerror_type => {}, .adhoc_inferred_error_set_type => if (sema.fn_ret_ty_ies) |ies| blk: { // If the error set is empty, we must return a comptime true or false. // However we want to avoid unnecessarily resolving an inferred error set // in case it is already non-empty. switch (ies.resolved) { .anyerror_type => break :blk, .none => {}, else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk, } if (maybe_operand_val != null) break :blk; // Try to avoid resolving inferred error set if possible. if (ies.errors.count() != 0) return .none; switch (ies.resolved) { .anyerror_type => return .none, .none => {}, else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) { 0 => return .bool_true, else => return .none, }, } // We do not have a comptime answer because this inferred error // set is not resolved, and an instruction later in this function // body may or may not cause an error to be added to this set. return .none; }, else => switch (ip.indexToKey(set_ty)) { .error_set_type => |error_set_type| { if (error_set_type.names.len == 0) return .bool_true; }, .inferred_error_set_type => |func_index| blk: { // If the error set is empty, we must return a comptime true or false. 
// However we want to avoid unnecessarily resolving an inferred error set // in case it is already non-empty. try mod.maybeUnresolveIes(func_index); switch (ip.funcIesResolvedUnordered(func_index)) { .anyerror_type => break :blk, .none => {}, else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk, } if (maybe_operand_val != null) break :blk; if (sema.fn_ret_ty_ies) |ies| { if (ies.func == func_index) { // Try to avoid resolving inferred error set if possible. if (ies.errors.count() != 0) return .none; switch (ies.resolved) { .anyerror_type => return .none, .none => {}, else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) { 0 => return .bool_true, else => return .none, }, } // We do not have a comptime answer because this inferred error // set is not resolved, and an instruction later in this function // body may or may not cause an error to be added to this set. return .none; } } const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty); if (resolved_ty == .anyerror_type) break :blk; if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0) return .bool_true; }, else => unreachable, }, } if (maybe_operand_val) |err_union| { if (err_union.isUndef(mod)) { return pt.undefRef(Type.bool); } if (err_union.getErrorName(mod) == .none) { return .bool_true; } else { return .bool_false; } } return .none; } fn analyzeIsNonErr( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const result = try sema.analyzeIsNonErrComptimeOnly(block, src, operand); if (result == .none) { try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(.is_non_err, operand); } else { return result; } } fn analyzePtrIsNonErr( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand); if (result == .none) { try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(.is_non_err_ptr, operand); } else { return result; } } fn analyzeSlice( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_ptr: Air.Inst.Ref, uncasted_start: Air.Inst.Ref, uncasted_end_opt: Air.Inst.Ref, sentinel_opt: Air.Inst.Ref, sentinel_src: LazySrcLoc, ptr_src: LazySrcLoc, start_src: LazySrcLoc, end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. 
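// e.g. slicing `var arr: [4]u8` as `arr[1..3]` passes `*[4]u8` here, while
// slicing a variable of type `[]u8` passes `*[]u8` (the double-pointer case).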
const ptr_ptr_ty = sema.typeOf(ptr_ptr); const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(pt)}), }; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty: Type = undefined; var ptr_sentinel: ?Value = null; switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); elem_ty = ptr_ptr_child_ty.childType(mod); }, .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) { .One => { const double_child_ty = ptr_ptr_child_ty.childType(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); if (double_child_ty.zigTypeTag(mod) == .Array) { ptr_sentinel = double_child_ty.sentinel(mod); slice_ty = ptr_ptr_child_ty; array_ty = double_child_ty; elem_ty = double_child_ty.childType(mod); } else { const bounds_error_message = "slice of single-item pointer must have comptime-known bounds [0..0], [0..1], or [1..1]"; if (uncasted_end_opt == .none) { return sema.fail(block, src, bounds_error_message, .{}); } const start_value = try sema.resolveConstDefinedValue( block, start_src, uncasted_start, .{ .needed_comptime_reason = bounds_error_message }, ); const end_value = try sema.resolveConstDefinedValue( block, end_src, uncasted_end_opt, .{ .needed_comptime_reason = bounds_error_message }, ); if (try sema.compareScalar(start_value, .neq, end_value, Type.comptime_int)) { if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, Type.comptime_int)) { const msg = msg: { const msg = try sema.errMsg(start_src, bounds_error_message, .{}); errdefer msg.destroy(sema.gpa); try sema.errNote( start_src, msg, "expected '{}', found '{}'", .{ Value.zero_comptime_int.fmtValueSema(pt, sema), start_value.fmtValueSema(pt, sema), }, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, Type.comptime_int)) { const msg = msg: { const msg = try sema.errMsg(end_src, bounds_error_message, .{}); errdefer msg.destroy(sema.gpa); try sema.errNote( end_src, msg, "expected '{}', found '{}'", .{ Value.one_comptime_int.fmtValueSema(pt, sema), end_value.fmtValueSema(pt, sema), }, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } else { if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, Type.comptime_int)) { return sema.fail( block, end_src, "end index {} out of bounds for slice of single-item pointer", .{end_value.fmtValueSema(pt, sema)}, ); } } array_ty = try pt.arrayType(.{ .len = 1, .child = double_child_ty.toIntern(), }); const ptr_info = ptr_ptr_child_ty.ptrInfo(mod); slice_ty = try pt.ptrType(.{ .child = array_ty.toIntern(), .flags = .{ .alignment = ptr_info.flags.alignment, .is_const = ptr_info.flags.is_const, .is_allowzero = ptr_info.flags.is_allowzero, .is_volatile = ptr_info.flags.is_volatile, .address_space = ptr_info.flags.address_space, }, }); elem_ty = double_child_ty; } }, .Many, .C => { ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; elem_ty = ptr_ptr_child_ty.childType(mod); if (ptr_ptr_child_ty.ptrSize(mod) == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); } } } }, .Slice => { ptr_sentinel = 
ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; elem_ty = ptr_ptr_child_ty.childType(mod); }, }, else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(pt)}), } const ptr = if (slice_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) else if (array_ty.zigTypeTag(mod) == .Array) ptr: { var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type; assert(manyptr_ty_key.child == array_ty.toIntern()); assert(manyptr_ty_key.flags.size == .One); manyptr_ty_key.child = elem_ty.toIntern(); manyptr_ty_key.flags.size = .Many; break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); const new_ptr_ty = sema.typeOf(new_ptr); // true if and only if the end index of the slice, implicitly or explicitly, equals // the length of the underlying object being sliced. we might learn the length of the // underlying object because it is an array (which has the length in the type), or // we might learn of the length because it is a comptime-known slice value. var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(mod) == .Array) { const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false); break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { const len_s_val = try pt.intValue( Type.usize, array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null) " +1 (sentinel)" else ""; return sema.fail( block, end_src, "end index {} out of bounds for array of length {}{s}", .{ end_val.fmtValueSema(pt, sema), len_val.fmtValueSema(pt, sema), sentinel_label, }, ); } // end_is_len is only true if we are NOT using the sentinel // length. For sentinel-length, we don't want the type to // contain the sentinel. 
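// Illustrative example (editorial sketch): given `var a: [3:0]u8`,
// the slice `a[0..3]` has end == len, so `end_is_len` is set below;
// `a[0..4]` (slicing the sentinel itself) is still in bounds, but
// leaves `end_is_len` false.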
if (end_val.eql(len_val, Type.usize, mod)) { end_is_len = true; } } break :e end; } break :e Air.internedToRef(len_val.toIntern()); } else if (slice_ty.isSlice(mod)) { if (!end_is_len) { const end = if (by_length) end: { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false); break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveValue(ptr_or_slice)) |slice_val| { if (slice_val.isUndef(mod)) { return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; const slice_len = try slice_val.sliceLen(pt); const len_plus_sent = slice_len + @intFromBool(has_sentinel); const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent); if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { const sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" else ""; return sema.fail( block, end_src, "end index {} out of bounds for slice of length {d}{s}", .{ end_val.fmtValueSema(pt, sema), try slice_val.sliceLen(pt), sentinel_label, }, ); } // If the slice has a sentinel, we consider end_is_len to be true // only if the end equals the length WITHOUT the sentinel, so that // we don't add a sentinel type. const slice_len_val = try pt.intValue(Type.usize, slice_len); if (end_val.eql(slice_len_val, Type.usize, mod)) { end_is_len = true; } } } break :e end; } break :e try sema.analyzeSliceLen(block, src, ptr_or_slice); } if (!end_is_len) { if (by_length) { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false); break :e try sema.coerce(block, Type.usize, uncasted_end, end_src); } else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); } return sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); }; const sentinel = s: { if (sentinel_opt != .none) { const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src); break :s try sema.resolveConstDefinedValue(block, sentinel_src, casted, .{ .needed_comptime_reason = "slice sentinel must be comptime-known", }); } // If we are slicing to the end of something that is sentinel-terminated // then the resulting slice type is also sentinel-terminated.
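// Illustrative example (editorial sketch, assuming runtime-known
// operands so the result stays a slice rather than a pointer-to-array):
//
//     fn demo(msg: [:0]const u8, i: usize) void {
//         const tail = msg[1..]; // end == len: tail is [:0]const u8
//         const mid = msg[1..i]; // explicit end: mid is []const u8
//         _ = .{ tail, mid };
//     }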
if (end_is_len) { if (ptr_sentinel) |sent| { break :s sent; } } break :s null; }; const slice_sentinel = if (sentinel_opt != .none) sentinel else null; var checked_start_lte_end = by_length; var runtime_src: ?LazySrcLoc = null; // requirement: start <= end if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| { if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) { return sema.fail( block, start_src, "start index {} is larger than end index {}", .{ start_val.fmtValueSema(pt, sema), end_val.fmtValueSema(pt, sema), }, ); } checked_start_lte_end = true; if (try sema.resolveValue(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; const start_int = start_val.getUnsignedInt(pt).?; const end_int = end_val.getUnsignedInt(pt).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); const many_ptr_ty = try pt.manyConstPtrType(elem_ty); const many_ptr_val = try pt.getCoerced(ptr_val, many_ptr_ty); const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, pt); const res = try sema.pointerDerefExtra(block, src, elem_ptr); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, .val => |v| v, .needed_well_defined => |ty| return sema.fail( block, src, "comptime dereference requires '{}' to have a well-defined layout", .{ty.fmt(pt)}, ), .out_of_bounds => |ty| return sema.fail( block, end_src, "slice end index {d} exceeds bounds of containing decl of type '{}'", .{ end_int, ty.fmt(pt) }, ), }; if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) { const msg = msg: { const msg = try sema.errMsg(src, "value in memory does not match slice sentinel", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(src, msg, "expected '{}', found '{}'", .{ expected_sentinel.fmtValueSema(pt, sema), actual_sentinel.fmtValueSema(pt, sema), }); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); } } else { runtime_src = ptr_src; } } else { runtime_src = start_src; } } else { runtime_src = end_src; } if (!checked_start_lte_end and block.wantSafety() and !block.is_comptime) { // requirement: start <= end assert(!block.is_comptime); try sema.requireRuntimeBlock(block, src, runtime_src.?); const ok = try block.addBinOp(.cmp_lte, start, end); if (!pt.zcu.comp.formatted_panics) { try sema.addSafetyCheck(block, src, ok, .start_index_greater_than_end); } else { try sema.safetyCheckFormatted(block, src, ok, "panicStartGreaterThanEnd", &.{ start, end }); } } const new_len = if (by_length) try sema.coerce(block, Type.usize, uncasted_end_opt, end_src) else try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod); const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { const new_len_int = try new_len_val.toUnsignedIntSema(pt); const return_ty = try pt.ptrTypeSema(.{ .child = (try pt.arrayType(.{ .len = new_len_int, .sentinel = if (sentinel) |s| s.toIntern() else .none, .child = elem_ty.toIntern(), })).toIntern(), .flags = .{ .alignment = new_ptr_ty_info.flags.alignment, .is_const = new_ptr_ty_info.flags.is_const, .is_allowzero = new_allowzero, .is_volatile = new_ptr_ty_info.flags.is_volatile, .address_space = new_ptr_ty_info.flags.address_space, }, }); const opt_new_ptr_val = try 
sema.resolveValue(new_ptr); const new_ptr_val = opt_new_ptr_val orelse { const result = try block.addBitCast(return_ty, new_ptr); if (block.wantSafety()) { // requirement: slicing C ptr is non-null if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null); } bounds_check: { const actual_len = if (array_ty.zigTypeTag(mod) == .Array) try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) l: { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); break :l if (slice_ty.sentinel(mod) == null) slice_len_inst else try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); } else break :bounds_check; const actual_end = if (slice_sentinel != null) try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true) else end; try sema.panicIndexOutOfBounds(block, src, actual_end, actual_len, .cmp_lte); } // requirement: result[new_len] == slice_sentinel try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len); } return result; }; if (!new_ptr_val.isUndef(mod)) { return Air.internedToRef((try pt.getCoerced(new_ptr_val, return_ty)).toIntern()); } // Special case: @as([]i32, undefined)[x..x] if (new_len_int == 0) { return pt.undefRef(return_ty); } return sema.fail(block, src, "non-zero length slice of undefined pointer", .{}); } const return_ty = try pt.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = if (sentinel) |s| s.toIntern() else .none, .flags = .{ .size = .Slice, .alignment = new_ptr_ty_info.flags.alignment, .is_const = new_ptr_ty_info.flags.is_const, .is_volatile = new_ptr_ty_info.flags.is_volatile, .is_allowzero = new_allowzero, .address_space = new_ptr_ty_info.flags.address_space, }, }); try sema.requireRuntimeBlock(block, src, runtime_src.?); if (block.wantSafety()) { // requirement: slicing C ptr is non-null if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, src, is_non_null, .unwrap_null); } // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel break :blk try pt.intRef(Type.usize, try slice_val.sliceLen(pt)); } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); } else null; if (opt_len_inst) |len_inst| { const actual_end = if (slice_sentinel != null) try sema.analyzeArithmetic(block, .add, end, .one, src, end_src, end_src, true) else end; try sema.panicIndexOutOfBounds(block, src, actual_end, len_inst, .cmp_lte); } // requirement: start <= end try sema.panicIndexOutOfBounds(block, src, start, end, .cmp_lte); } const result = try block.addInst(.{ .tag = .slice, .data = .{ .ty_pl = .{ .ty = Air.internedToRef(return_ty.toIntern()), .payload = try sema.addExtra(Air.Bin{ .lhs = new_ptr, .rhs = new_len, }), } }, }); if (block.wantSafety()) { // requirement: result[new_len] == 
slice_sentinel try sema.panicSentinelMismatch(block, src, slice_sentinel, elem_ty, result, new_len); } return result; } /// Asserts that lhs and rhs types are both numeric. fn cmpNumeric( sema: *Sema, block: *Block, src: LazySrcLoc, uncasted_lhs: Air.Inst.Ref, uncasted_rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); assert(lhs_ty.isNumeric(mod)); assert(rhs_ty.isNumeric(mod)); const lhs_ty_tag = lhs_ty.zigTypeTag(mod); const rhs_ty_tag = rhs_ty.zigTypeTag(mod); const target = mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to // coerce to fixed-width float. const lhs = if (lhs_ty_tag == .ComptimeFloat and rhs_ty_tag == .Float) try sema.coerce(block, rhs_ty, uncasted_lhs, lhs_src) else uncasted_lhs; const rhs = if (lhs_ty_tag == .Float and rhs_ty_tag == .ComptimeFloat) try sema.coerce(block, lhs_ty, uncasted_rhs, rhs_src) else uncasted_rhs; const runtime_src: LazySrcLoc = src: { if (try sema.resolveValue(lhs)) |lhs_val| { if (try sema.resolveValue(rhs)) |rhs_val| { // Compare ints: const vs. undefined (or vice versa) if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) { if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) .bool_true else .bool_false; } } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) { if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) .bool_true else .bool_false; } } if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return pt.undefRef(Type.bool); } if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; } return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, pt, .sema)) .bool_true else .bool_false; } else { if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) .bool_true else .bool_false; } } break :src rhs_src; } } else { if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| { if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. const if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) .bool_true else .bool_false; } } } break :src lhs_src; } }; // TODO handle comparisons against lazy zero values // Some values can be compared against zero without being runtime-known or without forcing // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout // of this function if we don't need to. try sema.requireRuntimeBlock(block, src, runtime_src); // For floats, emit a float comparison instruction. 
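// Illustrative example (editorial sketch): comparing `f32` and `f64`
// operands coerces both to `f64` (the larger fixed-width float)
// below, and comparing a comptime_float against an `f32` coerces the
// comptime_float operand to `f32`.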
const lhs_is_float = switch (lhs_ty_tag) { .Float, .ComptimeFloat => true, else => false, }; const rhs_is_float = switch (rhs_ty_tag) { .Float, .ComptimeFloat => true, else => false, }; if (lhs_is_float and rhs_is_float) { // Smaller fixed-width floats coerce to larger fixed-width floats. // comptime_float coerces to fixed-width float. const dest_ty = x: { if (lhs_ty_tag == .ComptimeFloat) { break :x rhs_ty; } else if (rhs_ty_tag == .ComptimeFloat) { break :x lhs_ty; } if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) { break :x lhs_ty; } else { break :x rhs_ty; } }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs); } // For mixed unsigned integer sizes, implicitly cast both operands to the larger integer. // For mixed signed and unsigned integers, implicitly cast both operands to a signed // integer with + 1 bit. // For mixed floats and integers, extract the integer part from the float, cast that to // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| !(try lhs_val.compareAllWithZeroSema(.gte, pt)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| !(try rhs_val.compareAllWithZeroSema(.gte, pt)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; if (try sema.resolveValueResolveLazy(lhs)) |lhs_val| { if (lhs_val.isUndef(mod)) return pt.undefRef(Type.bool); if (lhs_val.isNan(mod)) switch (op) { .neq => return .bool_true, else => return .bool_false, }; if (lhs_val.isInf(mod)) switch (op) { .neq => return .bool_true, .eq => return .bool_false, .gt, .gte => return if (lhs_val.isNegativeInf(mod)) .bool_false else .bool_true, .lt, .lte => return if (lhs_val.isNegativeInf(mod)) .bool_true else .bool_false, }; if (!rhs_is_signed) { switch (lhs_val.orderAgainstZero(pt)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return .bool_true, .gt => return .bool_false, else => {}, }, .lt => switch (op) { // LHS < 0, RHS is unsigned .neq, .lt, .lte => return .bool_true, .eq, .gt, .gte => return .bool_false, }, } } if (lhs_is_float) { if (lhs_val.floatHasFraction(mod)) { switch (op) { .eq => return .bool_false, .neq => return .bool_true, else => {}, } } var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, pt)); defer bigint.deinit(); if (lhs_val.floatHasFraction(mod)) { if (lhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { try bigint.addScalar(&bigint, 1); } } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { lhs_bits = lhs_val.intBitCountTwosComp(pt); } lhs_bits += @intFromBool(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { const int_info = lhs_ty.intInfo(mod); lhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed); } var rhs_bits: usize = undefined; if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return pt.undefRef(Type.bool); if (rhs_val.isNan(mod)) switch (op) { .neq => return .bool_true, else => return .bool_false, }; if (rhs_val.isInf(mod)) switch (op)
{ .neq => return .bool_true, .eq => return .bool_false, .gt, .gte => return if (rhs_val.isNegativeInf(mod)) .bool_true else .bool_false, .lt, .lte => return if (rhs_val.isNegativeInf(mod)) .bool_false else .bool_true, }; if (!lhs_is_signed) { switch (rhs_val.orderAgainstZero(pt)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return .bool_true, .lt => return .bool_false, else => {}, }, .lt => switch (op) { // RHS < 0, LHS is unsigned .neq, .gt, .gte => return .bool_true, .eq, .lt, .lte => return .bool_false, }, } } if (rhs_is_float) { if (rhs_val.floatHasFraction(mod)) { switch (op) { .eq => return .bool_false, .neq => return .bool_true, else => {}, } } var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, pt)); defer bigint.deinit(); if (rhs_val.floatHasFraction(mod)) { if (rhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { try bigint.addScalar(&bigint, 1); } } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { rhs_bits = rhs_val.intBitCountTwosComp(pt); } rhs_bits += @intFromBool(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { const int_info = rhs_ty.intInfo(mod); rhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed); } const dest_ty = if (dest_float_type) |ft| ft else blk: { const max_bits = @max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try pt.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs); } /// Asserts that LHS value is an int or comptime int and not undefined, and /// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to /// determine whether `op` has a guaranteed result. /// If it cannot be determined, returns null. /// Otherwise returns a bool for the guaranteed comparison operation. fn compareIntsOnlyPossibleResult( sema: *Sema, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type, ) Allocator.Error!?bool { const pt = sema.pt; const mod = pt.zcu; const rhs_info = rhs_ty.intInfo(mod); const vs_zero = lhs_val.orderAgainstZeroAdvanced(pt, .sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; // Anything vs. zero-sized type has guaranteed outcome. if (rhs_info.bits == 0) return switch (op) { .eq, .lte, .gte => is_zero, .neq, .lt, .gt => !is_zero, }; // Special case for i1, which can only be 0 or -1. // Zero and positive ints have guaranteed outcome. if (rhs_info.bits == 1 and rhs_info.signedness == .signed) { if (is_positive) return switch (op) { .gt, .gte, .neq => true, .lt, .lte, .eq => false, }; if (is_zero) return switch (op) { .gte => true, .lt => false, .gt, .lte, .eq, .neq => null, }; } // Negative vs. unsigned has guaranteed outcome. if (rhs_info.signedness == .unsigned and is_negative) return switch (op) { .eq, .gt, .gte => false, .neq, .lt, .lte => true, }; const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed); const req_bits = lhs_val.intBitCountTwosComp(pt) + sign_adj; // No sized type can have more than 65535 bits. 
// The RHS type operand is either a runtime value or sized (but undefined) constant. if (req_bits > 65535) return switch (op) { .lt, .lte => is_negative, .gt, .gte => is_positive, .eq => false, .neq => true, }; const fits = req_bits <= rhs_info.bits; // Oversized int has guaranteed outcome. switch (op) { .eq => return if (!fits) false else null, .neq => return if (!fits) true else null, .lt, .lte => if (!fits) return is_negative, .gt, .gte => if (!fits) return !is_negative, } // For any other comparison, we need to know if the LHS value is // equal to the maximum or minimum possible value of the RHS type. const is_min, const is_max = edge: { if (is_zero and rhs_info.signedness == .unsigned) break :edge .{ true, false }; if (req_bits != rhs_info.bits) break :edge .{ false, false }; const ty = try pt.intType( if (is_negative) .signed else .unsigned, @intCast(req_bits), ); const pop_count = lhs_val.popCount(ty, pt); if (is_negative) { break :edge .{ pop_count == 1, false }; } else { break :edge .{ false, pop_count == req_bits - sign_adj }; } }; assert(fits); return switch (op) { .lt => if (is_max) false else null, .lte => if (is_min) true else null, .gt => if (is_min) false else null, .gte => if (is_max) true else null, .eq, .neq => unreachable, }; } /// Asserts that lhs and rhs types are both vectors. fn cmpVector( sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); assert(lhs_ty.zigTypeTag(mod) == .Vector); assert(rhs_ty.zigTypeTag(mod) == .Vector); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } }); const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); const result_ty = try pt.vectorType(.{ .len = lhs_ty.vectorLen(mod), .child = .bool_type, }); const runtime_src: LazySrcLoc = src: { if (try sema.resolveValue(casted_lhs)) |lhs_val| { if (try sema.resolveValue(casted_rhs)) |rhs_val| { if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return pt.undefRef(result_ty); } const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); return Air.internedToRef(cmp_val.toIntern()); } else { break :src rhs_src; } } else { break :src lhs_src; } }; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addCmpVector(casted_lhs, casted_rhs, op); } fn wrapOptional( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveValue(inst)) |val| { return Air.internedToRef((try sema.pt.intern(.{ .opt = .{ .ty = dest_ty.toIntern(), .val = val.toIntern(), } }))); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.wrap_optional, dest_ty, inst); } fn wrapErrorUnionPayload( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveValue(coerced)) |val| { return Air.internedToRef((try pt.intern(.{ .error_union = .{ .ty = dest_ty.toIntern(), .val = .{ .payload = 
val.toIntern() }, } }))); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } fn wrapErrorUnionSet( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveValue(inst)) |val| { const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; switch (dest_err_set_ty.toIntern()) { .anyerror_type => {}, .adhoc_inferred_error_set_type => ok: { const ies = sema.fn_ret_ty_ies.?; switch (ies.resolved) { .anyerror_type => break :ok, .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { break :ok; }, else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) { break :ok; }, } return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { .error_set_type => |error_set_type| ok: { if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |func_index| ok: { // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. try mod.maybeUnresolveIes(func_index); switch (ip.funcIesResolvedUnordered(func_index)) { .anyerror_type => break :ok, .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { break :ok; }, else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) { break :ok; }, } return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, else => unreachable, }, } return Air.internedToRef((try pt.intern(.{ .error_union = .{ .ty = dest_ty.toIntern(), .val = .{ .err_name = expected_name }, } }))); } try sema.requireRuntimeBlock(block, inst_src, null); const coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src); return block.addTyOp(.wrap_errunion_err, dest_ty, coerced); } fn unionToTag( sema: *Sema, block: *Block, enum_ty: Type, un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| { return Air.internedToRef(opv.toIntern()); } if (try sema.resolveValue(un)) |un_val| { const tag_val = un_val.unionTag(mod).?; if (tag_val.isUndef(mod)) return try pt.undefRef(enum_ty); return Air.internedToRef(tag_val.toIntern()); } try sema.requireRuntimeBlock(block, un_src, null); return block.addTyOp(.get_union_tag, enum_ty, un); } const PeerResolveStrategy = enum { /// The type is not known. /// If refined no further, this is equivalent to `exact`. unknown, /// The type may be an error set or error union. /// If refined no further, it is an error set. error_set, /// The type must be some error union. error_union, /// The type may be @TypeOf(null), an optional or a C pointer. /// If refined no further, it is @TypeOf(null). nullable, /// The type must be some optional or a C pointer. /// If refined no further, it is an optional. optional, /// The type must be either an array or a vector. /// If refined no further, it is an array. array, /// The type must be a vector. vector, /// The type must be a C pointer. c_ptr, /// The type must be a pointer (C or not). 
/// If refined no further, it is a non-C pointer. ptr, /// The type must be a function or a pointer to a function. /// If refined no further, it is a function. func, /// The type must be an enum literal, or some specific enum or union. Which one is decided /// afterwards based on the types in question. enum_or_union, /// The type must be some integer or float type. /// If refined no further, it is `comptime_int`. comptime_int, /// The type must be some float type. /// If refined no further, it is `comptime_float`. comptime_float, /// The type must be some float or fixed-width integer type. /// If refined no further, it is some fixed-width integer type. fixed_int, /// The type must be some fixed-width float type. fixed_float, /// The type must be a struct literal or tuple type. coercible_struct, /// The peers must all be of the same type. exact, /// Given two strategies, find a strategy that satisfies both, if one exists. If no such /// strategy exists, any strategy may be returned; an error will be emitted when the caller /// attempts to use the strategy to resolve the type. /// Strategy `a` comes from the peer in `reason_peer`, while strategy `b` comes from the peer at /// index `b_peer_idx`. `reason_peer` is updated to reflect the reason for the new strategy. fn merge(a: PeerResolveStrategy, b: PeerResolveStrategy, reason_peer: *usize, b_peer_idx: usize) PeerResolveStrategy { // Our merging should be order-independent. Thus, even though the union order is arbitrary, // by sorting the tags and switching first on the smaller, we have half as many cases to // worry about (since we avoid the duplicates). const s0_is_a = @intFromEnum(a) <= @intFromEnum(b); const s0 = if (s0_is_a) a else b; const s1 = if (s0_is_a) b else a; const ReasonMethod = enum { all_s0, all_s1, either, }; const reason_method: ReasonMethod, const strat: PeerResolveStrategy = switch (s0) { .unknown => .{ .all_s1, s1 }, .error_set => switch (s1) { .error_set => .{ .either, .error_set }, else => .{ .all_s0, .error_union }, }, .error_union => switch (s1) { .error_union => .{ .either, .error_union }, else => .{ .all_s0, .error_union }, }, .nullable => switch (s1) { .nullable => .{ .either, .nullable }, .c_ptr => .{ .all_s1, .c_ptr }, else => .{ .all_s0, .optional }, }, .optional => switch (s1) { .optional => .{ .either, .optional }, .c_ptr => .{ .all_s1, .c_ptr }, else => .{ .all_s0, .optional }, }, .array => switch (s1) { .array => .{ .either, .array }, .vector => .{ .all_s1, .vector }, else => .{ .all_s0, .array }, }, .vector => switch (s1) { .vector => .{ .either, .vector }, else => .{ .all_s0, .vector }, }, .c_ptr => switch (s1) { .c_ptr => .{ .either, .c_ptr }, else => .{ .all_s0, .c_ptr }, }, .ptr => switch (s1) { .ptr => .{ .either, .ptr }, else => .{ .all_s0, .ptr }, }, .func => switch (s1) { .func => .{ .either, .func }, else => .{ .all_s1, s1 }, // doesn't override anything later }, .enum_or_union => switch (s1) { .enum_or_union => .{ .either, .enum_or_union }, else => .{ .all_s0, .enum_or_union }, }, .comptime_int => switch (s1) { .comptime_int => .{ .either, .comptime_int }, else => .{ .all_s1, s1 }, // doesn't override anything later }, .comptime_float => switch (s1) { .comptime_float => .{ .either, .comptime_float }, else => .{ .all_s1, s1 }, // doesn't override anything later }, .fixed_int => switch (s1) { .fixed_int => .{ .either, .fixed_int }, else => .{ .all_s1, s1 }, // doesn't override anything later }, .fixed_float => switch (s1) { .fixed_float => .{ .either, .fixed_float }, else => .{ .all_s1, s1 }, // 
doesn't override anything later }, .coercible_struct => switch (s1) { .exact => .{ .all_s1, .exact }, else => .{ .all_s0, .coercible_struct }, }, .exact => .{ .all_s0, .exact }, }; switch (reason_method) { .all_s0 => { if (!s0_is_a) { reason_peer.* = b_peer_idx; } }, .all_s1 => { if (s0_is_a) { reason_peer.* = b_peer_idx; } }, .either => { // Prefer the earliest peer reason_peer.* = @min(reason_peer.*, b_peer_idx); }, } return strat; } fn select(ty: Type, mod: *Module) PeerResolveStrategy { return switch (ty.zigTypeTag(mod)) { .Type, .Void, .Bool, .Opaque, .Frame, .AnyFrame => .exact, .NoReturn, .Undefined => .unknown, .Null => .nullable, .ComptimeInt => .comptime_int, .Int => .fixed_int, .ComptimeFloat => .comptime_float, .Float => .fixed_float, .Pointer => if (ty.ptrInfo(mod).flags.size == .C) .c_ptr else .ptr, .Array => .array, .Vector => .vector, .Optional => .optional, .ErrorSet => .error_set, .ErrorUnion => .error_union, .EnumLiteral, .Enum, .Union => .enum_or_union, .Struct => if (ty.isTupleOrAnonStruct(mod)) .coercible_struct else .exact, .Fn => .func, }; } }; const PeerTypeCandidateSrc = union(enum) { /// Do not print out error notes for candidate sources none: void, /// When we want to know the src of candidate i, look up at /// index i in this slice override: []const ?LazySrcLoc, /// resolvePeerTypes originates from a @TypeOf(...) call typeof_builtin_call_node_offset: i32, pub fn resolve( self: PeerTypeCandidateSrc, block: *Block, candidate_i: usize, ) ?LazySrcLoc { return switch (self) { .none => null, .override => |candidate_srcs| if (candidate_i >= candidate_srcs.len) null else candidate_srcs[candidate_i], .typeof_builtin_call_node_offset => |node_offset| block.builtinCallArgSrc(node_offset, @intCast(candidate_i)), }; } }; const PeerResolveResult = union(enum) { /// The peer type resolution was successful, and resulted in the given type. success: Type, /// There was some generic conflict between two peers. conflict: struct { peer_idx_a: usize, peer_idx_b: usize, }, /// There was an error when resolving the type of a struct or tuple field. field_error: struct { /// The name of the field which caused the failure. field_name: InternPool.NullTerminatedString, /// The type of this field in each peer. field_types: []Type, /// The error from resolving the field type. Guaranteed not to be `success`.
sub_result: *PeerResolveResult, }, fn report( result: PeerResolveResult, sema: *Sema, block: *Block, src: LazySrcLoc, instructions: []const Air.Inst.Ref, candidate_srcs: PeerTypeCandidateSrc, ) !*Module.ErrorMsg { const pt = sema.pt; var opt_msg: ?*Module.ErrorMsg = null; errdefer if (opt_msg) |msg| msg.destroy(sema.gpa); // If we mention fields we'll want to include field types, so put peer types in a buffer var peer_tys = try sema.arena.alloc(Type, instructions.len); for (peer_tys, instructions) |*ty, inst| { ty.* = sema.typeOf(inst); } var cur = result; while (true) { var conflict_idx: [2]usize = undefined; switch (cur) { .success => unreachable, .conflict => |conflict| { // Fall through to two-peer conflict handling below conflict_idx = .{ conflict.peer_idx_a, conflict.peer_idx_b, }; }, .field_error => |field_error| { const fmt = "struct field '{}' has conflicting types"; const args = .{field_error.field_name.fmt(&pt.zcu.intern_pool)}; if (opt_msg) |msg| { try sema.errNote(src, msg, fmt, args); } else { opt_msg = try sema.errMsg(src, fmt, args); } // Continue on to child error cur = field_error.sub_result.*; peer_tys = field_error.field_types; continue; }, } // This is the path for reporting a generic conflict between two peers. if (conflict_idx[1] < conflict_idx[0]) { // b comes first in source, so it's better if it comes first in the error std.mem.swap(usize, &conflict_idx[0], &conflict_idx[1]); } const conflict_tys: [2]Type = .{ peer_tys[conflict_idx[0]], peer_tys[conflict_idx[1]], }; const conflict_srcs: [2]?LazySrcLoc = .{ candidate_srcs.resolve(block, conflict_idx[0]), candidate_srcs.resolve(block, conflict_idx[1]), }; const fmt = "incompatible types: '{}' and '{}'"; const args = .{ conflict_tys[0].fmt(pt), conflict_tys[1].fmt(pt), }; const msg = if (opt_msg) |msg| msg: { try sema.errNote(src, msg, fmt, args); break :msg msg; } else msg: { const msg = try sema.errMsg(src, fmt, args); opt_msg = msg; break :msg msg; }; if (conflict_srcs[0]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[0].fmt(pt)}); if (conflict_srcs[1]) |src_loc| try sema.errNote(src_loc, msg, "type '{}' here", .{conflict_tys[1].fmt(pt)}); // No child error break; } return opt_msg.?; } }; fn resolvePeerTypes( sema: *Sema, block: *Block, src: LazySrcLoc, instructions: []const Air.Inst.Ref, candidate_srcs: PeerTypeCandidateSrc, ) !Type { switch (instructions.len) { 0 => return Type.noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } const peer_tys = try sema.arena.alloc(?Type, instructions.len); const peer_vals = try sema.arena.alloc(?Value, instructions.len); for (instructions, peer_tys, peer_vals) |inst, *ty, *val| { ty.* = sema.typeOf(inst); val.* = try sema.resolveValue(inst); } switch (try sema.resolvePeerTypesInner(block, src, peer_tys, peer_vals)) { .success => |ty| return ty, else => |result| { const msg = try result.report(sema, block, src, instructions, candidate_srcs); return sema.failWithOwnedErrorMsg(block, msg); }, } } fn resolvePeerTypesInner( sema: *Sema, block: *Block, src: LazySrcLoc, peer_tys: []?Type, peer_vals: []?Value, ) !PeerResolveResult { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; var strat_reason: usize = 0; var s: PeerResolveStrategy = .unknown; for (peer_tys, 0..) 
|opt_ty, i| { const ty = opt_ty orelse continue; s = s.merge(PeerResolveStrategy.select(ty, mod), &strat_reason, i); } if (s == .unknown) { // The whole thing was noreturn or undefined - try to do an exact match s = .exact; } else { // There was something other than noreturn and undefined, so we can ignore those peers for (peer_tys) |*ty_ptr| { const ty = ty_ptr.* orelse continue; switch (ty.zigTypeTag(mod)) { .NoReturn, .Undefined => ty_ptr.* = null, else => {}, } } } const target = mod.getTarget(); switch (s) { .unknown => unreachable, .error_set => { var final_set: ?Type = null; for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; if (ty.zigTypeTag(mod) != .ErrorSet) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; if (final_set) |cur_set| { final_set = try sema.maybeMergeErrorSets(block, src, cur_set, ty); } else { final_set = ty; } } return .{ .success = final_set.? }; }, .error_union => { var final_set: ?Type = null; for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| { const ty = ty_ptr.* orelse continue; const set_ty = switch (ty.zigTypeTag(mod)) { .ErrorSet => blk: { ty_ptr.* = null; // no payload to decide on val_ptr.* = null; break :blk ty; }, .ErrorUnion => blk: { const set_ty = ty.errorUnionSet(mod); ty_ptr.* = ty.errorUnionPayload(mod); if (val_ptr.*) |eu_val| switch (ip.indexToKey(eu_val.toIntern())) { .error_union => |eu| switch (eu.val) { .payload => |payload_ip| val_ptr.* = Value.fromInterned(payload_ip), .err_name => val_ptr.* = null, }, .undef => val_ptr.* = Value.fromInterned(try pt.intern(.{ .undef = ty_ptr.*.?.toIntern() })), else => unreachable, }; break :blk set_ty; }, else => continue, // whole type is the payload }; if (final_set) |cur_set| { final_set = try sema.maybeMergeErrorSets(block, src, cur_set, set_ty); } else { final_set = set_ty; } } assert(final_set != null); const final_payload = switch (try sema.resolvePeerTypesInner( block, src, peer_tys, peer_vals, )) { .success => |ty| ty, else => |result| return result, }; return .{ .success = try pt.errorUnionType(final_set.?, final_payload) }; }, .nullable => { for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; if (!ty.eql(Type.null, mod)) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; } return .{ .success = Type.null }; }, .optional => { for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| { const ty = ty_ptr.* orelse continue; switch (ty.zigTypeTag(mod)) { .Null => { ty_ptr.* = null; val_ptr.* = null; }, .Optional => { ty_ptr.* = ty.optionalChild(mod); if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(mod)) opt_val.optionalValue(mod) else null; }, else => {}, } } const child_ty = switch (try sema.resolvePeerTypesInner( block, src, peer_tys, peer_vals, )) { .success => |ty| ty, else => |result| return result, }; return .{ .success = try pt.optionalType(child_ty.toIntern()) }; }, .array => { // Index of the first non-null peer var opt_first_idx: ?usize = null; // Index of the first array or vector peer (i.e. not a tuple) var opt_first_arr_idx: ?usize = null; // Set to non-null once we see any peer, even a tuple var len: u64 = undefined; var sentinel: ?Value = undefined; // Only set once we see a non-tuple peer var elem_ty: Type = undefined; for (peer_tys, 0..) |*ty_ptr, i| { const ty = ty_ptr.* orelse continue; if (!ty.isArrayOrVector(mod)) { // We allow tuples of the correct length. We won't validate their elem type, since the elements can be coerced. 
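// Illustrative example (editorial sketch): peer-resolving `[2]u32`
// with the tuple `.{ 1, 2 }` succeeds as `[2]u32`, while resolving
// it with `.{ 1, 2, 3 }` reports a length conflict.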
const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; if (opt_first_idx) |first_idx| { if (arr_like.len != len) return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } else { opt_first_idx = i; len = arr_like.len; } sentinel = null; continue; } const first_arr_idx = opt_first_arr_idx orelse { if (opt_first_idx == null) { opt_first_idx = i; len = ty.arrayLen(mod); sentinel = ty.sentinel(mod); } opt_first_arr_idx = i; elem_ty = ty.childType(mod); continue; }; if (ty.arrayLen(mod) != len) return .{ .conflict = .{ .peer_idx_a = first_arr_idx, .peer_idx_b = i, } }; const peer_elem_ty = ty.childType(mod); if (!peer_elem_ty.eql(elem_ty, mod)) coerce: { const peer_elem_coerces_to_elem = try sema.coerceInMemoryAllowed(block, elem_ty, peer_elem_ty, false, mod.getTarget(), src, src, null); if (peer_elem_coerces_to_elem == .ok) { break :coerce; } const elem_coerces_to_peer_elem = try sema.coerceInMemoryAllowed(block, peer_elem_ty, elem_ty, false, mod.getTarget(), src, src, null); if (elem_coerces_to_peer_elem == .ok) { elem_ty = peer_elem_ty; break :coerce; } return .{ .conflict = .{ .peer_idx_a = first_arr_idx, .peer_idx_b = i, } }; } if (sentinel) |cur_sent| { if (ty.sentinel(mod)) |peer_sent| { if (!peer_sent.eql(cur_sent, elem_ty, mod)) sentinel = null; } else { sentinel = null; } } } // There should always be at least one array or vector peer assert(opt_first_arr_idx != null); return .{ .success = try pt.arrayType(.{ .len = len, .child = elem_ty.toIntern(), .sentinel = if (sentinel) |sent_val| sent_val.toIntern() else .none, }) }; }, .vector => { var len: ?u64 = null; var first_idx: usize = undefined; for (peer_tys, peer_vals, 0..) |*ty_ptr, *val_ptr, i| { const ty = ty_ptr.* orelse continue; if (!ty.isArrayOrVector(mod)) { // Allow tuples of the correct length const arr_like = sema.typeIsArrayLike(ty) orelse return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; if (len) |expect_len| { if (arr_like.len != expect_len) return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } else { len = arr_like.len; first_idx = i; } // Tuples won't participate in the child type resolution. We'll resolve without // them, and if the tuples have a bad type, we'll get a coercion error later. ty_ptr.* = null; val_ptr.* = null; continue; } if (len) |expect_len| { if (ty.arrayLen(mod) != expect_len) return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } else { len = ty.arrayLen(mod); first_idx = i; } ty_ptr.* = ty.childType(mod); val_ptr.* = null; // multiple child vals, so we can't easily use them in PTR } const child_ty = switch (try sema.resolvePeerTypesInner( block, src, peer_tys, peer_vals, )) { .success => |ty| ty, else => |result| return result, }; return .{ .success = try pt.vectorType(.{ .len = @intCast(len.?), .child = child_ty.toIntern(), }) }; }, .c_ptr => { var opt_ptr_info: ?InternPool.Key.PtrType = null; var first_idx: usize = undefined; for (peer_tys, peer_vals, 0..) 
|opt_ty, opt_val, i| { const ty = opt_ty orelse continue; switch (ty.zigTypeTag(mod)) { .ComptimeInt => continue, // comptime-known integers can always coerce to C pointers .Int => { if (opt_val != null) { // Always allow the coercion for comptime-known ints continue; } else { // Runtime-known, so check if the type is no bigger than a usize const ptr_bits = target.ptrBitWidth(); const bits = ty.intInfo(mod).bits; if (bits <= ptr_bits) continue; } }, .Null => continue, else => {}, } if (!ty.isPtrAtRuntime(mod)) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; // isPtrAtRuntime looks through optionals const peer_info = ty.ptrInfo(mod); var ptr_info = opt_ptr_info orelse { opt_ptr_info = peer_info; opt_ptr_info.?.flags.size = .C; first_idx = i; continue; }; // Try peer -> cur, then cur -> peer ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) orelse { return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; }).toIntern(); if (ptr_info.sentinel != .none and peer_info.sentinel != .none) { const cur_sent = try ip.getCoerced(sema.gpa, pt.tid, ptr_info.sentinel, ptr_info.child); const peer_sent = try ip.getCoerced(sema.gpa, pt.tid, peer_info.sentinel, ptr_info.child); if (cur_sent == peer_sent) { ptr_info.sentinel = cur_sent; } else { ptr_info.sentinel = .none; } } else { ptr_info.sentinel = .none; } // Note that the alignment may always be set non-zero; Module.ptrType will canonicalize it ptr_info.flags.alignment = InternPool.Alignment.min( if (ptr_info.flags.alignment != .none) ptr_info.flags.alignment else Type.fromInterned(ptr_info.child).abiAlignment(pt), if (peer_info.flags.alignment != .none) peer_info.flags.alignment else Type.fromInterned(peer_info.child).abiAlignment(pt), ); if (ptr_info.flags.address_space != peer_info.flags.address_space) { return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } if (ptr_info.packed_offset.bit_offset != peer_info.packed_offset.bit_offset or ptr_info.packed_offset.host_size != peer_info.packed_offset.host_size) { return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const; ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile; opt_ptr_info = ptr_info; } return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) }; }, .ptr => { // If we've resolved to a `[]T` but then see a `[*]T`, we can resolve to a `[*]T` only // if there were no actual slices. Else, we want the slice index to report a conflict. var opt_slice_idx: ?usize = null; var opt_ptr_info: ?InternPool.Key.PtrType = null; var first_idx: usize = undefined; var other_idx: usize = undefined; // We sometimes need a second peer index to report a generic error for (peer_tys, 0..)
|opt_ty, i| { const ty = opt_ty orelse continue; const peer_info: InternPool.Key.PtrType = switch (ty.zigTypeTag(mod)) { .Pointer => ty.ptrInfo(mod), .Fn => .{ .child = ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), }, }, else => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, }; switch (peer_info.flags.size) { .One, .Many => {}, .Slice => opt_slice_idx = i, .C => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, } var ptr_info = opt_ptr_info orelse { opt_ptr_info = peer_info; first_idx = i; continue; }; other_idx = i; // We want to return this in a lot of cases, so alias it here for convenience const generic_err: PeerResolveResult = .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; // Note that the alignment may always be set non-zero; Type.ptr will canonicalize it ptr_info.flags.alignment = Alignment.min( if (ptr_info.flags.alignment != .none) ptr_info.flags.alignment else try sema.typeAbiAlignment(Type.fromInterned(ptr_info.child)), if (peer_info.flags.alignment != .none) peer_info.flags.alignment else try sema.typeAbiAlignment(Type.fromInterned(peer_info.child)), ); if (ptr_info.flags.address_space != peer_info.flags.address_space) { return generic_err; } if (ptr_info.packed_offset.bit_offset != peer_info.packed_offset.bit_offset or ptr_info.packed_offset.host_size != peer_info.packed_offset.host_size) { return generic_err; } ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const; ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile; const peer_sentinel: InternPool.Index = switch (peer_info.flags.size) { .One => switch (ip.indexToKey(peer_info.child)) { .array_type => |array_type| array_type.sentinel, else => .none, }, .Many, .Slice => peer_info.sentinel, .C => unreachable, }; const cur_sentinel: InternPool.Index = switch (ptr_info.flags.size) { .One => switch (ip.indexToKey(ptr_info.child)) { .array_type => |array_type| array_type.sentinel, else => .none, }, .Many, .Slice => ptr_info.sentinel, .C => unreachable, }; // We abstract array handling slightly so that tuple pointers can work like array pointers const peer_pointee_array = sema.typeIsArrayLike(Type.fromInterned(peer_info.child)); const cur_pointee_array = sema.typeIsArrayLike(Type.fromInterned(ptr_info.child)); // This switch is just responsible for deciding the size and pointee (not including // single-pointer array sentinel).
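// Illustrative examples (editorial sketch) of the size/pointee
// resolutions decided below:
//
//     *[2]u8 + *[3]u8 -> []u8
//     *[3]u8 + [*]u8  -> [*]u8
//     *[3]u8 + []u8   -> []u8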
good: { switch (peer_info.flags.size) { .One => switch (ptr_info.flags.size) { .One => { if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } const cur_arr = cur_pointee_array orelse return generic_err; const peer_arr = peer_pointee_array orelse return generic_err; if (try sema.resolvePairInMemoryCoercible(block, src, cur_arr.elem_ty, peer_arr.elem_ty)) |elem_ty| { // *[n:x]T + *[n:y]T = *[n]T if (cur_arr.len == peer_arr.len) { ptr_info.child = (try pt.arrayType(.{ .len = cur_arr.len, .child = elem_ty.toIntern(), })).toIntern(); break :good; } // *[a]T + *[b]T = []T ptr_info.flags.size = .Slice; ptr_info.child = elem_ty.toIntern(); break :good; } if (peer_arr.elem_ty.toIntern() == .noreturn_type) { // *struct{} + *[a]T = []T ptr_info.flags.size = .Slice; ptr_info.child = cur_arr.elem_ty.toIntern(); break :good; } if (cur_arr.elem_ty.toIntern() == .noreturn_type) { // *[a]T + *struct{} = []T ptr_info.flags.size = .Slice; ptr_info.child = peer_arr.elem_ty.toIntern(); break :good; } return generic_err; }, .Many => { // Only works for *[n]T + [*]T -> [*]T const arr = peer_pointee_array orelse return generic_err; if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } if (arr.elem_ty.toIntern() == .noreturn_type) { // *struct{} + [*]T -> [*]T break :good; } return generic_err; }, .Slice => { // Only works for *[n]T + []T -> []T const arr = peer_pointee_array orelse return generic_err; if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } if (arr.elem_ty.toIntern() == .noreturn_type) { // *struct{} + []T -> []T break :good; } return generic_err; }, .C => unreachable, }, .Many => switch (ptr_info.flags.size) { .One => { // Only works for [*]T + *[n]T -> [*]T const arr = cur_pointee_array orelse return generic_err; if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| { ptr_info.flags.size = .Many; ptr_info.child = pointee.toIntern(); break :good; } if (arr.elem_ty.toIntern() == .noreturn_type) { // [*]T + *struct{} -> [*]T ptr_info.flags.size = .Many; ptr_info.child = peer_info.child; break :good; } return generic_err; }, .Many => { if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } return generic_err; }, .Slice => { // Only works if no peers are actually slices if (opt_slice_idx) |slice_idx| { return .{ .conflict = .{ .peer_idx_a = slice_idx, .peer_idx_b = i, } }; } // Okay, then works for [*]T + "[]T" -> [*]T if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { ptr_info.flags.size = .Many; ptr_info.child = pointee.toIntern(); break :good; } return generic_err; }, .C => unreachable, }, .Slice => switch (ptr_info.flags.size) { .One => { // Only works for []T + *[n]T -> []T const arr = cur_pointee_array orelse return generic_err; if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| { ptr_info.flags.size = .Slice; ptr_info.child = pointee.toIntern(); break :good; } if (arr.elem_ty.toIntern() == 
.noreturn_type) { // []T + *struct{} -> []T ptr_info.flags.size = .Slice; ptr_info.child = peer_info.child; break :good; } return generic_err; }, .Many => { // Impossible! (current peer is an actual slice) return generic_err; }, .Slice => { if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| { ptr_info.child = pointee.toIntern(); break :good; } return generic_err; }, .C => unreachable, }, .C => unreachable, } } const sentinel_ty = switch (ptr_info.flags.size) { .One => switch (ip.indexToKey(ptr_info.child)) { .array_type => |array_type| array_type.child, else => ptr_info.child, }, .Many, .Slice, .C => ptr_info.child, }; sentinel: { no_sentinel: { if (peer_sentinel == .none) break :no_sentinel; if (cur_sentinel == .none) break :no_sentinel; const peer_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, peer_sentinel, sentinel_ty); const cur_sent_coerced = try ip.getCoerced(sema.gpa, pt.tid, cur_sentinel, sentinel_ty); if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel; // Sentinels match if (ptr_info.flags.size == .One) switch (ip.indexToKey(ptr_info.child)) { .array_type => |array_type| ptr_info.child = (try pt.arrayType(.{ .len = array_type.len, .child = array_type.child, .sentinel = cur_sent_coerced, })).toIntern(), else => unreachable, } else { ptr_info.sentinel = cur_sent_coerced; } break :sentinel; } // Clear existing sentinel ptr_info.sentinel = .none; switch (ip.indexToKey(ptr_info.child)) { .array_type => |array_type| ptr_info.child = (try pt.arrayType(.{ .len = array_type.len, .child = array_type.child, .sentinel = .none, })).toIntern(), else => {}, } } opt_ptr_info = ptr_info; } // Before we succeed, check the pointee type. If we tried to apply PTR to (for instance) // &.{} and &.{}, we'll currently have a pointer type of `*[0]noreturn` - we wanted to // coerce the empty struct to a specific type, but no peer provided one. We need to // detect this case and emit an error. const pointee = opt_ptr_info.?.child; switch (pointee) { .noreturn_type => return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = other_idx, } }, else => switch (ip.indexToKey(pointee)) { .array_type => |array_type| if (array_type.child == .noreturn_type) return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = other_idx, } }, else => {}, }, } return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) }; }, .func => { var opt_cur_ty: ?Type = null; var first_idx: usize = undefined; for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; const cur_ty = opt_cur_ty orelse { opt_cur_ty = ty; first_idx = i; continue; }; if (ty.zigTypeTag(mod) != .Fn) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; // ty -> cur_ty if (.ok == try sema.coerceInMemoryAllowedFns(block, cur_ty, ty, target, src, src)) { continue; } // cur_ty -> ty if (.ok == try sema.coerceInMemoryAllowedFns(block, ty, cur_ty, target, src, src)) { opt_cur_ty = ty; continue; } return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } return .{ .success = opt_cur_ty.? }; }, .enum_or_union => { var opt_cur_ty: ?Type = null; // The peer index which gave the current type var cur_ty_idx: usize = undefined; for (peer_tys, 0..) 
|opt_ty, i| { const ty = opt_ty orelse continue; switch (ty.zigTypeTag(mod)) { .EnumLiteral, .Enum, .Union => {}, else => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, } const cur_ty = opt_cur_ty orelse { opt_cur_ty = ty; cur_ty_idx = i; continue; }; // We want to return this in a lot of cases, so alias it here for convenience const generic_err: PeerResolveResult = .{ .conflict = .{ .peer_idx_a = cur_ty_idx, .peer_idx_b = i, } }; switch (cur_ty.zigTypeTag(mod)) { .EnumLiteral => { opt_cur_ty = ty; cur_ty_idx = i; }, .Enum => switch (ty.zigTypeTag(mod)) { .EnumLiteral => {}, .Enum => { if (!ty.eql(cur_ty, mod)) return generic_err; }, .Union => { const tag_ty = ty.unionTagTypeHypothetical(mod); if (!tag_ty.eql(cur_ty, mod)) return generic_err; opt_cur_ty = ty; cur_ty_idx = i; }, else => unreachable, }, .Union => switch (ty.zigTypeTag(mod)) { .EnumLiteral => {}, .Enum => { const cur_tag_ty = cur_ty.unionTagTypeHypothetical(mod); if (!ty.eql(cur_tag_ty, mod)) return generic_err; }, .Union => { if (!ty.eql(cur_ty, mod)) return generic_err; }, else => unreachable, }, else => unreachable, } } return .{ .success = opt_cur_ty.? }; }, .comptime_int => { for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; switch (ty.zigTypeTag(mod)) { .ComptimeInt => {}, else => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, } } return .{ .success = Type.comptime_int }; }, .comptime_float => { for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat => {}, else => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, } } return .{ .success = Type.comptime_float }; }, .fixed_int => { var idx_unsigned: ?usize = null; var idx_signed: ?usize = null; // TODO: this is for compatibility with legacy behavior. See beneath the loop. var any_comptime_known = false; for (peer_tys, peer_vals, 0..) |opt_ty, *ptr_opt_val, i| { const ty = opt_ty orelse continue; const opt_val = ptr_opt_val.*; const peer_tag = ty.zigTypeTag(mod); switch (peer_tag) { .ComptimeInt => { // If the value is undefined, we can't refine to a fixed-width int if (opt_val == null or opt_val.?.isUndef(mod)) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; any_comptime_known = true; ptr_opt_val.* = try sema.resolveLazyValue(opt_val.?); continue; }, .Int => {}, else => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, } if (opt_val != null) any_comptime_known = true; const info = ty.intInfo(mod); const idx_ptr = switch (info.signedness) { .unsigned => &idx_unsigned, .signed => &idx_signed, }; const largest_idx = idx_ptr.* orelse { idx_ptr.* = i; continue; }; const cur_info = peer_tys[largest_idx].?.intInfo(mod); if (info.bits > cur_info.bits) { idx_ptr.* = i; } } if (idx_signed == null) { return .{ .success = peer_tys[idx_unsigned.?].? }; } if (idx_unsigned == null) { return .{ .success = peer_tys[idx_signed.?].? }; } const unsigned_info = peer_tys[idx_unsigned.?].?.intInfo(mod); const signed_info = peer_tys[idx_signed.?].?.intInfo(mod); if (signed_info.bits > unsigned_info.bits) { return .{ .success = peer_tys[idx_signed.?].? }; } // TODO: this is for compatibility with legacy behavior. Before this version of PTR was // implemented, the algorithm very often returned false positives, with the expectation // that you'd just hit a coercion error later. 
One of these was that for integers, the // largest type would always be returned, even if it couldn't fit everything. This had // an unintentional consequence to semantics, which is that if values were known at // comptime, they would be coerced down to the smallest type where possible. This // behavior is unintuitive and order-dependent, so in my opinion should be eliminated, // but for now we'll retain compatibility. if (any_comptime_known) { if (unsigned_info.bits > signed_info.bits) { return .{ .success = peer_tys[idx_unsigned.?].? }; } const idx = @min(idx_unsigned.?, idx_signed.?); return .{ .success = peer_tys[idx].? }; } return .{ .conflict = .{ .peer_idx_a = idx_unsigned.?, .peer_idx_b = idx_signed.?, } }; }, .fixed_float => { var opt_cur_ty: ?Type = null; for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| { const ty = opt_ty orelse continue; switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .ComptimeInt => {}, .Int => { if (opt_val == null) return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; }, .Float => { if (opt_cur_ty) |cur_ty| { if (cur_ty.eql(ty, mod)) continue; // Recreate the type so we eliminate any c_longdouble const bits = @max(cur_ty.floatBits(target), ty.floatBits(target)); opt_cur_ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, 64 => Type.f64, 80 => Type.f80, 128 => Type.f128, else => unreachable, }; } else { opt_cur_ty = ty; } }, else => return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }, } } // Note that fixed_float is only chosen if there is at least one fixed-width float peer, // so opt_cur_ty must be non-null. return .{ .success = opt_cur_ty.? }; }, .coercible_struct => { // First, check that every peer has the same approximate structure (field count and names) var opt_first_idx: ?usize = null; var is_tuple: bool = undefined; var field_count: usize = undefined; // Only defined for non-tuples. var field_names: []InternPool.NullTerminatedString = undefined; for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; if (!ty.isTupleOrAnonStruct(mod)) { return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, } }; } const first_idx = opt_first_idx orelse { opt_first_idx = i; is_tuple = ty.isTuple(mod); field_count = ty.structFieldCount(mod); if (!is_tuple) { const names = ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip); field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names); } continue; }; if (ty.isTuple(mod) != is_tuple or ty.structFieldCount(mod) != field_count) { return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } if (!is_tuple) { for (field_names, 0..) |expected, field_index_usize| { const field_index: u32 = @intCast(field_index_usize); const actual = ty.structFieldName(field_index, mod).unwrap().?; if (actual == expected) continue; return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } } } assert(opt_first_idx != null); // Now, we'll recursively resolve the field types const field_types = try sema.arena.alloc(InternPool.Index, field_count); // Values for `comptime` fields - `.none` used for non-comptime fields const field_vals = try sema.arena.alloc(InternPool.Index, field_count); const sub_peer_tys = try sema.arena.alloc(?Type, peer_tys.len); const sub_peer_vals = try sema.arena.alloc(?Value, peer_vals.len); for (field_types, field_vals, 0..) 
|*field_ty, *field_val, field_index| { // Fill buffers with types and values of the field for (peer_tys, peer_vals, sub_peer_tys, sub_peer_vals) |opt_ty, opt_val, *peer_field_ty, *peer_field_val| { const ty = opt_ty orelse { peer_field_ty.* = null; peer_field_val.* = null; continue; }; peer_field_ty.* = ty.structFieldType(field_index, mod); peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null; } // Resolve field type recursively field_ty.* = switch (try sema.resolvePeerTypesInner(block, src, sub_peer_tys, sub_peer_vals)) { .success => |ty| ty.toIntern(), else => |result| { const result_buf = try sema.arena.create(PeerResolveResult); result_buf.* = result; const field_name = if (is_tuple) try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls) else field_names[field_index]; // The error info needs the field types, but we can't reuse sub_peer_tys // since the recursive call may have clobbered it. const peer_field_tys = try sema.arena.alloc(Type, peer_tys.len); for (peer_tys, peer_field_tys) |opt_ty, *peer_field_ty| { // Already-resolved types won't be referenced by the error so it's fine // to leave them undefined. const ty = opt_ty orelse continue; peer_field_ty.* = ty.structFieldType(field_index, mod); } return .{ .field_error = .{ .field_name = field_name, .field_types = peer_field_tys, .sub_result = result_buf, } }; }, }; // Decide if this is a comptime field. If it is comptime in all peers, and the // coerced comptime values are all the same, we say it is comptime, else not. var comptime_val: ?Value = null; for (peer_tys) |opt_ty| { const struct_ty = opt_ty orelse continue; try struct_ty.resolveStructFieldInits(pt); const uncoerced_field_val = try struct_ty.structFieldValueComptime(pt, field_index) orelse { comptime_val = null; break; }; const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern()); const coerced_inst = sema.coerceExtra(block, Type.fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) { // It's possible for PTR to give false positives. Just give up on making this a comptime field, we'll get an error later anyway error.NotCoercible => { comptime_val = null; break; }, else => |e| return e, }; const coerced_val = (try sema.resolveValue(coerced_inst)) orelse continue; const existing = comptime_val orelse { comptime_val = coerced_val; continue; }; if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), mod)) { comptime_val = null; break; } } field_val.* = if (comptime_val) |v| v.toIntern() else .none; } const final_ty = try ip.getAnonStructType(mod.gpa, pt.tid, .{ .types = field_types, .names = if (is_tuple) &.{} else field_names, .values = field_vals, }); return .{ .success = Type.fromInterned(final_ty) }; }, .exact => { var expect_ty: ?Type = null; var first_idx: usize = undefined; for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; if (expect_ty) |expect| { if (!ty.eql(expect, mod)) return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } else { expect_ty = ty; first_idx = i; } } return .{ .success = expect_ty.? 
}; }, } } fn maybeMergeErrorSets(sema: *Sema, block: *Block, src: LazySrcLoc, e0: Type, e1: Type) !Type { // e0 -> e1 if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, e1, e0, src, src)) { return e1; } // e1 -> e0 if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, e0, e1, src, src)) { return e0; } return sema.errorSetMerge(e0, e1); } fn resolvePairInMemoryCoercible(sema: *Sema, block: *Block, src: LazySrcLoc, ty_a: Type, ty_b: Type) !?Type { const target = sema.pt.zcu.getTarget(); // ty_b -> ty_a if (.ok == try sema.coerceInMemoryAllowed(block, ty_a, ty_b, true, target, src, src, null)) { return ty_a; } // ty_a -> ty_b if (.ok == try sema.coerceInMemoryAllowed(block, ty_b, ty_a, true, target, src, src, null)) { return ty_b; } return null; } const ArrayLike = struct { len: u64, /// `noreturn` indicates that this type is `struct{}` so can coerce to anything elem_ty: Type, }; fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { const pt = sema.pt; const mod = pt.zcu; return switch (ty.zigTypeTag(mod)) { .Array => .{ .len = ty.arrayLen(mod), .elem_ty = ty.childType(mod), }, .Struct => { const field_count = ty.structFieldCount(mod); if (field_count == 0) return .{ .len = 0, .elem_ty = Type.noreturn, }; if (!ty.isTuple(mod)) return null; const elem_ty = ty.structFieldType(0, mod); for (1..field_count) |i| { if (!ty.structFieldType(i, mod).eql(elem_ty, mod)) { return null; } } return .{ .len = field_count, .elem_ty = elem_ty, }; }, else => null, }; } pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; if (sema.fn_ret_ty_ies) |ies| { try sema.resolveInferredErrorSetPtr(block, src, ies); assert(ies.resolved != .none); ip.funcIesResolved(sema.func_index).* = ies.resolved; } } pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; try Type.fromInterned(fn_ty_info.return_type).resolveFully(pt); if (mod.comp.config.any_error_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try pt.getBuiltinType("StackTrace"); } for (0..fn_ty_info.param_types.len) |i| { try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(pt); } } fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { return val.resolveLazy(sema.arena, sema.pt); } /// Resolve a struct's alignment only without triggering resolution of its layout. /// Asserts that the alignment is not yet resolved and the layout is non-packed. pub fn resolveStructAlignment( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, ) SemaError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const target = mod.getTarget(); assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); assert(struct_type.layout != .@"packed"); assert(struct_type.flagsUnordered(ip).alignment == .none); const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); // We'll guess "pointer-aligned", if the struct has an // underaligned pointer field then some allocations // might require explicit alignment. if (struct_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return; try sema.resolveTypeFieldsStruct(ty, struct_type); // We'll guess "pointer-aligned", if the struct has an // underaligned pointer field then some allocations // might require explicit alignment. 
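// (Concretely, `ptr_align` here is @divExact(ptrBitWidth, 8) bytes, e.g. 8 on
// 64-bit targets and 4 on 32-bit targets.)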
if (struct_type.assumePointerAlignedIfWip(ip, ptr_align)) return; defer struct_type.clearAlignmentWip(ip); var alignment: Alignment = .@"1"; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) continue; const field_align = try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, .sema, ); alignment = alignment.maxStrict(field_align); } struct_type.setAlignment(ip, alignment); } pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); if (struct_type.haveLayout(ip)) return; try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.layout == .@"packed") { semaBackingIntType(pt, struct_type) catch |err| switch (err) { error.OutOfMemory, error.AnalysisFail => |e| return e, error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; return; } if (struct_type.setLayoutWip(ip)) { const msg = try sema.errMsg( ty.srcLoc(zcu), "struct '{}' depends on itself", .{ty.fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); } defer struct_type.clearLayoutWip(ip); const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len); const sizes = try sema.arena.alloc(u64, struct_type.field_types.len); var big_align: Alignment = .@"1"; for (aligns, sizes, 0..) |*field_align, *field_size, i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) { struct_type.offsets.get(ip)[i] = 0; field_size.* = 0; field_align.* = .none; continue; } field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); return err; }, else => return err, }; field_align.* = try pt.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, .sema, ); big_align = big_align.maxStrict(field_align.*); } if (struct_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { const msg = try sema.errMsg( ty.srcLoc(zcu), "struct layout depends on it having runtime bits", .{}, ); return sema.failWithOwnedErrorMsg(null, msg); } if (struct_type.flagsUnordered(ip).assumed_pointer_aligned and big_align.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8)))) { const msg = try sema.errMsg( ty.srcLoc(zcu), "struct layout depends on being pointer aligned", .{}, ); return sema.failWithOwnedErrorMsg(null, msg); } if (struct_type.hasReorderedFields()) { const runtime_order = struct_type.runtime_order.get(ip); for (runtime_order, 0..) 
|*ro, i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) { ro.* = .omitted; } else { ro.* = @enumFromInt(i); } } const RuntimeOrder = InternPool.LoadedStructType.RuntimeOrder; const AlignSortContext = struct { aligns: []const Alignment, fn lessThan(ctx: @This(), a: RuntimeOrder, b: RuntimeOrder) bool { if (a == .omitted) return false; if (b == .omitted) return true; const a_align = ctx.aligns[@intFromEnum(a)]; const b_align = ctx.aligns[@intFromEnum(b)]; return a_align.compare(.gt, b_align); } }; if (struct_type.isTuple(ip) or !zcu.backendSupportsFeature(.field_reordering)) { // TODO: don't handle tuples differently. This logic exists only because it // uncovers latent bugs if removed. Fix the latent bugs and remove this logic! // Likewise, implement field reordering support in all the backends! // This logic does not reorder fields; it only moves the omitted ones to the end // so that logic elsewhere does not need to special-case tuples. var i: usize = 0; var off: usize = 0; while (i + off < runtime_order.len) { if (runtime_order[i + off] == .omitted) { off += 1; continue; } runtime_order[i] = runtime_order[i + off]; i += 1; } @memset(runtime_order[i..], .omitted); } else { mem.sortUnstable(RuntimeOrder, runtime_order, AlignSortContext{ .aligns = aligns, }, AlignSortContext.lessThan); } } // Calculate size, alignment, and field offsets. const offsets = struct_type.offsets.get(ip); var it = struct_type.iterateRuntimeOrder(ip); var offset: u64 = 0; while (it.next()) |i| { offsets[i] = @intCast(aligns[i].forward(offset)); offset = offsets[i] + sizes[i]; } struct_type.setLayoutResolved(ip, @intCast(big_align.forward(offset)), big_align); _ = try sema.typeRequiresComptime(ty); } fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructType) CompileError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const cau_index = struct_type.cau.unwrap().?; const zir = zcu.namespacePtr(struct_type.namespace.unwrap().?).fileScope(zcu).zir; var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ .pt = pt, .gpa = gpa, .arena = analysis_arena.allocator(), .code = zir, .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); var block: Block = .{ .parent = null, .sema = &sema, .namespace = ip.getCau(cau_index).namespace, .instructions = .{}, .inlining = null, .is_comptime = true, .src_base_inst = struct_type.zir_index.unwrap().?, .type_name_ctx = struct_type.name, }; defer assert(block.instructions.items.len == 0); const fields_bit_sum = blk: { var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); accumulator += try field_ty.bitSizeAdvanced(pt, .sema); } break :blk accumulator; }; const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); if (small.has_backing_int) { var extra_index: usize = extended.operand + 
        @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;
    extra_index += @intFromBool(small.has_fields_len);
    extra_index += @intFromBool(small.has_decls_len);
    extra_index += captures_len;
    const backing_int_body_len = zir.extra[extra_index];
    extra_index += 1;
    const backing_int_src: LazySrcLoc = .{
        .base_node_inst = struct_type.zir_index.unwrap().?,
        .offset = .{ .node_offset_container_tag = 0 },
    };
    const backing_int_ty = blk: {
        if (backing_int_body_len == 0) {
            const backing_int_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
            break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
        } else {
            const body = zir.bodySlice(extra_index, backing_int_body_len);
            const ty_ref = try sema.resolveInlineBody(&block, body, zir_index);
            break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
        }
    };
    try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
    struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
} else {
    if (fields_bit_sum > std.math.maxInt(u16)) {
        return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
    }
    const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum));
    struct_type.setBackingIntType(ip, backing_int_ty.toIntern());
}
try sema.flushExports();
}
fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
    const pt = sema.pt;
    const mod = pt.zcu;
    if (!backing_int_ty.isInt(mod)) {
        return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(pt)});
    }
    if (backing_int_ty.bitSize(pt) != fields_bit_sum) {
        return sema.fail(
            block,
            src,
            "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
            .{ backing_int_ty.fmt(pt), backing_int_ty.bitSize(pt), fields_bit_sum },
        );
    }
}
fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const pt = sema.pt;
    if (!ty.isIndexable(pt.zcu)) {
        const msg = msg: {
            const msg = try sema.errMsg(src, "type '{}' does not support indexing", .{ty.fmt(pt)});
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(src, msg, "operand must be an array, slice, tuple, or vector", .{});
            break :msg msg;
        };
        return sema.failWithOwnedErrorMsg(block, msg);
    }
}
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
    const pt = sema.pt;
    const mod = pt.zcu;
    if (ty.zigTypeTag(mod) == .Pointer) {
        switch (ty.ptrSize(mod)) {
            .Slice, .Many, .C => return,
            .One => {
                const elem_ty = ty.childType(mod);
                if (elem_ty.zigTypeTag(mod) == .Array) return;
                // TODO https://github.com/ziglang/zig/issues/15479
                // if (elem_ty.isTuple()) return;
            },
        }
    }
    const msg = msg: {
        const msg = try sema.errMsg(src, "type '{}' is not an indexable pointer", .{ty.fmt(pt)});
        errdefer msg.destroy(sema.gpa);
        try sema.errNote(src, msg, "operand must be a slice, a many pointer, or a pointer to an array", .{});
        break :msg msg;
    };
    return sema.failWithOwnedErrorMsg(block, msg);
}
/// Resolve a union's alignment only without triggering resolution of its layout.
/// Asserts that the alignment is not yet resolved.
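/// For example (illustrative): `union { a: u8, b: u64 }` resolves to the
/// alignment of `u64` (8 on most targets), without computing the union's size.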
pub fn resolveUnionAlignment( sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType, ) SemaError!void { const zcu = sema.pt.zcu; const ip = &zcu.intern_pool; const target = zcu.getTarget(); assert(sema.owner.unwrap().cau == union_type.cau); assert(!union_type.haveLayout(ip)); const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations // might require explicit alignment. if (union_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return; try sema.resolveTypeFieldsUnion(ty, union_type); var max_align: Alignment = .@"1"; for (0..union_type.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); if (!(try sema.typeHasRuntimeBits(field_ty))) continue; const explicit_align = union_type.fieldAlign(ip, field_index); const field_align = if (explicit_align != .none) explicit_align else try sema.typeAbiAlignment(field_ty); max_align = max_align.max(field_align); } union_type.setAlignment(ip, max_align); } /// This logic must be kept in sync with `Module.getUnionLayout`. pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { const pt = sema.pt; const ip = &pt.zcu.intern_pool; try sema.resolveTypeFieldsUnion(ty, ip.loadUnionType(ty.ip_index)); // Load again, since the tag type might have changed due to resolution. const union_type = ip.loadUnionType(ty.ip_index); assert(sema.owner.unwrap().cau == union_type.cau); const old_flags = union_type.flagsUnordered(ip); switch (old_flags.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try sema.errMsg( ty.srcLoc(pt.zcu), "union '{}' depends on itself", .{ty.fmt(pt)}, ); return sema.failWithOwnedErrorMsg(null, msg); }, .have_layout, .fully_resolved_wip, .fully_resolved => return, } errdefer union_type.setStatusIfLayoutWip(ip, old_flags.status); union_type.setStatus(ip, .layout_wip); var max_size: u64 = 0; var max_align: Alignment = .@"1"; for (0..union_type.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); if (try sema.typeRequiresComptime(field_ty) or field_ty.zigTypeTag(pt.zcu) == .NoReturn) continue; // TODO: should this affect alignment? max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; try sema.addFieldErrNote(ty, field_index, msg, "while checking this field", .{}); return err; }, else => return err, }); const explicit_align = union_type.fieldAlign(ip, field_index); const field_align = if (explicit_align != .none) explicit_align else try sema.typeAbiAlignment(field_ty); max_align = max_align.max(field_align); } const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty)); const size, const alignment, const padding = if (has_runtime_tag) layout: { const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty); const tag_align = try sema.typeAbiAlignment(enum_tag_type); const tag_size = try sema.typeAbiSize(enum_tag_type); // Put the tag before or after the payload depending on which one's // alignment is greater. 
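// Illustrative example (exact numbers are target-dependent): for
// `union(enum) { a: u8, b: u64 }` on a typical 64-bit target, the payload
// occupies 8 bytes at alignment 8 while the generated tag fits in one byte, so
// the tag goes after the payload and the total size rounds up to 16, leaving
// 7 bytes of trailing padding.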
var size: u64 = 0;
var padding: u32 = 0;
if (tag_align.order(max_align).compare(.gte)) {
    // {Tag, Payload}
    size += tag_size;
    size = max_align.forward(size);
    size += max_size;
    const prev_size = size;
    size = tag_align.forward(size);
    padding = @intCast(size - prev_size);
} else {
    // {Payload, Tag}
    size += max_size;
    size = switch (pt.zcu.getTarget().ofmt) {
        .c => max_align,
        else => tag_align,
    }.forward(size);
    size += tag_size;
    const prev_size = size;
    size = max_align.forward(size);
    padding = @intCast(size - prev_size);
}
break :layout .{ size, max_align.max(tag_align), padding };
} else .{ max_align.forward(max_size), max_align, 0 };
union_type.setHaveLayout(ip, @intCast(size), padding, alignment);
if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
    const msg = try sema.errMsg(
        ty.srcLoc(pt.zcu),
        "union layout depends on it having runtime bits",
        .{},
    );
    return sema.failWithOwnedErrorMsg(null, msg);
}
if (union_type.flagsUnordered(ip).assumed_pointer_aligned and
    alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8))))
{
    const msg = try sema.errMsg(
        ty.srcLoc(pt.zcu),
        "union layout depends on being pointer aligned",
        .{},
    );
    return sema.failWithOwnedErrorMsg(null, msg);
}
}
/// Returns `error.AnalysisFail` if any of the types (recursively) failed to
/// be resolved.
pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
    try sema.resolveStructLayout(ty);
    const pt = sema.pt;
    const mod = pt.zcu;
    const ip = &mod.intern_pool;
    const struct_type = mod.typeToStruct(ty).?;
    assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?);
    if (struct_type.setFullyResolved(ip)) return;
    errdefer struct_type.clearFullyResolved(ip);
    // After we have resolved the struct layout, we have to go over the fields
    // again to make sure pointer fields get their child types resolved as well.
    // See also similar code for unions.
    for (0..struct_type.field_types.len) |i| {
        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
        try field_ty.resolveFully(pt);
    }
}
pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
    try sema.resolveUnionLayout(ty);
    const pt = sema.pt;
    const mod = pt.zcu;
    const ip = &mod.intern_pool;
    const union_obj = mod.typeToUnion(ty).?;
    assert(sema.owner.unwrap().cau == union_obj.cau);
    switch (union_obj.flagsUnordered(ip).status) {
        .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
        .fully_resolved_wip, .fully_resolved => return,
    }
    {
        // After we have resolved the union layout, we have to go over the fields
        // again to make sure pointer fields get their child types resolved as well.
        // See also similar code for structs.
        const prev_status = union_obj.flagsUnordered(ip).status;
        errdefer union_obj.setStatus(ip, prev_status);
        union_obj.setStatus(ip, .fully_resolved_wip);
        for (0..union_obj.field_types.len) |field_index| {
            const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
            try field_ty.resolveFully(pt);
        }
        union_obj.setStatus(ip, .fully_resolved);
    }
    // And let's not forget comptime-only status.
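    // (For example, `union { a: type }` remains comptime-only even though its
    // layout is now resolved; `typeRequiresComptime` computes and caches that flag.)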
    _ = try sema.typeRequiresComptime(ty);
}
pub fn resolveTypeFieldsStruct(
    sema: *Sema,
    ty: InternPool.Index,
    struct_type: InternPool.LoadedStructType,
) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?);
    if (struct_type.haveFieldTypes(ip)) return;
    if (struct_type.setFieldTypesWip(ip)) {
        const msg = try sema.errMsg(
            Type.fromInterned(ty).srcLoc(zcu),
            "struct '{}' depends on itself",
            .{Type.fromInterned(ty).fmt(pt)},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }
    defer struct_type.clearFieldTypesWip(ip);
    semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) {
        error.AnalysisFail, error.OutOfMemory => |e| return e,
        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
    };
}
pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const struct_type = zcu.typeToStruct(ty) orelse return;
    assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?);
    // Inits can start out already resolved.
    if (struct_type.haveFieldInits(ip)) return;
    try sema.resolveStructLayout(ty);
    if (struct_type.setInitsWip(ip)) {
        const msg = try sema.errMsg(
            ty.srcLoc(zcu),
            "struct '{}' depends on itself",
            .{ty.fmt(pt)},
        );
        return sema.failWithOwnedErrorMsg(null, msg);
    }
    defer struct_type.clearInitsWip(ip);
    semaStructFieldInits(pt, sema.arena, struct_type) catch |err| switch (err) {
        error.AnalysisFail, error.OutOfMemory => |e| return e,
        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
    };
    struct_type.setHaveFieldInits(ip);
}
pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().cau == union_type.cau);
    switch (union_type.flagsUnordered(ip).status) {
        .none => {},
        .field_types_wip => {
            const msg = try sema.errMsg(
                ty.srcLoc(zcu),
                "union '{}' depends on itself",
                .{ty.fmt(pt)},
            );
            return sema.failWithOwnedErrorMsg(null, msg);
        },
        .have_field_types,
        .have_layout,
        .layout_wip,
        .fully_resolved_wip,
        .fully_resolved,
        => return,
    }
    union_type.setStatus(ip, .field_types_wip);
    errdefer union_type.setStatus(ip, .none);
    semaUnionFields(pt, sema.arena, ty.toIntern(), union_type) catch |err| switch (err) {
        error.AnalysisFail, error.OutOfMemory => |e| return e,
        error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable,
    };
    union_type.setStatus(ip, .have_field_types);
}
/// Returns a normal error set corresponding to the fully populated inferred
/// error set.
fn resolveInferredErrorSet(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ies_index: InternPool.Index,
) CompileError!InternPool.Index {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const func_index = ip.iesFuncIndex(ies_index);
    const func = zcu.funcInfo(func_index);
    try sema.declareDependency(.{ .interned = func_index }); // resolved IES
    try zcu.maybeUnresolveIes(func_index);
    const resolved_ty = func.resolvedErrorSetUnordered(ip);
    if (resolved_ty != .none) return resolved_ty;
    if (zcu.analysis_in_progress.contains(AnalUnit.wrap(.{ .func = func_index }))) {
        return sema.fail(block, src, "unable to resolve inferred error set", .{});
    }
    // In order to ensure that all dependencies are properly added to the set, we
    // need to ensure that the body of the function which owns this inferred
    // error set is analyzed.
    // However, in the case of comptime/inline function calls with inferred
    // error sets, each call gets an ad-hoc InferredErrorSet object, which has
    // no corresponding function body.
    const ies_func_info = zcu.typeToFunc(Type.fromInterned(func.ty)).?;
    // If the IES was declared by an inline function with a generic return type,
    // the return_type will be generic_poison: an inline function does not create
    // a new declaration, and its IES has already been filled in by `analyzeCall`,
    // so we can simply skip this case here.
    if (ies_func_info.return_type == .generic_poison_type) {
        assert(ies_func_info.cc == .Inline);
    } else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
        if (ies_func_info.is_generic) {
            return sema.failWithOwnedErrorMsg(block, msg: {
                const msg = try sema.errMsg(src, "unable to resolve inferred error set of generic function", .{});
                errdefer msg.destroy(sema.gpa);
                try sema.errNote(zcu.navSrcLoc(func.owner_nav), msg, "generic function declared here", .{});
                break :msg msg;
            });
        }
        // In this case we are dealing with the actual InferredErrorSet object that
        // corresponds to the function, not one created to track an inline/comptime call.
        try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = func_index }));
        try pt.ensureFuncBodyAnalyzed(func_index);
    }
    // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
    // which calls `resolveInferredErrorSetPtr`.
    const final_resolved_ty = func.resolvedErrorSetUnordered(ip);
    assert(final_resolved_ty != .none);
    return final_resolved_ty;
}
pub fn resolveInferredErrorSetPtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ies: *InferredErrorSet,
) CompileError!void {
    const pt = sema.pt;
    const ip = &pt.zcu.intern_pool;
    if (ies.resolved != .none) return;
    const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern());
    for (ies.inferred_error_sets.keys()) |other_ies_index| {
        if (ies_index == other_ies_index) continue;
        switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) {
            .anyerror_type => {
                ies.resolved = .anyerror_type;
                return;
            },
            else => |error_set_ty_index| {
                const names = ip.indexToKey(error_set_ty_index).error_set_type.names;
                for (names.get(ip)) |name| {
                    try ies.errors.put(sema.arena, name, {});
                }
            },
        }
    }
    const resolved_error_set_ty = try pt.errorSetFromUnsortedNames(ies.errors.keys());
    ies.resolved = resolved_error_set_ty.toIntern();
}
fn resolveAdHocInferredErrorSet(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    value: InternPool.Index,
) CompileError!InternPool.Index {
    const pt = sema.pt;
    const mod = pt.zcu;
    const gpa = sema.gpa;
    const ip = &mod.intern_pool;
    const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
    if (new_ty == .none) return value;
    return ip.getCoerced(gpa, pt.tid, value, new_ty);
}
fn resolveAdHocInferredErrorSetTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: InternPool.Index,
) CompileError!InternPool.Index {
    const ies = sema.fn_ret_ty_ies orelse return .none;
    const pt = sema.pt;
    const mod = pt.zcu;
    const ip = &mod.intern_pool;
    const error_union_info = switch (ip.indexToKey(ty)) {
        .error_union_type => |x| x,
        else => return .none,
    };
    if (error_union_info.error_set_type != .adhoc_inferred_error_set_type) return .none;
    try sema.resolveInferredErrorSetPtr(block, src, ies);
    const new_ty = try pt.intern(.{ .error_union_type = .{
        .error_set_type = ies.resolved,
        .payload_type = error_union_info.payload_type,
    } });
    return new_ty;
}
fn resolveInferredErrorSetTy(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ty: InternPool.Index,
) CompileError!InternPool.Index {
    const
pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; if (ty == .anyerror_type) return ty; switch (ip.indexToKey(ty)) { .error_set_type => return ty, .inferred_error_set_type => return sema.resolveInferredErrorSet(block, src, ty), else => unreachable, } } fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct { /// fields_len usize, Zir.Inst.StructDecl.Small, /// extra_index usize, } { const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len; const captures_len = if (small.has_captures_len) blk: { const captures_len = zir.extra[extra_index]; extra_index += 1; break :blk captures_len; } else 0; const fields_len = if (small.has_fields_len) blk: { const fields_len = zir.extra[extra_index]; extra_index += 1; break :blk fields_len; } else 0; const decls_len = if (small.has_decls_len) decls_len: { const decls_len = zir.extra[extra_index]; extra_index += 1; break :decls_len decls_len; } else 0; extra_index += captures_len; // The backing integer cannot be handled until `resolveStructLayout()`. if (small.has_backing_int) { const backing_int_body_len = zir.extra[extra_index]; extra_index += 1; // backing_int_body_len if (backing_int_body_len == 0) { extra_index += 1; // backing_int_ref } else { extra_index += backing_int_body_len; // backing_int_body_inst } } // Skip over decls. extra_index += decls_len; return .{ fields_len, small, extra_index }; } fn semaStructFields( pt: Zcu.PerThread, arena: Allocator, struct_type: InternPool.LoadedStructType, ) CompileError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const cau_index = struct_type.cau.unwrap().?; const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); if (fields_len == 0) switch (struct_type.layout) { .@"packed" => { try semaBackingIntType(pt, struct_type); return; }, .auto, .@"extern" => { struct_type.setLayoutResolved(ip, 0, .none); return; }, }; var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ .pt = pt, .gpa = gpa, .arena = arena, .code = zir, .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .namespace = namespace_index, .instructions = .{}, .inlining = null, .is_comptime = true, .src_base_inst = struct_type.zir_index.unwrap().?, .type_name_ctx = struct_type.name, }; defer assert(block_scope.instructions.items.len == 0); const Field = struct { type_body_len: u32 = 0, align_body_len: u32 = 0, init_body_len: u32 = 0, type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); var any_inits = false; var any_aligned = false; { const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; const flags_index = extra_index; var bit_bag_index: usize = flags_index; extra_index += bit_bags_count; var cur_bit_bag: u32 = 
undefined; var field_i: u32 = 0; while (field_i < fields_len) : (field_i += 1) { if (field_i % fields_per_u32 == 0) { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_init = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; if (is_comptime) struct_type.setFieldComptime(ip, field_i); var opt_field_name_zir: ?[:0]const u8 = null; if (!small.is_tuple) { opt_field_name_zir = zir.nullTerminatedString(@enumFromInt(zir.extra[extra_index])); extra_index += 1; } extra_index += 1; // doc_comment fields[field_i] = .{}; if (has_type_body) { fields[field_i].type_body_len = zir.extra[extra_index]; } else { fields[field_i].type_ref = @enumFromInt(zir.extra[extra_index]); } extra_index += 1; // This string needs to outlive the ZIR code. if (opt_field_name_zir) |field_name_zir| { const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); assert(struct_type.addFieldName(ip, field_name) == null); } if (has_align) { fields[field_i].align_body_len = zir.extra[extra_index]; extra_index += 1; any_aligned = true; } if (has_init) { fields[field_i].init_body_len = zir.extra[extra_index]; extra_index += 1; any_inits = true; } } } // Next we do only types and alignments, saving the inits for a second pass, // so that init values may depend on type layout. for (fields, 0..) |zir_field, field_i| { const ty_src: LazySrcLoc = .{ .base_node_inst = struct_type.zir_index.unwrap().?, .offset = .{ .container_field_type = @intCast(field_i) }, }; const field_ty: Type = ty: { if (zir_field.type_ref != .none) { break :ty try sema.resolveType(&block_scope, ty_src, zir_field.type_ref); } assert(zir_field.type_body_len != 0); const body = zir.bodySlice(extra_index, zir_field.type_body_len); extra_index += body.len; const ty_ref = try sema.resolveInlineBody(&block_scope, body, zir_index); break :ty try sema.analyzeAsType(&block_scope, ty_src, ty_ref); }; if (field_ty.isGenericPoison()) { return error.GenericPoison; } struct_type.field_types.get(ip)[field_i] = field_ty.toIntern(); if (field_ty.zigTypeTag(zcu) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(ty_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } if (field_ty.zigTypeTag(zcu) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(ty_src, "struct fields cannot be 'noreturn'", .{}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } switch (struct_type.layout) { .@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) { const msg = msg: { const msg = try sema.errMsg(ty_src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); }, .@"packed" => if (!try sema.validatePackedType(field_ty)) { const msg = msg: { const msg = try sema.errMsg(ty_src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); 
                errdefer msg.destroy(sema.gpa);
                try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(&block_scope, msg);
        },
        else => {},
    }
    if (zir_field.align_body_len > 0) {
        const body = zir.bodySlice(extra_index, zir_field.align_body_len);
        extra_index += body.len;
        const align_ref = try sema.resolveInlineBody(&block_scope, body, zir_index);
        const align_src: LazySrcLoc = .{
            .base_node_inst = struct_type.zir_index.unwrap().?,
            .offset = .{ .container_field_align = @intCast(field_i) },
        };
        const field_align = try sema.analyzeAsAlign(&block_scope, align_src, align_ref);
        struct_type.field_aligns.get(ip)[field_i] = field_align;
    }
    extra_index += zir_field.init_body_len;
}
struct_type.clearFieldTypesWip(ip);
if (!any_inits) struct_type.setHaveFieldInits(ip);
try sema.flushExports();
}
// This logic must be kept in sync with `semaStructFields`.
fn semaStructFieldInits(
    pt: Zcu.PerThread,
    arena: Allocator,
    struct_type: InternPool.LoadedStructType,
) CompileError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    assert(!struct_type.haveFieldInits(ip));
    const cau_index = struct_type.cau.unwrap().?;
    const namespace_index = ip.getCau(cau_index).namespace;
    const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir;
    const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail;
    const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
    var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();
    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = arena,
        .code = zir,
        .owner = AnalUnit.wrap(.{ .cau = cau_index }),
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();
    var block_scope: Block = .{
        .parent = null,
        .sema = &sema,
        .namespace = namespace_index,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
        .src_base_inst = struct_type.zir_index.unwrap().?,
        .type_name_ctx = struct_type.name,
    };
    defer assert(block_scope.instructions.items.len == 0);
    const Field = struct {
        type_body_len: u32 = 0,
        align_body_len: u32 = 0,
        init_body_len: u32 = 0,
    };
    const fields = try sema.arena.alloc(Field, fields_len);
    var any_inits = false;
    {
        const bits_per_field = 4;
        const fields_per_u32 = 32 / bits_per_field;
        const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
        const flags_index = extra_index;
        var bit_bag_index: usize = flags_index;
        extra_index += bit_bags_count;
        var cur_bit_bag: u32 = undefined;
        var field_i: u32 = 0;
        while (field_i < fields_len) : (field_i += 1) {
            if (field_i % fields_per_u32 == 0) {
                cur_bit_bag = zir.extra[bit_bag_index];
                bit_bag_index += 1;
            }
            const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
            // Shift by 2 to also skip the `is_comptime` bit, which this pass
            // does not need.
            cur_bit_bag >>= 2;
            const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
            cur_bit_bag >>= 1;
            if (!small.is_tuple) {
                extra_index += 1; // field_name
            }
            extra_index += 1; // doc_comment
            fields[field_i] = .{};
            if (has_type_body) fields[field_i].type_body_len = zir.extra[extra_index];
            extra_index += 1;
            if (has_align) {
                fields[field_i].align_body_len = zir.extra[extra_index];
                extra_index += 1;
            }
            if (has_init) {
                fields[field_i].init_body_len = zir.extra[extra_index];
                extra_index += 1;
                any_inits = true;
            }
        }
    }
    if (any_inits) {
        for (fields, 0..)
|zir_field, field_i| { extra_index += zir_field.type_body_len; extra_index += zir_field.align_body_len; const body = zir.bodySlice(extra_index, zir_field.init_body_len); extra_index += zir_field.init_body_len; if (body.len == 0) continue; // Pre-populate the type mapping the body expects to be there. // In init bodies, the zir index of the struct itself is used // to refer to the current field type. const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]); const type_ref = Air.internedToRef(field_ty.toIntern()); try sema.inst_map.ensureSpaceForInstructions(sema.gpa, &.{zir_index}); sema.inst_map.putAssumeCapacity(zir_index, type_ref); const init_src: LazySrcLoc = .{ .base_node_inst = struct_type.zir_index.unwrap().?, .offset = .{ .container_field_value = @intCast(field_i) }, }; const init = try sema.resolveInlineBody(&block_scope, body, zir_index); const coerced = try sema.coerce(&block_scope, field_ty, init, init_src); const default_val = try sema.resolveValue(coerced) orelse { return sema.failWithNeededComptime(&block_scope, init_src, .{ .needed_comptime_reason = "struct field default value must be comptime-known", }); }; if (default_val.canMutateComptimeVarState(zcu)) { return sema.fail(&block_scope, init_src, "field default value contains reference to comptime-mutable memory", .{}); } struct_type.field_inits.get(ip)[field_i] = default_val.toIntern(); } } try sema.flushExports(); } fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_ty: InternPool.Index, union_type: InternPool.LoadedUnionType) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const cau_index = union_type.cau; const zir = zcu.namespacePtr(union_type.namespace).fileScope(zcu).zir; const zir_index = union_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .union_decl); const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small); const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand); var extra_index: usize = extra.end; const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: { const ty_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); extra_index += 1; break :blk ty_ref; } else .none; const captures_len = if (small.has_captures_len) blk: { const captures_len = zir.extra[extra_index]; extra_index += 1; break :blk captures_len; } else 0; const body_len = if (small.has_body_len) blk: { const body_len = zir.extra[extra_index]; extra_index += 1; break :blk body_len; } else 0; const fields_len = if (small.has_fields_len) blk: { const fields_len = zir.extra[extra_index]; extra_index += 1; break :blk fields_len; } else 0; const decls_len = if (small.has_decls_len) decls_len: { const decls_len = zir.extra[extra_index]; extra_index += 1; break :decls_len decls_len; } else 0; // Skip over captures and decls. 
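// (Neither is needed here: this pass only computes the union's field types,
// alignments, and tag values.)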
extra_index += captures_len + decls_len; const body = zir.bodySlice(extra_index, body_len); extra_index += body.len; var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ .pt = pt, .gpa = gpa, .arena = arena, .code = zir, .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .namespace = union_type.namespace, .instructions = .{}, .inlining = null, .is_comptime = true, .src_base_inst = union_type.zir_index, .type_name_ctx = union_type.name, }; defer assert(block_scope.instructions.items.len == 0); const src = block_scope.nodeOffset(0); if (body.len != 0) { _ = try sema.analyzeInlineBody(&block_scope, body, zir_index); } var int_tag_ty: Type = undefined; var enum_field_names: []InternPool.NullTerminatedString = &.{}; var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; var explicit_tags_seen: []bool = &.{}; if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .base_node_inst = union_type.zir_index, .offset = .{ .node_offset_container_tag = 0 }, }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; if (int_tag_ty.zigTypeTag(zcu) != .Int and int_tag_ty.zigTypeTag(zcu) != .ComptimeInt) { return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(pt)}); } if (fields_len > 0) { const field_count_val = try pt.intValue(Type.comptime_int, fields_len - 1); if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ int_tag_ty.fmt(pt), fields_len - 1, }); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len); } } else { // The provided type is the enum tag type. const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) { .enum_type => ip.loadEnumType(provided_ty.toIntern()), else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}), }; union_type.setTagType(ip, provided_ty.toIntern()); // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); @memset(explicit_tags_seen, false); } } else { // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis // purposes, we still auto-generate an enum tag type the same way. That the union is // untagged is represented by the Type tag (union vs union_tagged). 
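// For example, `union { a: u8, b: u16 }` still gets a generated tag enum with
// fields `a` and `b` (via `generateUnionTagTypeSimple` below); it simply is not
// observable as a tag type in the language.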
enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } var field_types: std.ArrayListUnmanaged(InternPool.Index) = .{}; var field_aligns: std.ArrayListUnmanaged(InternPool.Alignment) = .{}; try field_types.ensureTotalCapacityPrecise(sema.arena, fields_len); if (small.any_aligned_fields) try field_aligns.ensureTotalCapacityPrecise(sema.arena, fields_len); const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; var bit_bag_index: usize = extra_index; extra_index += bit_bags_count; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; var last_tag_val: ?Value = null; while (field_i < fields_len) : (field_i += 1) { if (field_i % fields_per_u32 == 0) { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]); const field_name_zir = zir.nullTerminatedString(field_name_index); extra_index += 1; // doc_comment extra_index += 1; const field_type_ref: Zir.Inst.Ref = if (has_type) blk: { const field_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); extra_index += 1; break :blk field_type_ref; } else .none; const align_ref: Zir.Inst.Ref = if (has_align) blk: { const align_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); extra_index += 1; break :blk align_ref; } else .none; const tag_ref: Air.Inst.Ref = if (has_tag) blk: { const tag_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(tag_ref); } else .none; const name_src: LazySrcLoc = .{ .base_node_inst = union_type.zir_index, .offset = .{ .container_field_name = field_i }, }; const value_src: LazySrcLoc = .{ .base_node_inst = union_type.zir_index, .offset = .{ .container_field_value = field_i }, }; const align_src: LazySrcLoc = .{ .base_node_inst = union_type.zir_index, .offset = .{ .container_field_align = field_i }, }; const type_src: LazySrcLoc = .{ .base_node_inst = union_type.zir_index, .offset = .{ .container_field_type = field_i }, }; if (enum_field_vals.capacity() > 0) { const enum_tag_val = if (tag_ref != .none) blk: { const val = try sema.semaUnionFieldVal(&block_scope, value_src, int_tag_ty, tag_ref); last_tag_val = val; break :blk val; } else blk: { const val = if (last_tag_val) |val| try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined) else try pt.intValue(int_tag_ty, 0); last_tag_val = val; break :blk val; }; const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern()); if (gop.found_existing) { const other_value_src: LazySrcLoc = .{ .base_node_inst = union_type.zir_index, .offset = .{ .container_field_value = @intCast(gop.index) }, }; const msg = msg: { const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{enum_tag_val.fmtValueSema(pt, &sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_value_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } } // This string needs to outlive the ZIR code. 
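// (Interning the name in the `InternPool` gives it the required lifetime.)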
const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); if (enum_field_names.len != 0) { enum_field_names[field_i] = field_name; } const field_ty: Type = if (!has_type) Type.void else if (field_type_ref == .none) Type.noreturn else try sema.resolveType(&block_scope, type_src, field_type_ref); if (field_ty.isGenericPoison()) { return error.GenericPoison; } if (explicit_tags_seen.len > 0) { const tag_ty = union_type.tagTypeUnordered(ip); const tag_info = ip.loadEnumType(tag_ty); const enum_index = tag_info.nameIndex(ip, field_name) orelse { return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{ field_name.fmt(ip), Type.fromInterned(tag_ty).fmt(pt), }); }; // No check for duplicate because the check already happened in order // to create the enum type in the first place. assert(!explicit_tags_seen[enum_index]); explicit_tags_seen[enum_index] = true; // Enforce the enum fields and the union fields being in the same order. if (enum_index != field_i) { const msg = msg: { const enum_field_src: LazySrcLoc = .{ .base_node_inst = tag_info.zir_index.unwrap().?, .offset = .{ .container_field_name = enum_index }, }; const msg = try sema.errMsg(name_src, "union field '{}' ordered differently than corresponding enum field", .{ field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.errNote(enum_field_src, msg, "enum field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } } if (field_ty.zigTypeTag(zcu) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(type_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } const layout = union_type.flagsUnordered(ip).layout; if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { const msg = try sema.errMsg(type_src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, type_src, field_ty, .union_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) { const msg = msg: { const msg = try sema.errMsg(type_src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, type_src, field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } field_types.appendAssumeCapacity(field_ty.toIntern()); if (small.any_aligned_fields) { field_aligns.appendAssumeCapacity(if (align_ref != .none) try sema.resolveAlign(&block_scope, align_src, align_ref) else .none); } else { assert(align_ref == .none); } } union_type.setFieldTypes(ip, field_types.items); union_type.setFieldAligns(ip, field_aligns.items); if (explicit_tags_seen.len > 0) { const tag_ty = union_type.tagTypeUnordered(ip); const tag_info = ip.loadEnumType(tag_ty); if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(src, "enum field(s) missing in union", .{}); errdefer msg.destroy(sema.gpa); for (tag_info.names.get(ip), 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{ field_name.fmt(ip), }); } try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty)); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } } else if (enum_field_vals.count() > 0) { const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_ty, union_type.name); union_type.setTagType(ip, enum_ty); } else { const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_ty, union_type.name); union_type.setTagType(ip, enum_ty); } try sema.flushExports(); } fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Type, tag_ref: Air.Inst.Ref) CompileError!Value { const coerced = try sema.coerce(block, int_tag_ty, tag_ref, src); return sema.resolveConstDefinedValue(block, src, coerced, .{ .needed_comptime_reason = "enum tag value must be comptime-known", }); } fn generateUnionTagTypeNumbered( sema: *Sema, block: *Block, enum_field_names: []const InternPool.NullTerminatedString, enum_field_vals: []const InternPool.Index, union_type: InternPool.Index, union_name: InternPool.NullTerminatedString, ) !InternPool.Index { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; const name = try ip.getOrPutStringFmt( gpa, pt.tid, "@typeInfo({}).Union.tag_type.?", .{union_name.fmt(ip)}, .no_embedded_nulls, ); const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ .name = name, .owner_union_ty = union_type, .tag_ty = if (enum_field_vals.len == 0) (try pt.intType(.unsigned, 0)).toIntern() else ip.typeOf(enum_field_vals[0]), .names = enum_field_names, .values = enum_field_vals, .tag_mode = .explicit, .parent_namespace = block.namespace, }); return enum_ty; } fn generateUnionTagTypeSimple( sema: *Sema, block: *Block, enum_field_names: []const InternPool.NullTerminatedString, union_type: InternPool.Index, union_name: InternPool.NullTerminatedString, ) !InternPool.Index { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; const gpa = sema.gpa; const name = try ip.getOrPutStringFmt( gpa, pt.tid, "@typeInfo({}).Union.tag_type.?", .{union_name.fmt(ip)}, .no_embedded_nulls, ); const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ .name = name, .owner_union_ty = union_type, .tag_ty = if (enum_field_names.len == 0) (try pt.intType(.unsigned, 0)).toIntern() else (try pt.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, .values = &.{}, .tag_mode = .auto, .parent_namespace = block.namespace, }); return enum_ty; } /// There is another implementation of this in `Type.onePossibleValue`. This one /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. The one in `Type` is for calling during codegen and asserts /// that the types are already resolved. 
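/// For example, `u0`, `void`, and `struct { a: u0, b: void }` each have exactly one
/// possible value, while `u1` and `bool` do not. A hypothetical call site:
///
///     if (try sema.typeHasOnePossibleValue(operand_ty)) |opv| {
///         // No runtime AIR is needed; the operand is the interned OPV.
///         return Air.internedToRef(opv.toIntern());
///     }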
/// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; return switch (ty.toIntern()) { .u0_type, .i0_type, => try pt.intValue(ty, 0), .u1_type, .u8_type, .i8_type, .u16_type, .i16_type, .u29_type, .u32_type, .i32_type, .u64_type, .i64_type, .u80_type, .u128_type, .i128_type, .usize_type, .isize_type, .c_char_type, .c_short_type, .c_ushort_type, .c_int_type, .c_uint_type, .c_long_type, .c_ulong_type, .c_longlong_type, .c_ulonglong_type, .c_longdouble_type, .f16_type, .f32_type, .f64_type, .f80_type, .f128_type, .anyopaque_type, .bool_type, .type_type, .anyerror_type, .adhoc_inferred_error_set_type, .comptime_int_type, .comptime_float_type, .enum_literal_type, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, .single_const_pointer_to_comptime_int_type, .slice_const_u8_type, .slice_const_u8_sentinel_0_type, .anyerror_void_error_union_type, => null, .void_type => Value.void, .noreturn_type => Value.@"unreachable", .anyframe_type => unreachable, .null_type => Value.null, .undefined_type => Value.undef, .optional_noreturn_type => try pt.nullValue(ty), .generic_poison_type => error.GenericPoison, .empty_struct_type => Value.empty_struct, // values, not types .undef, .zero, .zero_usize, .zero_u8, .one, .one_usize, .one_u8, .four_u8, .negative_one, .void_value, .unreachable_value, .null_value, .bool_true, .bool_false, .empty_struct, .generic_poison, // invalid .none, => unreachable, _ => switch (ty.toIntern().unwrap(ip).getTag(ip)) { .removed => unreachable, .type_int_signed, // i0 handled above .type_int_unsigned, // u0 handled above .type_pointer, .type_slice, .type_optional, // ?noreturn handled above .type_anyframe, .type_error_union, .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_opaque, .type_function, => null, .simple_type, // handled above // values, not types .undef, .simple_value, .ptr_nav, .ptr_uav, .ptr_uav_aligned, .ptr_comptime_alloc, .ptr_comptime_field, .ptr_int, .ptr_eu_payload, .ptr_opt_payload, .ptr_elem, .ptr_field, .ptr_slice, .opt_payload, .opt_null, .int_u8, .int_u16, .int_u32, .int_i32, .int_usize, .int_comptime_int_u32, .int_comptime_int_i32, .int_small, .int_positive, .int_negative, .int_lazy_align, .int_lazy_size, .error_set_error, .error_union_error, .error_union_payload, .enum_literal, .enum_tag, .float_f16, .float_f32, .float_f64, .float_f80, .float_f128, .float_c_longdouble_f80, .float_c_longdouble_f128, .float_comptime_float, .variable, .@"extern", .func_decl, .func_instance, .func_coerced, .only_possible_value, .union_value, .bytes, .aggregate, .repeated, // memoized value, not types .memoized_call, => unreachable, .type_array_big, .type_array_small, .type_vector, .type_enum_auto, .type_enum_explicit, .type_enum_nonexhaustive, .type_struct, .type_struct_anon, .type_struct_packed, .type_struct_packed_inits, .type_tuple_anon, .type_union, => switch (ip.indexToKey(ty.toIntern())) { inline .array_type, .vector_type => |seq_type, seq_tag| { const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })); if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| { return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .repeated_elem = 
opv.toIntern() }, } })); } return null; }, .struct_type => { // Resolving the layout first helps to avoid loops. // If the type has a coherent layout, we can recurse through fields safely. try ty.resolveLayout(pt); const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.field_types.len == 0) { // In this case the struct has no fields at all and // therefore has one possible value. return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })); } const field_vals = try sema.arena.alloc( InternPool.Index, struct_type.field_types.len, ); for (field_vals, 0..) |*field_val, i| { if (struct_type.fieldIsComptime(ip, i)) { try ty.resolveStructFieldInits(pt); field_val.* = struct_type.field_inits.get(ip)[i]; continue; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; } // In this case the struct has no runtime-known fields and // therefore has one possible value. return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = field_vals }, } })); }, .anon_struct_type => |tuple| { for (tuple.values.get(ip)) |val| { if (val == .none) return null; } // In this case the struct has all comptime-known fields and // therefore has one possible value. // TODO: write something like getCoercedInts to avoid needing to dupe return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values.get(ip)) }, } })); }, .union_type => { // Resolving the layout first helps to avoid loops. // If the type has a coherent layout, we can recurse through fields safely. try ty.resolveLayout(pt); const union_obj = ip.loadUnionType(ty.toIntern()); const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse return null; if (union_obj.field_types.len == 0) { const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() }); return Value.fromInterned(only); } const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; const only = try pt.intern(.{ .un = .{ .ty = ty.toIntern(), .tag = tag_val.toIntern(), .val = val_val.toIntern(), } }); return Value.fromInterned(only); }, .enum_type => { const enum_type = ip.loadEnumType(ty.toIntern()); switch (enum_type.tag_mode) { .nonexhaustive => { if (enum_type.tag_ty == .comptime_int_type) return null; if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| { const only = try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = int_opv.toIntern(), } }); return Value.fromInterned(only); } return null; }, .auto, .explicit => { if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(pt)) return null; return Value.fromInterned(switch (enum_type.names.len) { 0 => try pt.intern(.{ .empty_enum_value = ty.toIntern() }), 1 => try pt.intern(.{ .enum_tag = .{ .ty = ty.toIntern(), .int = if (enum_type.values.len == 0) (try pt.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern() else try ip.getCoercedInts( zcu.gpa, pt.tid, ip.indexToKey(enum_type.values.get(ip)[0]).int, enum_type.tag_ty, ), } }), else => return null, }); }, } }, else => unreachable, }, }, }; } /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
    return sema.getTmpAir().typeOf(inst, &sema.pt.zcu.intern_pool);
}

pub fn getTmpAir(sema: Sema) Air {
    return .{
        .instructions = sema.air_instructions.slice(),
        .extra = sema.air_extra.items,
    };
}

pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len);
    return sema.addExtraAssumeCapacity(extra);
}

pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
    const fields = std.meta.fields(@TypeOf(extra));
    const result: u32 = @intCast(sema.air_extra.items.len);
    inline for (fields) |field| {
        sema.air_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            i32 => @bitCast(@field(extra, field.name)),
            Air.Inst.Ref, InternPool.Index => @intFromEnum(@field(extra, field.name)),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        });
    }
    return result;
}

fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(refs));
}

fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index {
    const air_datas = sema.air_instructions.items(.data);
    const air_tags = sema.air_instructions.items(.tag);
    switch (air_tags[@intFromEnum(inst_index)]) {
        .br => return air_datas[@intFromEnum(inst_index)].br.block_inst,
        else => return null,
    }
}

fn isComptimeKnown(
    sema: *Sema,
    inst: Air.Inst.Ref,
) !bool {
    return (try sema.resolveValue(inst)) != null;
}

fn analyzeComptimeAlloc(
    sema: *Sema,
    block: *Block,
    var_type: Type,
    alignment: Alignment,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const mod = pt.zcu;

    // `var_type` must be fully resolved before the `newComptimeAlloc` call below.
    _ = try sema.typeHasOnePossibleValue(var_type);

    const ptr_type = try pt.ptrTypeSema(.{
        .child = var_type.toIntern(),
        .flags = .{
            .alignment = alignment,
            .address_space = target_util.defaultAddressSpace(mod.getTarget(), .global_constant),
        },
    });

    const alloc = try sema.newComptimeAlloc(block, var_type, alignment);

    return Air.internedToRef((try pt.intern(.{ .ptr = .{
        .ty = ptr_type.toIntern(),
        .base_addr = .{ .comptime_alloc = alloc },
        .byte_offset = 0,
    } })));
}

/// The places where a user can specify an address space attribute.
pub const AddressSpaceContext = enum {
    /// A function is specified to be placed in a certain address space.
    function,

    /// A (global) variable is specified to be placed in a certain address space.
    /// In contrast to .constant, these values (and thus the address space they will be
    /// placed in) are required to be mutable.
    variable,

    /// A (global) constant value is specified to be placed in a certain address space.
    /// In contrast to .variable, values placed in this address space are not required to be mutable.
    constant,

    /// A pointer is ascribed to point into a certain address space.
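    /// For example (hypothetical, x86-only per the support table in
    /// `analyzeAsAddressSpace` below): `var seg: *addrspace(.gs) u32 = @ptrFromInt(0x10);`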
pointer, }; fn resolveAddressSpace( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { const air_ref = try sema.resolveInst(zir_ref); return sema.analyzeAsAddressSpace(block, src, air_ref, ctx); } pub fn analyzeAsAddressSpace( sema: *Sema, block: *Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { const pt = sema.pt; const mod = pt.zcu; const addrspace_ty = try pt.getBuiltinType("AddressSpace"); const coerced = try sema.coerce(block, addrspace_ty, air_ref, src); const addrspace_val = try sema.resolveConstDefinedValue(block, src, coerced, .{ .needed_comptime_reason = "address space must be comptime-known", }); const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_val); const target = pt.zcu.getTarget(); const arch = target.cpu.arch; const is_nv = arch.isNvptx(); const is_amd = arch == .amdgcn; const is_spirv = arch.isSpirV(); const is_gpu = is_nv or is_amd or is_spirv; const supported = switch (address_space) { // TODO: on spir-v only when os is opencl. .generic => true, .gs, .fs, .ss => (arch == .x86 or arch == .x86_64) and ctx == .pointer, // TODO: check that .shared and .local are left uninitialized .param => is_nv, .input, .output, .uniform => is_spirv, .global, .shared, .local => is_gpu, .constant => is_gpu and (ctx == .constant), // TODO this should also check how many flash banks the cpu has .flash, .flash1, .flash2, .flash3, .flash4, .flash5 => arch == .avr, }; if (!supported) { // TODO error messages could be made more elaborate here const entity = switch (ctx) { .function => "functions", .variable => "mutable values", .constant => "constant values", .pointer => "pointers", }; return sema.fail( block, src, "{s} with address space '{s}' are not supported on {s}", .{ entity, @tagName(address_space), arch.genericName() }, ); } return address_space; } /// Asserts the value is a pointer and dereferences it. /// Returns `null` if the pointer contents cannot be loaded at comptime. 
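/// For example (sketch): a pointer into a `comptime var x: u32 = 42;` dereferences
/// to the `Value` for `42`, while a pointer to a mutable runtime global yields `null`.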
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { // TODO: audit use sites to eliminate this coercion const pt = sema.pt; const coerced_ptr_val = try pt.getCoerced(ptr_val, ptr_ty); switch (try sema.pointerDerefExtra(block, src, coerced_ptr_val)) { .runtime_load => return null, .val => |v| return v, .needed_well_defined => |ty| return sema.fail( block, src, "comptime dereference requires '{}' to have a well-defined layout", .{ty.fmt(pt)}, ), .out_of_bounds => |ty| return sema.fail( block, src, "dereference of '{}' exceeds bounds of containing decl of type '{}'", .{ ptr_ty.fmt(pt), ty.fmt(pt) }, ), } } const DerefResult = union(enum) { runtime_load, val: Value, needed_well_defined: Type, out_of_bounds: Type, }; fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value) CompileError!DerefResult { const pt = sema.pt; const ip = &pt.zcu.intern_pool; switch (try sema.loadComptimePtr(block, src, ptr_val)) { .success => |mv| return .{ .val = try mv.intern(pt, sema.arena) }, .runtime_load => return .runtime_load, .undef => return sema.failWithUseOfUndef(block, src), .err_payload => |err_name| return sema.fail(block, src, "attempt to unwrap error: {}", .{err_name.fmt(ip)}), .null_payload => return sema.fail(block, src, "attempt to use null value", .{}), .inactive_union_field => return sema.fail(block, src, "access of inactive union field", .{}), .needed_well_defined => |ty| return .{ .needed_well_defined = ty }, .out_of_bounds => |ty| return .{ .out_of_bounds = ty }, .exceeds_host_size => return sema.fail(block, src, "bit-pointer target exceeds host size", .{}), } } /// Used to convert a u64 value to a usize value, emitting a compile error if the number /// is too big to fit. fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize { if (@bitSizeOf(u64) <= @bitSizeOf(usize)) return int; return std.math.cast(usize, int) orelse return sema.fail(block, src, "expression produces integer value '{d}' which is too big for this compiler implementation to handle", .{int}); } /// For pointer-like optionals, it returns the pointer type. For pointers, /// the type is returned unmodified. /// This can return `error.AnalysisFail` because it sometimes requires resolving whether /// a type has zero bits, which can cause a "foo depends on itself" compile error. /// This logic must be kept in sync with `Type.isPtrLikeOptional`. fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const pt = sema.pt; const mod = pt.zcu; return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => ty, .Slice => null, }, .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice, .C => null, .Many, .One => { if (ptr_type.flags.is_allowzero) return null; // optionals of zero sized types behave like bools, not pointers const payload_ty = Type.fromInterned(opt_child); if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) { return null; } return payload_ty; }, }, else => null, }, else => null, }; } /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. 
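/// For example, `type`, `comptime_int`, and (non-pointer) function types are
/// comptime-only, as is any struct or union with a comptime-only field.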
pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool { return ty.comptimeOnlyAdvanced(sema.pt, .sema); } pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool { return ty.hasRuntimeBitsAdvanced(sema.pt, false, .sema) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; } pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 { const pt = sema.pt; try ty.resolveLayout(pt); return ty.abiSize(pt); } pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment { return (try ty.abiAlignmentAdvanced(sema.pt, .sema)).scalar; } pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { return ty.fnHasRuntimeBitsAdvanced(sema.pt, .sema); } fn unionFieldIndex( sema: *Sema, block: *Block, union_ty: Type, field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; try union_ty.resolveFields(pt); const union_obj = mod.typeToUnion(union_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name); return @intCast(field_index); } fn structFieldIndex( sema: *Sema, block: *Block, struct_ty: Type, field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; try struct_ty.resolveFields(pt); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { const struct_type = mod.typeToStruct(struct_ty).?; return struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_src, field_name); } } fn anonStructFieldIndex( sema: *Sema, block: *Block, struct_ty: Type, field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| { if (name == field_name) return @intCast(i); }, .struct_type => if (ip.loadStructType(struct_ty.toIntern()).nameIndex(ip, field_name)) |i| return i, else => unreachable, } return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{ field_name.fmt(ip), struct_ty.fmt(pt), }); } /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { const pt = sema.pt; var overflow: usize = undefined; return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { error.Overflow => { const is_vec = ty.isVector(pt.zcu); overflow_idx.* = if (is_vec) overflow else 0; const safe_ty = if (is_vec) try pt.vectorType(.{ .len = ty.vectorLen(pt.zcu), .child = .comptime_int_type, }) else Type.comptime_int; return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { error.Overflow => unreachable, else => |e| return e, }; }, else => |e| return e, }; } fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { const pt = sema.pt; const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { error.Overflow => { overflow_idx.* = i; return error.Overflow; }, else => |e| return e, }; scalar.* = val.toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })); } return sema.intAddScalar(lhs, rhs, ty); } fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const pt = sema.pt; if (scalar_ty.toIntern() != .comptime_int_type) { const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty); if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow; return res.wrapped_result; } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); return pt.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. fn numberAddWrapScalar( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value { const pt = sema.pt; const mod = pt.zcu; if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty); if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { return Value.floatAdd(lhs, rhs, ty, sema.arena, pt); } const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty); return overflow_result.wrapped_result; } /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { const pt = sema.pt; var overflow: usize = undefined; return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { error.Overflow => { const is_vec = ty.isVector(pt.zcu); overflow_idx.* = if (is_vec) overflow else 0; const safe_ty = if (is_vec) try pt.vectorType(.{ .len = ty.vectorLen(pt.zcu), .child = .comptime_int_type, }) else Type.comptime_int; return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { error.Overflow => unreachable, else => |e| return e, }; }, else => |e| return e, }; } fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { const pt = sema.pt; if (ty.zigTypeTag(pt.zcu) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu)); const scalar_ty = ty.scalarType(pt.zcu); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { error.Overflow => { overflow_idx.* = i; return error.Overflow; }, else => |e| return e, }; scalar.* = val.toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })); } return sema.intSubScalar(lhs, rhs, ty); } fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const pt = sema.pt; if (scalar_ty.toIntern() != .comptime_int_type) { const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty); if (res.overflow_bit.compareAllWithZero(.neq, pt)) return error.Overflow; return res.wrapped_result; } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); return pt.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. fn numberSubWrapScalar( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value { const pt = sema.pt; const mod = pt.zcu; if (lhs.isUndef(mod) or rhs.isUndef(mod)) return pt.undefValue(ty); if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { return Value.floatSub(lhs, rhs, ty, sema.arena, pt); } const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty); return overflow_result.wrapped_result; } fn intSubWithOverflow( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { const pt = sema.pt; const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); const result_data = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); of.* = of_math_result.overflow_bit.toIntern(); scalar.* = of_math_result.wrapped_result.toIntern(); } return Value.OverflowArithmeticResult{ .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, } })), .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })), }; } return sema.intSubWithOverflowScalar(lhs, rhs, ty); } fn intSubWithOverflowScalar( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { const pt = sema.pt; const mod = pt.zcu; const info = ty.intInfo(mod); if (lhs.isUndef(mod) or rhs.isUndef(mod)) { return .{ .overflow_bit = try pt.undefValue(Type.u1), .wrapped_result = try pt.undefValue(ty), }; } var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); const wrapped_result = try pt.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)), .wrapped_result = wrapped_result, }; } const IntFromFloatMode = enum { exact, truncate }; fn intFromFloat( sema: *Sema, block: *Block, src: LazySrcLoc, val: Value, float_ty: Type, int_ty: Type, mode: IntFromFloatMode, ) CompileError!Value { const pt = sema.pt; const mod = pt.zcu; if (float_ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(pt, i); scalar.* = (try sema.intFromFloatScalar(block, src, elem_val, int_ty.scalarType(mod), mode)).toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = int_ty.toIntern(), .storage = .{ .elems = result_data }, } })); } return sema.intFromFloatScalar(block, src, val, int_ty, mode); } // float is expected to be finite and non-NaN fn float128IntPartToBigInt( arena: Allocator, float: f128, ) !std.math.big.int.Managed { const is_negative = std.math.signbit(float); const floored = @floor(@abs(float)); var rational = try std.math.big.Rational.init(arena); defer rational.q.deinit(); rational.setFloat(f128, floored) catch |err| switch (err) { error.NonFiniteFloat => unreachable, error.OutOfMemory => return error.OutOfMemory, }; // The float is reduced in rational.setFloat, so we assert that denominator is equal to one const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true }; assert(rational.q.toConst().eqlAbs(big_one)); if (is_negative) { rational.negate(); } return rational.p; } fn intFromFloatScalar( sema: *Sema, block: *Block, src: LazySrcLoc, val: Value, int_ty: Type, mode: IntFromFloatMode, ) CompileError!Value { const pt = sema.pt; const mod = pt.zcu; if (val.isUndef(mod)) return sema.failWithUseOfUndef(block, src); if (mode == .exact and val.floatHasFraction(mod)) return sema.fail( block, src, "fractional component prevents float value '{}' from coercion to type '{}'", .{ val.fmtValueSema(pt, sema), int_ty.fmt(pt) }, ); const float = val.toFloat(f128, pt); if (std.math.isNan(float)) { return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{ int_ty.fmt(pt), }); } if (std.math.isInf(float)) { return sema.fail(block, src, "float value Inf cannot be stored in integer type '{}'", .{ int_ty.fmt(pt), }); } var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); const cti_result = try pt.intValue_big(Type.comptime_int, big_int.toConst()); if (!(try sema.intFitsInType(cti_result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ val.fmtValueSema(pt, sema), int_ty.fmt(pt), }); } return pt.getCoerced(cti_result, int_ty); } /// Asserts the value is an integer, and the destination type is ComptimeInt or Int. /// Vectors are also accepted. Vector results are reduced with AND. /// /// If provided, `vector_index` reports the first element that failed the range check. fn intFitsInType( sema: *Sema, val: Value, ty: Type, vector_index: ?*usize, ) CompileError!bool { const pt = sema.pt; const mod = pt.zcu; if (ty.toIntern() == .comptime_int_type) return true; const info = ty.intInfo(mod); switch (val.toIntern()) { .zero_usize, .zero_u8 => return true, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => return true, .variable, .@"extern", .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { .signed => info.bits > ptr_bits, .unsigned => info.bits >= ptr_bits, }; }, .int => |int| switch (int.storage) { .u64, .i64, .big_int => { var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; const big_int = int.storage.toBigInt(&buffer); return big_int.fitsInTwosComp(info.signedness, info.bits); }, .lazy_align => |lazy_ty| { const max_needed_bits = @as(u16, 16) + @intFromBool(info.signedness == .signed); // If it is u16 or bigger we know the alignment fits without resolving it. 
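// Worked example: an alignment of 16 has toLog2Units() == 4, so an unsigned
// destination needs 4 + 1 = 5 bits and a signed one needs 6. The 16-bit cutoff
// above implies the largest representable alignment here is 1 << 15.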
if (info.bits >= max_needed_bits) return true; const x = try sema.typeAbiAlignment(Type.fromInterned(lazy_ty)); if (x == .none) return true; const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed); return info.bits >= actual_needed_bits; }, .lazy_size => |lazy_ty| { const max_needed_bits = @as(u16, 64) + @intFromBool(info.signedness == .signed); // If it is u64 or bigger we know the size fits without resolving it. if (info.bits >= max_needed_bits) return true; const x = try sema.typeAbiSize(Type.fromInterned(lazy_ty)); if (x == 0) return true; const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed); return info.bits >= actual_needed_bits; }, }, .aggregate => |aggregate| { assert(ty.zigTypeTag(mod) == .Vector); return switch (aggregate.storage) { .bytes => |bytes| for (bytes.toSlice(ty.vectorLen(mod), &mod.intern_pool), 0..) |byte, i| { if (byte == 0) continue; const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed); if (info.bits >= actual_needed_bits) continue; if (vector_index) |vi| vi.* = i; break false; } else true, .elems, .repeated_elem => for (switch (aggregate.storage) { .bytes => unreachable, .elems => |elems| elems, .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), }, 0..) |elem, i| { if (try sema.intFitsInType(Value.fromInterned(elem), ty.scalarType(mod), null)) continue; if (vector_index) |vi| vi.* = i; break false; } else true, }; }, else => unreachable, }, } } fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { const pt = sema.pt; if (!(try int_val.compareAllWithZeroSema(.gte, pt))) return false; const end_val = try pt.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; } /// Asserts the type is an enum. fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const pt = sema.pt; const mod = pt.zcu; const enum_type = mod.intern_pool.loadEnumType(ty.toIntern()); assert(enum_type.tag_mode != .nonexhaustive); // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false; const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty)); return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null; } fn intAddWithOverflow( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { const pt = sema.pt; const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); const result_data = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); of.* = of_math_result.overflow_bit.toIntern(); scalar.* = of_math_result.wrapped_result.toIntern(); } return Value.OverflowArithmeticResult{ .overflow_bit = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = (try pt.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, } })), .wrapped_result = Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })), }; } return sema.intAddWithOverflowScalar(lhs, rhs, ty); } fn intAddWithOverflowScalar( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { const pt = sema.pt; const mod = pt.zcu; const info = ty.intInfo(mod); if (lhs.isUndef(mod) or rhs.isUndef(mod)) { return .{ .overflow_bit = try pt.undefValue(Type.u1), .wrapped_result = try pt.undefValue(ty), }; } var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, pt, .sema); const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, pt, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); const result = try pt.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ .overflow_bit = try pt.intValue(Type.u1, @intFromBool(overflowed)), .wrapped_result = result, }; } /// Asserts the values are comparable. Both operands have type `ty`. /// For vectors, returns true if the comparison is true for ALL elements. /// /// Note that `!compareAll(.eq, ...) != compareAll(.neq, ...)` fn compareAll( sema: *Sema, lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, ) CompileError!bool { const pt = sema.pt; const mod = pt.zcu; if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen(mod)) : (i += 1) { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } } return true; } return sema.compareScalar(lhs, op, rhs, ty); } /// Asserts the values are comparable. Both operands have type `ty`. fn compareScalar( sema: *Sema, lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, ) CompileError!bool { const pt = sema.pt; const coerced_lhs = try pt.getCoerced(lhs, ty); const coerced_rhs = try pt.getCoerced(rhs, ty); switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, pt, .sema), } } fn valuesEqual( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) CompileError!bool { return lhs.eql(rhs, ty, sema.pt.zcu); } /// Asserts the values are comparable vectors of type `ty`. fn compareVector( sema: *Sema, lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, ) !Value { const pt = sema.pt; const mod = pt.zcu; assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool).toIntern(); } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = (try pt.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(), .storage = .{ .elems = result_data }, } })); } /// Merge lhs with rhs. /// Asserts that lhs and rhs are both error sets and are resolved. fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { const pt = sema.pt; const ip = &pt.zcu.intern_pool; const arena = sema.arena; const lhs_names = lhs.errorSetNames(pt.zcu); const rhs_names = rhs.errorSetNames(pt.zcu); var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(arena, lhs_names.len); for (0..lhs_names.len) |lhs_index| { names.putAssumeCapacityNoClobber(lhs_names.get(ip)[lhs_index], {}); } for (0..rhs_names.len) |rhs_index| { try names.put(arena, rhs_names.get(ip)[rhs_index], {}); } return pt.errorSetFromUnsortedNames(names.keys()); } /// Avoids crashing the compiler when asking if inferred allocations are noreturn. fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { if (ref == .unreachable_value) return true; if (ref.toIndex()) |inst| switch (sema.air_instructions.items(.tag)[@intFromEnum(inst)]) { .inferred_alloc, .inferred_alloc_comptime => return false, else => {}, }; return sema.typeOf(ref).isNoReturn(sema.pt.zcu); } /// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type. fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool { if (ref.toIndex()) |inst| switch (sema.air_instructions.items(.tag)[@intFromEnum(inst)]) { .inferred_alloc, .inferred_alloc_comptime => return false, else => {}, }; return sema.typeOf(ref).zigTypeTag(sema.pt.zcu) == tag; } pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { const zcu = sema.pt.zcu; if (!zcu.comp.incremental) return; const gop = try sema.dependencies.getOrPut(sema.gpa, dependee); if (gop.found_existing) return; // Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields // of a type and they use `@This()`. This dependency would be unnecessary, and in fact would // just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve // the loop. 
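// For example (hypothetical):
//
//     const S = struct { next: ?*@This() };
//
// Resolving the field types of `S` names `S` itself; recording that as a
// dependency would form a cycle the resolver could never break, causing
// needless re-analysis.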
switch (sema.owner.unwrap()) { .cau => |cau| switch (dependee) { .nav_val => |nav| if (zcu.intern_pool.getNav(nav).analysis_owner == cau.toOptional()) { return; }, else => {}, }, .func => {}, } try zcu.intern_pool.addDependency(sema.gpa, sema.owner, dependee); } fn isComptimeMutablePtr(sema: *Sema, val: Value) bool { return switch (sema.pt.zcu.intern_pool.indexToKey(val.toIntern())) { .slice => |slice| sema.isComptimeMutablePtr(Value.fromInterned(slice.ptr)), .ptr => |ptr| switch (ptr.base_addr) { .uav, .nav, .int => false, .comptime_field => true, .comptime_alloc => |alloc_index| !sema.getComptimeAlloc(alloc_index).is_const, .eu_payload, .opt_payload => |base| sema.isComptimeMutablePtr(Value.fromInterned(base)), .arr_elem, .field => |bi| sema.isComptimeMutablePtr(Value.fromInterned(bi.base)), }, else => false, }; } fn checkRuntimeValue(sema: *Sema, ptr: Air.Inst.Ref) bool { const val = ptr.toInterned() orelse return true; return !Value.fromInterned(val).canMutateComptimeVarState(sema.pt.zcu); } fn validateRuntimeValue(sema: *Sema, block: *Block, val_src: LazySrcLoc, val: Air.Inst.Ref) CompileError!void { if (sema.checkRuntimeValue(val)) return; return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(val_src, "runtime value contains reference to comptime var", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(val_src, msg, "comptime var pointers are not available at runtime", .{}); break :msg msg; }); } /// Returns true if any value contained in `val` is undefined. fn anyUndef(sema: *Sema, block: *Block, src: LazySrcLoc, val: Value) !bool { const pt = sema.pt; const mod = pt.zcu; return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, .slice => { // If the slice contents are runtime-known, reification will fail later on with a // specific error message. const arr = try sema.maybeDerefSliceAsArray(block, src, val) orelse return false; return sema.anyUndef(block, src, arr); }, .aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| { const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i]; if (try sema.anyUndef(block, src, Value.fromInterned(elem))) break true; } else false, else => false, }; } /// Asserts that `slice_val` is a slice of `u8`. fn sliceToIpString( sema: *Sema, block: *Block, src: LazySrcLoc, slice_val: Value, reason: NeededComptimeReason, ) CompileError!InternPool.NullTerminatedString { const pt = sema.pt; const zcu = pt.zcu; const slice_ty = slice_val.typeOf(zcu); assert(slice_ty.isSlice(zcu)); assert(slice_ty.childType(zcu).toIntern() == .u8_type); const array_val = try sema.derefSliceAsArray(block, src, slice_val, reason); const array_ty = array_val.typeOf(zcu); return array_val.toIpString(array_ty, pt); } /// Given a slice value, attempts to dereference it into a comptime-known array. /// Emits a compile error if the contents of the slice are not comptime-known. /// Asserts that `slice_val` is a slice. fn derefSliceAsArray( sema: *Sema, block: *Block, src: LazySrcLoc, slice_val: Value, reason: NeededComptimeReason, ) CompileError!Value { return try sema.maybeDerefSliceAsArray(block, src, slice_val) orelse { return sema.failWithNeededComptime(block, src, reason); }; } /// Given a slice value, attempts to dereference it into a comptime-known array. /// Returns `null` if the contents of the slice are not comptime-known. /// Asserts that `slice_val` is a slice. 
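/// For example (sketch): a comptime-known slice over `"abc"` dereferences to the
/// array value `[3]u8{ 'a', 'b', 'c' }`, while a slice whose contents are only
/// runtime-known yields `null`.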
fn maybeDerefSliceAsArray(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    slice_val: Value,
) CompileError!?Value {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    assert(slice_val.typeOf(zcu).isSlice(zcu));
    const slice = switch (ip.indexToKey(slice_val.toIntern())) {
        .undef => return sema.failWithUseOfUndef(block, src),
        .slice => |slice| slice,
        else => unreachable,
    };
    const elem_ty = Type.fromInterned(slice.ty).childType(zcu);
    const len = try Value.fromInterned(slice.len).toUnsignedIntSema(pt);
    const array_ty = try pt.arrayType(.{
        .child = elem_ty.toIntern(),
        .len = len,
    });
    const ptr_ty = try pt.ptrTypeSema(p: {
        var p = Type.fromInterned(slice.ty).ptrInfo(zcu);
        p.flags.size = .One;
        p.child = array_ty.toIntern();
        p.sentinel = .none;
        break :p p;
    });
    const casted_ptr = try pt.getCoerced(Value.fromInterned(slice.ptr), ptr_ty);
    return sema.pointerDeref(block, src, casted_ptr, ptr_ty);
}

fn analyzeUnreachable(sema: *Sema, block: *Block, src: LazySrcLoc, safety_check: bool) !void {
    if (safety_check and block.wantSafety()) {
        try sema.safetyPanic(block, src, .unreach);
    } else {
        _ = try block.addNoOp(.unreach);
    }
}

/// This should be called exactly once, at the end of a `Sema`'s lifetime.
/// It takes the exports stored in `sema.exports` and flushes them to the `Zcu`
/// to be processed by the linker after the update.
pub fn flushExports(sema: *Sema) !void {
    if (sema.exports.items.len == 0) return;

    const zcu = sema.pt.zcu;
    const gpa = zcu.gpa;

    // There may be existing exports. For instance, a struct may export
    // things during both field type resolution and field default resolution.
    //
    // So, pick up and delete any existing exports. This strategy performs
    // redundant work, but that's okay, because this case is exceedingly rare.
    if (zcu.single_exports.get(sema.owner)) |export_idx| {
        try sema.exports.append(gpa, zcu.all_exports.items[export_idx]);
    } else if (zcu.multi_exports.get(sema.owner)) |info| {
        try sema.exports.appendSlice(gpa, zcu.all_exports.items[info.index..][0..info.len]);
    }
    zcu.deleteUnitExports(sema.owner);

    // `sema.exports` is completed; store the data into the `Zcu`.
    if (sema.exports.items.len == 1) {
        try zcu.single_exports.ensureUnusedCapacity(gpa, 1);
        const export_idx = zcu.free_exports.popOrNull() orelse idx: {
            _ = try zcu.all_exports.addOne(gpa);
            break :idx zcu.all_exports.items.len - 1;
        };
        zcu.all_exports.items[export_idx] = sema.exports.items[0];
        zcu.single_exports.putAssumeCapacityNoClobber(sema.owner, @intCast(export_idx));
    } else {
        try zcu.multi_exports.ensureUnusedCapacity(gpa, 1);
        const exports_base = zcu.all_exports.items.len;
        try zcu.all_exports.appendSlice(gpa, sema.exports.items);
        zcu.multi_exports.putAssumeCapacityNoClobber(sema.owner, .{
            .index = @intCast(exports_base),
            .len = @intCast(sema.exports.items.len),
        });
    }
}

/// Given that this `Sema` is owned by the `Cau` of a `declaration`, fetches
/// the corresponding `Nav`.
fn getOwnerCauNav(sema: *Sema) InternPool.Nav.Index {
    const cau = sema.owner.unwrap().cau;
    return sema.pt.zcu.intern_pool.getCau(cau).owner.unwrap().nav;
}

/// Given that this `Sema` is owned by the `Cau` of a `declaration`, fetches
/// the declaration name from its corresponding `Nav`.
fn getOwnerCauNavName(sema: *Sema) InternPool.NullTerminatedString {
    const nav = sema.getOwnerCauNav();
    return sema.pt.zcu.intern_pool.getNav(nav).name;
}

/// Given that this `Sema` is owned by the `Cau` of a `declaration`, fetches
/// the `TrackedInst` corresponding to this `declaration` instruction.
fn getOwnerCauDeclInst(sema: *Sema) InternPool.TrackedInst.Index { const ip = &sema.pt.zcu.intern_pool; const cau = ip.getCau(sema.owner.unwrap().cau); assert(cau.owner.unwrap() == .nav); return cau.zir_index; } /// Given that this `Sema` is owned by a runtime function, fetches the /// `TrackedInst` corresponding to its `declaration` instruction. fn getOwnerFuncDeclInst(sema: *Sema) InternPool.TrackedInst.Index { const zcu = sema.pt.zcu; const ip = &zcu.intern_pool; const func = sema.owner.unwrap().func; const func_info = zcu.funcInfo(func); const cau = if (func_info.generic_owner == .none) cau: { break :cau ip.getNav(func_info.owner_nav).analysis_owner.unwrap().?; } else cau: { const generic_owner = zcu.funcInfo(func_info.generic_owner); break :cau ip.getNav(generic_owner.owner_nav).analysis_owner.unwrap().?; }; return ip.getCau(cau).zir_index; } /// Called as soon as a `declared` enum type is created. /// Resolves the tag type and field inits. /// Marks the `src_inst` dependency on the enum's declaration, so call sites need not do this. pub fn resolveDeclaredEnum( pt: Zcu.PerThread, wip_ty: InternPool.WipEnumType, inst: Zir.Inst.Index, tracked_inst: InternPool.TrackedInst.Index, namespace: InternPool.NamespaceIndex, type_name: InternPool.NullTerminatedString, enum_cau: InternPool.Cau.Index, small: Zir.Inst.EnumDecl.Small, body: []const Zir.Inst.Index, tag_type_ref: Zir.Inst.Ref, any_values: bool, fields_len: u32, zir: Zir, body_end: usize, ) Zcu.CompileError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) }; const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } }; const anal_unit = AnalUnit.wrap(.{ .cau = enum_cau }); var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); var sema: Sema = .{ .pt = pt, .gpa = gpa, .arena = arena.allocator(), .code = zir, .owner = anal_unit, .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); try sema.declareDependency(.{ .src_hash = tracked_inst }); var block: Block = .{ .parent = null, .sema = &sema, .namespace = namespace, .instructions = .{}, .inlining = null, .is_comptime = true, .src_base_inst = tracked_inst, .type_name_ctx = type_name, }; defer block.instructions.deinit(gpa); const int_tag_ty = ty: { if (body.len != 0) { _ = try sema.analyzeInlineBody(&block, body, inst); } if (tag_type_ref != .none) { const ty = try sema.resolveType(&block, tag_ty_src, tag_type_ref); if (ty.zigTypeTag(zcu) != .Int and ty.zigTypeTag(zcu) != .ComptimeInt) { return sema.fail(&block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)}); } break :ty ty; } else if (fields_len == 0) { break :ty try pt.intType(.unsigned, 0); } else { const bits = std.math.log2_int_ceil(usize, fields_len); break :ty try pt.intType(.unsigned, bits); } }; wip_ty.setTagTy(ip, int_tag_ty.toIntern()); if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) { return sema.fail(&block, src, "non-exhaustive enum specifies every value", .{}); } } var extra_index = body_end + 
bit_bags_count; var bit_bag_index: usize = body_end; var cur_bit_bag: u32 = undefined; var last_tag_val: ?Value = null; for (0..fields_len) |field_i_usize| { const field_i: u32 = @intCast(field_i_usize); if (field_i % 32 == 0) { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]); const field_name_zir = zir.nullTerminatedString(field_name_index); extra_index += 2; // field name, doc comment const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); const value_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .container_field_value = field_i }, }; const tag_overflow = if (has_tag_value) overflow: { const tag_val_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]); extra_index += 1; const tag_inst = try sema.resolveInst(tag_val_ref); last_tag_val = try sema.resolveConstDefinedValue(&block, .{ .base_node_inst = tracked_inst, .offset = .{ .container_field_name = field_i }, }, tag_inst, .{ .needed_comptime_reason = "enum tag value must be comptime-known", }); if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| { assert(conflict.kind == .value); // AstGen validated names are unique const other_field_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .container_field_value = conflict.prev_field_idx }, }; const msg = msg: { const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block, msg); } break :overflow false; } else if (any_values) overflow: { var overflow: ?usize = null; last_tag_val = if (last_tag_val) |val| try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow) else try pt.intValue(int_tag_ty, 0); if (overflow != null) break :overflow true; if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| { assert(conflict.kind == .value); // AstGen validated names are unique const other_field_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .container_field_value = conflict.prev_field_idx }, }; const msg = msg: { const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)}); errdefer msg.destroy(gpa); try sema.errNote(other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block, msg); } break :overflow false; } else overflow: { assert(wip_ty.nextField(ip, field_name, .none) == null); last_tag_val = try pt.intValue(Type.comptime_int, field_i); if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty); break :overflow false; }; if (tag_overflow) { const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{ last_tag_val.?.fmtValueSema(pt, &sema), int_tag_ty.fmt(pt), }); return sema.failWithOwnedErrorMsg(&block, msg); } } } pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; const loadComptimePtr = 
@import("Sema/comptime_ptr_access.zig").loadComptimePtr; const ComptimeLoadResult = @import("Sema/comptime_ptr_access.zig").ComptimeLoadResult; const storeComptimePtr = @import("Sema/comptime_ptr_access.zig").storeComptimePtr; const ComptimeStoreResult = @import("Sema/comptime_ptr_access.zig").ComptimeStoreResult;