diff options
| author | Andrew Kelley <andrew@ziglang.org> | 2022-03-23 18:45:51 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2022-03-23 18:45:51 -0700 |
| commit | 7378ce67dabf996f2d0927138f826dfb3d6fa05f (patch) | |
| tree | 8db3025dfa20a9120c62b7c56796e75cbcecec3d | |
| parent | 57539a26b4b1a118c9947116f2873ea3c0ced3da (diff) | |
| download | zig-7378ce67dabf996f2d0927138f826dfb3d6fa05f.tar.gz zig-7378ce67dabf996f2d0927138f826dfb3d6fa05f.zip | |
Sema: introduce a type resolution queue
Type resolution now happens after a function body is analyzed. This
prevents circular dependency compile errors, yet provides a way to mark
types that need to be fully resolved before a given function is sent to
the codegen backend.
| -rw-r--r-- | src/Air.zig | 2 | ||||
| -rw-r--r-- | src/Module.zig | 17 | ||||
| -rw-r--r-- | src/Sema.zig | 35 | ||||
| -rw-r--r-- | src/arch/wasm/CodeGen.zig | 66 | ||||
| -rw-r--r-- | test/behavior/eval.zig | 6 |
5 files changed, 82 insertions, 44 deletions
diff --git a/src/Air.zig b/src/Air.zig index e0f765ddc0..404ee8f9b7 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1072,7 +1072,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .sub_with_overflow, .mul_with_overflow, .shl_with_overflow, - => return Type.initTag(.bool), + => return Type.bool, } } diff --git a/src/Module.zig b/src/Module.zig index 7b27546f52..79d6343949 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4837,6 +4837,9 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem // Finally we must resolve the return type and parameter types so that backends // have full access to type information. + // Crucially, this happens *after* we set the function state to success above, + // so that dependencies on the function body will now be satisfied rather than + // result in circular dependency errors. const src: LazySrcLoc = .{ .node_offset = 0 }; sema.resolveFnTypes(&inner_block, src, fn_ty_info) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -4847,6 +4850,20 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem else => |e| return e, }; + // Similarly, resolve any queued up types that were requested to be resolved for + // the backends. 
+ for (sema.types_to_resolve.items) |inst_ref| { + const ty = sema.getTmpAir().getRefType(inst_ref); + sema.resolveTypeFully(&inner_block, src, ty) catch |err| switch (err) { + error.NeededSourceLocation => unreachable, + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, + error.AnalysisFail => {}, + else => |e| return e, + }; + } + return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = sema.air_extra.toOwnedSlice(gpa), diff --git a/src/Sema.zig b/src/Sema.zig index 7e87ebbf33..d8cc908ae8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -63,6 +63,11 @@ comptime_args_fn_inst: Zir.Inst.Index = 0, /// extra hash table lookup in the `monomorphed_funcs` set. /// Sema will set this to null when it takes ownership. preallocated_new_func: ?*Module.Fn = null, +/// The key is `constant` AIR instructions to types that must be fully resolved +/// after the current function body analysis is done. +/// TODO: after upgrading to use InternPool change the key here to be an +/// InternPool value index. 
+types_to_resolve: std.ArrayListUnmanaged(Air.Inst.Ref) = .{}, const std = @import("std"); const mem = std.mem; @@ -527,6 +532,7 @@ pub fn deinit(sema: *Sema) void { sema.air_values.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); + sema.types_to_resolve.deinit(gpa); sema.* = undefined; } @@ -1747,7 +1753,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE return sema.bitCast(block, ptr_ty, new_ptr, src); } const ty_op = air_datas[trash_inst].ty_op; - const operand_ty = sema.getTmpAir().typeOf(ty_op.operand); + const operand_ty = sema.typeOf(ty_op.operand); const ptr_operand_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = operand_ty, .@"addrspace" = addr_space, @@ -2592,7 +2598,7 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); try sema.requireRuntimeBlock(block, var_decl_src); - try sema.resolveTypeFully(block, ty_src, var_ty); + try sema.queueFullTypeResolution(var_ty); return block.addTy(.alloc, ptr_type); } @@ -2614,7 +2620,7 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); try sema.requireRuntimeBlock(block, var_decl_src); - try sema.resolveTypeFully(block, ty_src, var_ty); + try sema.queueFullTypeResolution(var_ty); return block.addTy(.alloc, ptr_type); } @@ -2770,7 +2776,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } try sema.requireRuntimeBlock(block, src); - try sema.resolveTypeFully(block, ty_src, final_elem_ty); + try sema.queueFullTypeResolution(final_elem_ty); // Change it to a normal alloc. sema.air_instructions.set(ptr_inst, .{ @@ -4363,6 +4369,8 @@ fn addDbgVar( else => unreachable, } + try sema.queueFullTypeResolution(operand_ty); + // Add the name to the AIR. 
const name_extra_index = @intCast(u32, sema.air_extra.items.len); const elements_used = name.len / 4 + 1; @@ -5004,7 +5012,7 @@ fn analyzeCall( } } - try sema.resolveTypeFully(block, call_src, func_ty_info.return_type); + try sema.queueFullTypeResolution(func_ty_info.return_type); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + args.len); @@ -5344,7 +5352,7 @@ fn instantiateGenericCall( total_i += 1; } - try sema.resolveTypeFully(block, call_src, new_fn_info.return_type); + try sema.queueFullTypeResolution(new_fn_info.return_type); } try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -12030,7 +12038,8 @@ fn unionInit( } try sema.requireRuntimeBlock(block, init_src); - try sema.resolveTypeLayout(block, union_ty_src, union_ty); + _ = union_ty_src; + try sema.queueFullTypeResolution(union_ty); return block.addUnionInit(union_ty, field_index, init); } @@ -12205,6 +12214,7 @@ fn finishStructInit( } try sema.requireRuntimeBlock(block, src); + try sema.queueFullTypeResolution(struct_ty); return block.addAggregateInit(struct_ty, field_inits); } @@ -12351,7 +12361,7 @@ fn zirArrayInit( }; try sema.requireRuntimeBlock(block, runtime_src); - try sema.resolveTypeLayout(block, src, elem_ty); + try sema.queueFullTypeResolution(elem_ty); if (is_ref) { const target = sema.mod.getTarget(); @@ -18339,7 +18349,7 @@ fn storePtr2( // TODO handle if the element type requires comptime try sema.requireRuntimeBlock(block, runtime_src); - try sema.resolveTypeLayout(block, src, elem_ty); + try sema.queueFullTypeResolution(elem_ty); _ = try block.addBinOp(air_tag, ptr, operand); } @@ -21907,7 +21917,7 @@ fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { return sema.getTmpAir().typeOf(inst); } -fn getTmpAir(sema: Sema) Air { +pub fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, @@ -22572,3 +22582,8 @@ fn anonStructFieldIndex( fn 
kit(sema: *Sema, block: *Block, src: LazySrcLoc) Module.WipAnalysis { return .{ .sema = sema, .block = block, .src = src }; } + +fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { + const inst_ref = try sema.addType(ty); + try sema.types_to_resolve.append(sema.gpa, inst_ref); +} diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index eb8d72a994..f2979d96b1 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -632,7 +632,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue { // means we must generate it from a constant. const val = self.air.value(ref).?; const ty = self.air.typeOf(ref); - if (!ty.hasRuntimeBits() and !ty.isInt()) { + if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt()) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -805,13 +805,13 @@ fn genFunctype(gpa: Allocator, fn_ty: Type, target: std.Target) !wasm.Type { defer gpa.free(fn_params); fn_ty.fnParamTypes(fn_params); for (fn_params) |param_type| { - if (!param_type.hasRuntimeBits()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; try params.append(typeToValtype(param_type, target)); } } // return type - if (!want_sret and return_type.hasRuntimeBits()) { + if (!want_sret and return_type.hasRuntimeBitsIgnoreComptime()) { try returns.append(typeToValtype(return_type, target)); } @@ -970,7 +970,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu .Naked => return result, .Unspecified, .C => { for (param_types) |ty| { - if (!ty.hasRuntimeBits()) { + if (!ty.hasRuntimeBitsIgnoreComptime()) { continue; } @@ -1015,7 +1015,7 @@ fn restoreStackPointer(self: *Self) !void { /// /// Asserts Type has codegenbits fn allocStack(self: *Self, ty: Type) !WValue { - assert(ty.hasRuntimeBits()); + assert(ty.hasRuntimeBitsIgnoreComptime()); if (self.initial_stack_value == .none) { try self.initializeStack(); } @@ -1049,7 +1049,7 @@ fn allocStackPtr(self: *Self, inst: 
Air.Inst.Index) !WValue { try self.initializeStack(); } - if (!pointee_ty.hasRuntimeBits()) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) { return self.allocStack(Type.usize); // create a value containing just the stack pointer. } @@ -1235,18 +1235,18 @@ fn isByRef(ty: Type, target: std.Target) bool { .Struct, .Frame, .Union, - => return ty.hasRuntimeBits(), + => return ty.hasRuntimeBitsIgnoreComptime(), .Int => return if (ty.intInfo(target).bits > 64) true else false, .ErrorUnion => { - const has_tag = ty.errorUnionSet().hasRuntimeBits(); - const has_pl = ty.errorUnionPayload().hasRuntimeBits(); + const has_tag = ty.errorUnionSet().hasRuntimeBitsIgnoreComptime(); + const has_pl = ty.errorUnionPayload().hasRuntimeBitsIgnoreComptime(); if (!has_tag or !has_pl) return false; - return ty.hasRuntimeBits(); + return ty.hasRuntimeBitsIgnoreComptime(); }, .Optional => { if (ty.isPtrLikeOptional()) return false; var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).hasRuntimeBits(); + return ty.optionalChild(&buf).hasRuntimeBitsIgnoreComptime(); }, .Pointer => { // Slices act like struct and will be passed by reference @@ -1511,7 +1511,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.air.typeOf(un_op).childType(); - if (!ret_ty.hasRuntimeBits()) return WValue.none; + if (!ret_ty.hasRuntimeBitsIgnoreComptime()) return WValue.none; if (!isByRef(ret_ty, self.target)) { const result = try self.load(operand, ret_ty, 0); @@ -1567,7 +1567,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
const arg_val = try self.resolveInst(arg_ref); const arg_ty = self.air.typeOf(arg_ref); - if (!arg_ty.hasRuntimeBits()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; switch (arg_val) { .stack_offset => try self.emitWValue(try self.buildPointerOffset(arg_val, 0, .new)), @@ -1591,7 +1591,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. try self.addLabel(.call_indirect, fn_type_index); } - if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBits()) { + if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBitsIgnoreComptime()) { return WValue.none; } else if (ret_ty.isNoReturn()) { try self.addTag(.@"unreachable"); @@ -1625,7 +1625,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro .ErrorUnion => { const err_ty = ty.errorUnionSet(); const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBits()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { return self.store(lhs, rhs, err_ty, 0); } @@ -1638,7 +1638,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro } var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasRuntimeBits()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { return self.store(lhs, rhs, Type.u8, 0); } @@ -1696,7 +1696,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const ty = self.air.getRefType(ty_op.ty); - if (!ty.hasRuntimeBits()) return WValue{ .none = {} }; + if (!ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; if (isByRef(ty, self.target)) { const new_local = try self.allocStack(ty); @@ -2200,7 +2200,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner if (operand_ty.zigTypeTag() == .Optional and !operand_ty.isPtrLikeOptional()) { var buf: Type.Payload.ElemType = undefined; const payload_ty = operand_ty.optionalChild(&buf); - if 
(payload_ty.hasRuntimeBits()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime()) { // When we hit this case, we must check the value of optionals // that are not pointers. This means first checking against non-null for // both lhs and rhs, as well as checking the payload are matching of lhs and rhs @@ -2257,7 +2257,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const block = self.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (self.air.typeOf(br.operand).hasRuntimeBits()) { + if (self.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) { const operand = try self.resolveInst(br.operand); const op = switch (operand) { .stack_offset => try self.buildPointerOffset(operand, 0, .new), @@ -2357,7 +2357,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBits()) return WValue{ .none = {} }; + if (!field_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch { return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(self.target)}); }; @@ -2544,7 +2544,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W // load the error tag value try self.emitWValue(operand); - if (pl_ty.hasRuntimeBits()) { + if (pl_ty.hasRuntimeBitsIgnoreComptime()) { try self.addMemArg(.i32_load16_u, .{ .offset = operand.offset(), .alignment = err_ty.errorUnionSet().abiAlignment(self.target), @@ -2567,7 +2567,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) const op_ty = self.air.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); - if 
(!payload_ty.hasRuntimeBits()) return WValue{ .none = {} }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; const err_align = err_ty.abiAlignment(self.target); const set_size = err_ty.errorUnionSet().abiSize(self.target); const offset = mem.alignForwardGeneric(u64, set_size, err_align); @@ -2585,7 +2585,7 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In const op_ty = self.air.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); - if (op_is_ptr or !payload_ty.hasRuntimeBits()) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } @@ -2599,7 +2599,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const op_ty = self.air.typeOf(ty_op.operand); - if (!op_ty.hasRuntimeBits()) return operand; + if (!op_ty.hasRuntimeBitsIgnoreComptime()) return operand; const err_ty = self.air.getRefType(ty_op.ty); const err_align = err_ty.abiAlignment(self.target); const set_size = err_ty.errorUnionSet().abiSize(self.target); @@ -2624,7 +2624,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const err_ty = self.air.getRefType(ty_op.ty); - if (!err_ty.errorUnionPayload().hasRuntimeBits()) return operand; + if (!err_ty.errorUnionPayload().hasRuntimeBitsIgnoreComptime()) return operand; const err_union = try self.allocStack(err_ty); try self.store(err_union, operand, err_ty.errorUnionSet(), 0); @@ -2690,7 +2690,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) const payload_ty = optional_ty.optionalChild(&buf); // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBits()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime()) { try 
self.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 }); } } @@ -2710,7 +2710,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const opt_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; if (opt_ty.isPtrLikeOptional()) return operand; const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target); @@ -2731,7 +2731,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBits() or opt_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.isPtrLikeOptional()) { return operand; } @@ -2745,7 +2745,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue const opt_ty = self.air.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } @@ -2769,7 +2769,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const non_null_bit = try self.allocStack(Type.initTag(.u1)); try self.emitWValue(non_null_bit); try self.addImm32(1); @@ -2958,7 +2958,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const slice_local = try self.allocStack(slice_ty); // store 
the array ptr in the slice - if (array_ty.hasRuntimeBits()) { + if (array_ty.hasRuntimeBitsIgnoreComptime()) { try self.store(slice_local, operand, Type.usize, 0); } @@ -3408,7 +3408,7 @@ fn airWasmMemoryGrow(self: *Self, inst: Air.Inst.Index) !WValue { } fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - assert(operand_ty.hasRuntimeBits()); + assert(operand_ty.hasRuntimeBitsIgnoreComptime()); assert(op == .eq or op == .neq); var buf: Type.Payload.ElemType = undefined; const payload_ty = operand_ty.optionalChild(&buf); @@ -3575,7 +3575,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index cbe6cfc296..2129512f96 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -853,3 +853,9 @@ test "comptime pointer load through elem_ptr" { assert(ptr[1].x == 2); } } + +test "debug variable type resolved through indirect zero-bit types" { + const T = struct { key: []void }; + const slice: []const T = &[_]T{}; + _ = slice; +} |
