| author | Andrew Kelley <andrew@ziglang.org> | 2022-02-28 19:22:16 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2022-02-28 19:22:16 -0700 |
| commit | 157f66ec077ad02f08891bec1a426c0ffef98e09 | |
| tree | 75b207ed6ccbbd2af83a338e09b86bc8de2965c4 /src | |
| parent | a7ca40b2817dbf3f2085141f32f20f431707391b | |
Sema: fix pointer type hash and equality functions
Several issues with pointer types are fixed:
Prior to this commit, Zig would not canonicalize a pointer type's explicit
alignment to alignment=0 when it matched the pointee's ABI alignment. To fix
this, `Type.ptr` now takes a Target parameter. I also moved the host_size
canonicalization into `Type.ptr`, since the target is now available there.
Similarly, is_allowzero for C pointers is now treated as a canonicalization
performed by the function rather than as a precondition.
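
The sketch below is condensed from the src/type.zig hunk later in this diff (not
the full function); it shows the canonicalization steps `Type.ptr` performs now
that it receives a Target, with explanatory comments added and the tag-selection
fast path elided:

```zig
pub fn ptr(arena: Allocator, target: Target, data: Payload.Pointer.Data) !Type {
    var d = data;

    // C pointers always allow zero; this used to be asserted as a precondition.
    if (d.size == .C) {
        d.@"allowzero" = true;
    }

    // An explicit alignment equal to the pointee's ABI alignment is stored as 0.
    if (d.@"align" != 0 and d.@"align" == d.pointee_type.abiAlignment(target)) {
        d.@"align" = 0;
    }

    // A host integer exactly as wide as the pointee is no packing at all.
    if (d.host_size != 0) {
        assert(d.bit_offset < d.host_size * 8);
        if (d.host_size * 8 == d.pointee_type.bitSize(target)) {
            assert(d.bit_offset == 0);
            d.host_size = 0;
        }
    }

    // ... the function then either picks a simple tag or falls back to:
    return Type.Tag.pointer.create(arena, d);
}
```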
In-memory coercion for pointers now properly checks the ABI alignment of
pointee types instead of incorrectly treating the value 0 as an alignment.
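
An excerpt from the `coerceInMemoryAllowedPtrs` hunk in src/Sema.zig below: a
stored alignment of 0 is resolved to the pointee's ABI alignment before the
numeric comparison, rather than being compared as if it were a real alignment:

```zig
const src_align = if (src_info.@"align" != 0)
    src_info.@"align"
else
    src_info.pointee_type.abiAlignment(target);

const dest_align = if (dest_info.@"align" != 0)
    dest_info.@"align"
else
    dest_info.pointee_type.abiAlignment(target);

// The destination may not demand stricter alignment than the source guarantees.
if (dest_align > src_align) {
    return .no_match;
}
```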
Type equality is completely reworked to switch on tag() rather than
zigTypeTag(). It is still semantically organized around zigTypeTag(), but that
knowledge is now implied rather than dictating the control flow of the logic.
Importantly, this fixes cases for opaques, structs, tuples, enums, and unions,
where equality was previously decided incorrectly by whether the tag() values
were equal.
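
For example, in the reworked `Type.eql` (src/type.zig below), aggregate types
now compare their payloads rather than only their tags, so two distinct
declarations no longer compare equal just because both carry the same tag.
Representative excerpts:

```zig
.@"opaque" => {
    const opaque_obj_a = a.castTag(.@"opaque").?.data;
    const opaque_obj_b = (b.castTag(.@"opaque") orelse return false).data;
    return opaque_obj_a == opaque_obj_b;
},
.@"struct" => {
    const a_struct_obj = a.castTag(.@"struct").?.data;
    const b_struct_obj = (b.castTag(.@"struct") orelse return false).data;
    return a_struct_obj == b_struct_obj;
},
.@"union", .union_tagged => {
    const a_union_obj = a.cast(Payload.Union).?.data;
    const b_union_obj = (b.cast(Payload.Union) orelse return false).data;
    return a_union_obj == b_union_obj;
},
```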
Additionally, pointer type equality now takes alignment into account. Because a
non-zero alignment that equals the pointee type's ABI alignment is canonicalized
to alignment=0, this can be a simple integer comparison.
Type hashing is implemented for pointers and floats. Array types now
additionally hash their sentinels.
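
Condensed from the new `hashWithHasher` cases in src/type.zig below: pointer
types hash all of their attributes, floats hash their tag, and arrays/vectors
hash their sentinel via the small `hashSentinel` helper introduced in this diff:

```zig
.Pointer => {
    const info = ty.ptrInfo().data;
    hashWithHasher(info.pointee_type, hasher);
    hashSentinel(info.sentinel, info.pointee_type, hasher);
    std.hash.autoHash(hasher, info.@"align");
    std.hash.autoHash(hasher, info.@"addrspace");
    std.hash.autoHash(hasher, info.bit_offset);
    std.hash.autoHash(hasher, info.host_size);
    std.hash.autoHash(hasher, info.@"allowzero");
    std.hash.autoHash(hasher, info.mutable);
    std.hash.autoHash(hasher, info.@"volatile");
    std.hash.autoHash(hasher, info.size);
},
.Float => {
    std.hash.autoHash(hasher, ty.tag());
},
.Array, .Vector => {
    const elem_ty = ty.elemType();
    std.hash.autoHash(hasher, ty.arrayLen());
    hashWithHasher(elem_ty, hasher);
    hashSentinel(ty.sentinel(), elem_ty, hasher);
},
```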
This regressed some behavior tests that had only been passing because of the
type equality bugs.
The C backend has a noticeable problem: it lowers differently-aligned pointers
(particularly slices) to the same C type, causing C compilation errors due to
duplicate declarations.
Diffstat (limited to 'src')
| -rw-r--r-- | src/Sema.zig | 203 |
| -rw-r--r-- | src/codegen.zig | 2 |
| -rw-r--r-- | src/type.zig | 535 |
3 files changed, 478 insertions, 262 deletions
diff --git a/src/Sema.zig b/src/Sema.zig index 56cc3b13bb..6ef4798da6 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1555,7 +1555,8 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const bin_inst = sema.code.instructions.items(.data)[inst].bin; const pointee_ty = try sema.resolveType(block, src, bin_inst.lhs); const ptr = sema.resolveInst(bin_inst.rhs); - const addr_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local); + const target = sema.mod.getTarget(); + const addr_space = target_util.defaultAddressSpace(target, .local); if (Air.refToIndex(ptr)) |ptr_inst| { if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { @@ -1575,7 +1576,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE try inferred_alloc.stored_inst_list.append(sema.arena, operand); try sema.requireRuntimeBlock(block, src); - const ptr_ty = try Type.ptr(sema.arena, .{ + const ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = pointee_ty, .@"align" = inferred_alloc.alignment, .@"addrspace" = addr_space, @@ -1593,7 +1594,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE try pointee_ty.copy(anon_decl.arena()), Value.undef, ); - const ptr_ty = try Type.ptr(sema.arena, .{ + const ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = pointee_ty, .@"align" = iac.data.alignment, .@"addrspace" = addr_space, @@ -1642,7 +1643,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE } } - const ptr_ty = try Type.ptr(sema.arena, .{ + const ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = pointee_ty, .@"addrspace" = addr_space, }); @@ -1663,7 +1664,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE } const ty_op = air_datas[trash_inst].ty_op; const operand_ty = sema.getTmpAir().typeOf(ty_op.operand); - const ptr_operand_ty = try Type.ptr(sema.arena, .{ + const ptr_operand_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = operand_ty, .@"addrspace" = addr_space, }); @@ -2225,9 +2226,10 @@ fn zirRetPtr( return sema.analyzeComptimeAlloc(block, fn_ret_ty, 0, src); } - const ptr_type = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = sema.fn_ret_ty, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); if (block.inlining != null) { @@ -2389,10 +2391,11 @@ fn zirAllocExtended( if (!small.is_const) { try sema.validateVarType(block, ty_src, var_ty, false); } - const ptr_type = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = var_ty, .@"align" = alignment, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); try sema.requireRuntimeBlock(block, src); try sema.resolveTypeLayout(block, src, var_ty); @@ -2450,9 +2453,10 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src); } - const ptr_type = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = var_ty, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = 
target_util.defaultAddressSpace(target, .local), }); try sema.requireRuntimeBlock(block, var_decl_src); try sema.resolveTypeLayout(block, ty_src, var_ty); @@ -2471,9 +2475,10 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.analyzeComptimeAlloc(block, var_ty, 0, ty_src); } try sema.validateVarType(block, ty_src, var_ty, false); - const ptr_type = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = var_ty, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); try sema.requireRuntimeBlock(block, var_decl_src); try sema.resolveTypeLayout(block, ty_src, var_ty); @@ -2542,7 +2547,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.mod.declareDeclDependency(sema.owner_decl, decl); const final_elem_ty = try decl.ty.copy(sema.arena); - const final_ptr_ty = try Type.ptr(sema.arena, .{ + const final_ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = final_elem_ty, .mutable = var_is_mut, .@"align" = iac.data.alignment, @@ -2565,7 +2570,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const peer_inst_list = inferred_alloc.data.stored_inst_list.items; const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); - const final_ptr_ty = try Type.ptr(sema.arena, .{ + const final_ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = final_elem_ty, .mutable = var_is_mut, .@"align" = inferred_alloc.data.alignment, @@ -3335,10 +3340,11 @@ fn storeToInferredAlloc( // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, operand); // Create a runtime bitcast instruction with exactly the type the pointer wants. 
- const ptr_ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = operand_ty, .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, operand); @@ -5444,7 +5450,8 @@ fn analyzeOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const child_pointer = try Type.ptr(sema.arena, target, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), @@ -5509,7 +5516,8 @@ fn zirOptionalPayload( return sema.failWithExpectedOptionalType(block, src, operand_ty); } const ptr_info = operand_ty.ptrInfo().data; - break :t try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + break :t try Type.ptr(sema.arena, target, .{ .pointee_type = try ptr_info.pointee_type.copy(sema.arena), .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", @@ -5607,7 +5615,8 @@ fn analyzeErrUnionPayloadPtr( return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()}); const payload_ty = operand_ty.elemType().errorUnionPayload(); - const operand_pointer_ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const operand_pointer_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), .@"addrspace" = operand_ty.ptrAddressSpace(), @@ -6517,7 +6526,8 @@ fn zirSwitchCapture( if (is_ref) { assert(operand_is_ref); - const field_ty_ptr = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const field_ty_ptr = try Type.ptr(sema.arena, target, .{ .pointee_type = field.ty, .@"addrspace" = .generic, .mutable = operand_ptr_ty.ptrIsMutable(), @@ -11327,7 +11337,8 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple; const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type); - const ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ty = try Type.ptr(sema.arena, target, .{ .pointee_type = elem_type, .@"addrspace" = .generic, .mutable = inst_data.is_mutable, @@ -11343,6 +11354,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air defer tracy.end(); const src: LazySrcLoc = .unneeded; + const elem_ty_src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); @@ -11366,41 +11378,40 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air break :blk try sema.analyzeAddrspace(block, .unneeded, ref, .pointer); } else .generic; - const bit_start = if (inst_data.flags.has_bit_range) blk: { + const bit_offset = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16); } else 0; - var host_size: u16 = if (inst_data.flags.has_bit_range) blk: { + const host_size: u16 = if (inst_data.flags.has_bit_range) blk: { const ref = 
@intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16); } else 0; - const elem_type = try sema.resolveType(block, .unneeded, extra.data.elem_type); - - if (host_size != 0) { - if (bit_start >= host_size * 8) { - return sema.fail(block, src, "bit offset starts after end of host integer", .{}); - } - const target = sema.mod.getTarget(); - const elem_type_bits = elem_type.bitSize(target); - if (host_size * 8 == elem_type_bits) { - assert(bit_start == 0); - host_size = 0; - } + if (host_size != 0 and bit_offset >= host_size * 8) { + return sema.fail(block, src, "bit offset starts after end of host integer", .{}); } - const ty = try Type.ptr(sema.arena, .{ - .pointee_type = elem_type, + const unresolved_elem_ty = try sema.resolveType(block, elem_ty_src, extra.data.elem_type); + const elem_ty = if (abi_align == 0) + unresolved_elem_ty + else t: { + const elem_ty = try sema.resolveTypeFields(block, elem_ty_src, unresolved_elem_ty); + try sema.resolveTypeLayout(block, elem_ty_src, elem_ty); + break :t elem_ty; + }; + const target = sema.mod.getTarget(); + const ty = try Type.ptr(sema.arena, target, .{ + .pointee_type = elem_ty, .sentinel = sentinel, .@"align" = abi_align, .@"addrspace" = address_space, - .bit_offset = bit_start, + .bit_offset = bit_offset, .host_size = host_size, .mutable = inst_data.flags.is_mutable, - .@"allowzero" = inst_data.flags.is_allowzero or inst_data.size == .C, + .@"allowzero" = inst_data.flags.is_allowzero, .@"volatile" = inst_data.flags.is_volatile, .size = inst_data.size, }); @@ -11721,15 +11732,16 @@ fn zirArrayInit( try sema.resolveTypeLayout(block, src, elem_ty); if (is_ref) { - const alloc_ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = array_ty, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, target, .{ .mutable = true, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = elem_ty, }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -11788,12 +11800,13 @@ fn zirArrayInitAnon( try sema.requireRuntimeBlock(block, runtime_src); if (is_ref) { + const target = sema.mod.getTarget(); const alloc = try block.addTy(.alloc, tuple_ty); for (operands) |operand, i_usize| { const i = @intCast(u32, i_usize); - const field_ptr_ty = try Type.ptr(sema.arena, .{ + const field_ptr_ty = try Type.ptr(sema.arena, target, .{ .mutable = true, - .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = types[i], }); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); @@ -12068,6 +12081,7 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const union_val = val.cast(Value.Payload.Union).?.data; const tag_ty = type_info_ty.unionTagType().?; const tag_index = tag_ty.enumTagFieldIndex(union_val.tag).?; + const target = sema.mod.getTarget(); switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -12146,11 +12160,14 @@ fn 
zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; - const ptr_ty = try Type.ptr(sema.arena, .{ .@"addrspace" = .generic, .pointee_type = child_ty }); + const ptr_ty = try Type.ptr(sema.arena, target, .{ + .@"addrspace" = .generic, + .pointee_type = child_ty, + }); actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; } - const ty = try Type.ptr(sema.arena, .{ + const ty = try Type.ptr(sema.arena, target, .{ .size = ptr_size, .mutable = !is_const_val.toBool(), .@"volatile" = is_volatile_val.toBool(), @@ -12176,7 +12193,10 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I var buffer: Value.ToTypeBuffer = undefined; const child_ty = try child_val.toType(&buffer).copy(sema.arena); const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { - const ptr_ty = try Type.ptr(sema.arena, .{ .@"addrspace" = .generic, .pointee_type = child_ty }); + const ptr_ty = try Type.ptr(sema.arena, target, .{ + .@"addrspace" = .generic, + .pointee_type = child_ty, + }); break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; } else null; @@ -12468,7 +12488,8 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // TODO insert safety check that the alignment is correct const ptr_info = ptr_ty.ptrInfo().data; - const dest_ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const dest_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = ptr_info.pointee_type, .@"align" = dest_align, .@"addrspace" = ptr_info.@"addrspace", @@ -13408,11 +13429,12 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr ptr_ty_data.@"align" = @intCast(u32, field.abi_align.toUnsignedInt()); } - const actual_field_ptr_ty = try Type.ptr(sema.arena, ptr_ty_data); + const target = sema.mod.getTarget(); + const actual_field_ptr_ty = try Type.ptr(sema.arena, target, ptr_ty_data); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src); ptr_ty_data.pointee_type = struct_ty; - const result_ptr = try Type.ptr(sema.arena, ptr_ty_data); + const result_ptr = try Type.ptr(sema.arena, target, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { const payload = field_ptr_val.castTag(.field_ptr).?.data; @@ -13509,7 +13531,8 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr); try sema.checkPtrOperand(block, src_src, uncasted_src_ptr_ty); const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data; - const wanted_src_ptr_ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const wanted_src_ptr_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = dest_ptr_ty.elemType2(), .@"align" = src_ptr_info.@"align", .@"addrspace" = src_ptr_info.@"addrspace", @@ -14149,9 +14172,10 @@ fn panicWithMsg( const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.ptr(arena, .{ + const target = mod.getTarget(); + const ptr_stack_trace_ty = try Type.ptr(arena, target, .{ .pointee_type = stack_trace_ty, - .@"addrspace" = 
target_util.defaultAddressSpace(mod.getTarget(), .global_constant), // TODO might need a place that is more dynamic + .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic }); const null_stack_trace = try sema.addConstant( try Type.optional(arena, ptr_stack_trace_ty), @@ -14405,6 +14429,8 @@ fn fieldPtr( else object_ty; + const target = sema.mod.getTarget(); + switch (inner_ty.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { @@ -14444,7 +14470,7 @@ fn fieldPtr( } try sema.requireRuntimeBlock(block, src); - const result_ty = try Type.ptr(sema.arena, .{ + const result_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = slice_ptr_ty, .mutable = object_ptr_ty.ptrIsMutable(), .@"addrspace" = object_ptr_ty.ptrAddressSpace(), @@ -14463,7 +14489,7 @@ fn fieldPtr( } try sema.requireRuntimeBlock(block, src); - const result_ty = try Type.ptr(sema.arena, .{ + const result_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = Type.usize, .mutable = object_ptr_ty.ptrIsMutable(), .@"addrspace" = object_ptr_ty.ptrAddressSpace(), @@ -14692,7 +14718,8 @@ fn finishFieldCallBind( object_ptr: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const arena = sema.arena; - const ptr_field_ty = try Type.ptr(arena, .{ + const target = sema.mod.getTarget(); + const ptr_field_ty = try Type.ptr(arena, target, .{ .pointee_type = field_ty, .mutable = ptr_ty.ptrIsMutable(), .@"addrspace" = ptr_ty.ptrAddressSpace(), @@ -14831,7 +14858,8 @@ fn structFieldPtrByIndex( } } - const ptr_field_ty = try Type.ptr(sema.arena, ptr_ty_data); + const target = sema.mod.getTarget(); + const ptr_field_ty = try Type.ptr(sema.arena, target, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return sema.addConstant( @@ -14959,7 +14987,8 @@ fn unionFieldPtr( const field_index = @intCast(u32, field_index_big); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try Type.ptr(arena, .{ + const target = sema.mod.getTarget(); + const ptr_field_ty = try Type.ptr(arena, target, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(), .@"addrspace" = union_ptr_ty.ptrAddressSpace(), @@ -15033,7 +15062,8 @@ fn elemPtr( .Pointer => { // In all below cases, we have to deref the ptr operand to get the actual array pointer. 
const array = try sema.analyzeLoad(block, array_ptr_src, array_ptr, array_ptr_src); - const result_ty = try array_ty.elemPtrType(sema.arena); + const target = sema.mod.getTarget(); + const result_ty = try array_ty.elemPtrType(sema.arena, target); switch (array_ty.ptrSize()) { .Slice => { const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array); @@ -15172,7 +15202,8 @@ fn tupleFieldPtr( } const field_ty = tuple_info.types[field_index]; - const ptr_field_ty = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_field_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(), .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(), @@ -15264,7 +15295,8 @@ fn elemPtrArray( elem_index_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const array_ptr_ty = sema.typeOf(array_ptr); - const result_ty = try array_ptr_ty.elemPtrType(sema.arena); + const target = sema.mod.getTarget(); + const result_ty = try array_ptr_ty.elemPtrType(sema.arena, target); if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| { if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { @@ -15957,15 +15989,28 @@ fn coerceInMemoryAllowedPtrs( // In this case, if they share the same child type, no need to resolve // pointee type alignment. Otherwise both pointee types must have their alignment // resolved and we compare the alignment numerically. - if (src_info.@"align" != 0 or dest_info.@"align" != 0 or - !dest_info.pointee_type.eql(src_info.pointee_type)) - { - const src_align = src_info.@"align"; - const dest_align = dest_info.@"align"; + alignment: { + if (src_info.@"align" == 0 and dest_info.@"align" == 0 and + dest_info.pointee_type.eql(src_info.pointee_type)) + { + break :alignment; + } + + const src_align = if (src_info.@"align" != 0) + src_info.@"align" + else + src_info.pointee_type.abiAlignment(target); + + const dest_align = if (dest_info.@"align" != 0) + dest_info.@"align" + else + dest_info.pointee_type.abiAlignment(target); if (dest_align > src_align) { return .no_match; } + + break :alignment; } return .ok; @@ -16874,6 +16919,7 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); try sema.ensureDeclAnalyzed(decl); + const target = sema.mod.getTarget(); const decl_tv = try decl.typedValue(); if (decl_tv.val.castTag(.variable)) |payload| { const variable = payload.data; @@ -16881,7 +16927,7 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref { 0 else @intCast(u32, decl.align_val.toUnsignedInt()); - const ty = try Type.ptr(sema.arena, .{ + const ty = try Type.ptr(sema.arena, target, .{ .pointee_type = decl_tv.ty, .mutable = variable.is_mutable, .@"addrspace" = decl.@"addrspace", @@ -16890,7 +16936,7 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref { return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl)); } return sema.addConstant( - try Type.ptr(sema.arena, .{ + try Type.ptr(sema.arena, target, .{ .pointee_type = decl_tv.ty, .mutable = false, .@"addrspace" = decl.@"addrspace", @@ -16918,12 +16964,13 @@ fn analyzeRef( try sema.requireRuntimeBlock(block, src); const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local); - const ptr_type = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = operand_ty, .mutable = false, .@"addrspace" 
= address_space, }); - const mut_ptr_type = try Type.ptr(sema.arena, .{ + const mut_ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = operand_ty, .@"addrspace" = address_space, }); @@ -17174,11 +17221,12 @@ fn analyzeSlice( const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; + const target = sema.mod.getTarget(); if (opt_new_len_val) |new_len_val| { const new_len_int = new_len_val.toUnsignedInt(); - const return_ty = try Type.ptr(sema.arena, .{ + const return_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty), .sentinel = null, .@"align" = new_ptr_ty_info.@"align", @@ -17206,7 +17254,7 @@ fn analyzeSlice( return sema.fail(block, ptr_src, "non-zero length slice of undefined pointer", .{}); } - const return_ty = try Type.ptr(sema.arena, .{ + const return_ty = try Type.ptr(sema.arena, target, .{ .pointee_type = elem_ty, .sentinel = sentinel, .@"align" = new_ptr_ty_info.@"align", @@ -17904,14 +17952,14 @@ fn resolvePeerTypes( else => unreachable, }; - return Type.ptr(sema.arena, info.data); + return Type.ptr(sema.arena, target, info.data); } if (make_the_slice_const) { // turn []T => []const T var info = chosen_ty.ptrInfo(); info.data.mutable = false; - return Type.ptr(sema.arena, info.data); + return Type.ptr(sema.arena, target, info.data); } return chosen_ty; @@ -19121,7 +19169,8 @@ fn analyzeComptimeAlloc( // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(block, src, var_type); - const ptr_type = try Type.ptr(sema.arena, .{ + const target = sema.mod.getTarget(); + const ptr_type = try Type.ptr(sema.arena, target, .{ .pointee_type = var_type, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), .@"align" = alignment, diff --git a/src/codegen.zig b/src/codegen.zig index 2484cb0e59..67119bf9fa 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -657,7 +657,7 @@ fn lowerDeclRef( .data = typed_value.val.sliceLen(), }; switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{ - .ty = Type.initTag(.usize), + .ty = Type.usize, .val = Value.initPayload(&slice_len.base), }, code, debug_output)) { .appended => {}, diff --git a/src/type.zig b/src/type.zig index fb3ab5d28f..1c8e1bfa50 100644 --- a/src/type.zig +++ b/src/type.zig @@ -491,94 +491,115 @@ pub const Type = extern union { pub fn eql(a: Type, b: Type) bool { // As a shortcut, if the small tags / addresses match, we're done. 
- if (a.tag_if_small_enough == b.tag_if_small_enough) - return true; - const zig_tag_a = a.zigTypeTag(); - const zig_tag_b = b.zigTypeTag(); - if (zig_tag_a != zig_tag_b) - return false; - switch (zig_tag_a) { - .EnumLiteral => return true, - .Type => return true, - .Void => return true, - .Bool => return true, - .NoReturn => return true, - .ComptimeFloat => return true, - .ComptimeInt => return true, - .Undefined => return true, - .Null => return true, - .AnyFrame => { - return a.elemType().eql(b.elemType()); - }, - .Pointer => { - const info_a = a.ptrInfo().data; - const info_b = b.ptrInfo().data; - if (!info_a.pointee_type.eql(info_b.pointee_type)) - return false; - if (info_a.size != info_b.size) - return false; - if (info_a.mutable != info_b.mutable) - return false; - if (info_a.@"volatile" != info_b.@"volatile") - return false; - if (info_a.@"allowzero" != info_b.@"allowzero") - return false; - if (info_a.bit_offset != info_b.bit_offset) - return false; - if (info_a.host_size != info_b.host_size) - return false; - if (info_a.@"addrspace" != info_b.@"addrspace") - return false; + if (a.tag_if_small_enough == b.tag_if_small_enough) return true; - const sentinel_a = info_a.sentinel; - const sentinel_b = info_b.sentinel; - if (sentinel_a) |sa| { - if (sentinel_b) |sb| { - if (!sa.eql(sb, info_a.pointee_type)) - return false; - } else { - return false; - } - } else { - if (sentinel_b != null) - return false; - } + switch (a.tag()) { + .generic_poison => unreachable, - return true; + // Detect that e.g. u64 != usize, even if the bits match on a particular target. + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + + .f16, + .f32, + .f64, + .f80, + .f128, + .c_longdouble, + + .bool, + .void, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .@"null", + .@"undefined", + .@"anyopaque", + .@"anyframe", + .enum_literal, + => |a_tag| { + assert(a_tag != b.tag()); // because of the comparison at the top of the function. + return false; }, - .Int => { - // Detect that e.g. u64 != usize, even if the bits match on a particular target. - const a_is_named_int = a.isNamedInt(); - const b_is_named_int = b.isNamedInt(); - if (a_is_named_int != b_is_named_int) - return false; - if (a_is_named_int) - return a.tag() == b.tag(); - // Remaining cases are arbitrary sized integers. - // The target will not be branched upon, because we handled target-dependent cases above. + + .u1, + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .u128, + .i128, + .int_signed, + .int_unsigned, + => { + if (b.zigTypeTag() != .Int) return false; + if (b.isNamedInt()) return false; + + // Arbitrary sized integers. The target will not be branched upon, + // because we handled target-dependent cases above. 
const info_a = a.intInfo(@as(Target, undefined)); const info_b = b.intInfo(@as(Target, undefined)); return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; }, - .Array, .Vector => { - if (a.arrayLen() != b.arrayLen()) - return false; - const elem_ty = a.elemType(); - if (!elem_ty.eql(b.elemType())) - return false; - const sentinel_a = a.sentinel(); - const sentinel_b = b.sentinel(); - if (sentinel_a) |sa| { - if (sentinel_b) |sb| { - return sa.eql(sb, elem_ty); - } else { - return false; - } - } else { - return sentinel_b == null; + + .error_set, + .error_set_single, + .anyerror, + .error_set_inferred, + .error_set_merged, + => { + if (b.zigTypeTag() != .ErrorSet) return false; + + // TODO: revisit the language specification for how to evaluate equality + // for error set types. + + if (a.tag() == .anyerror and b.tag() == .anyerror) { + return true; + } + + if (a.tag() == .error_set and b.tag() == .error_set) { + return a.castTag(.error_set).?.data.owner_decl == b.castTag(.error_set).?.data.owner_decl; } + + if (a.tag() == .error_set_inferred and b.tag() == .error_set_inferred) { + return a.castTag(.error_set_inferred).?.data == b.castTag(.error_set_inferred).?.data; + } + + if (a.tag() == .error_set_single and b.tag() == .error_set_single) { + const a_data = a.castTag(.error_set_single).?.data; + const b_data = b.castTag(.error_set_single).?.data; + return std.mem.eql(u8, a_data, b_data); + } + return false; }, - .Fn => { + + .@"opaque" => { + const opaque_obj_a = a.castTag(.@"opaque").?.data; + const opaque_obj_b = (b.castTag(.@"opaque") orelse return false).data; + return opaque_obj_a == opaque_obj_b; + }, + + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .function, + => { + if (b.zigTypeTag() != .Fn) return false; + const a_info = a.fnInfo(); const b_info = b.fnInfo(); @@ -613,76 +634,105 @@ pub const Type = extern union { return true; }, - .Optional => { - var buf_a: Payload.ElemType = undefined; - var buf_b: Payload.ElemType = undefined; - return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b)); - }, - .Struct => { - if (a.castTag(.@"struct")) |a_payload| { - if (b.castTag(.@"struct")) |b_payload| { - return a_payload.data == b_payload.data; - } - } - if (a.castTag(.tuple)) |a_payload| { - if (b.castTag(.tuple)) |b_payload| { - if (a_payload.data.types.len != b_payload.data.types.len) return false; - - for (a_payload.data.types) |a_ty, i| { - const b_ty = b_payload.data.types[i]; - if (!eql(a_ty, b_ty)) return false; - } - for (a_payload.data.values) |a_val, i| { - const ty = a_payload.data.types[i]; - const b_val = b_payload.data.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.tag() == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty)) return false; - } - } - } + .array, + .array_u8_sentinel_0, + .array_u8, + .array_sentinel, + .vector, + => { + if (a.zigTypeTag() != b.zigTypeTag()) return false; - return true; + if (a.arrayLen() != b.arrayLen()) + return false; + const elem_ty = a.elemType(); + if (!elem_ty.eql(b.elemType())) + return false; + const sentinel_a = a.sentinel(); + const sentinel_b = b.sentinel(); + if (sentinel_a) |sa| { + if (sentinel_b) |sb| { + return sa.eql(sb, elem_ty); + } else { + return false; } + } else { + return sentinel_b == null; } - return a.tag() == b.tag(); }, - .Enum => { - if (a.cast(Payload.EnumFull)) |a_payload| { - if 
(b.cast(Payload.EnumFull)) |b_payload| { - return a_payload.data == b_payload.data; - } - } - if (a.cast(Payload.EnumSimple)) |a_payload| { - if (b.cast(Payload.EnumSimple)) |b_payload| { - return a_payload.data == b_payload.data; + + .single_const_pointer_to_comptime_int, + .const_slice_u8, + .const_slice_u8_sentinel_0, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + .pointer, + .inferred_alloc_const, + .inferred_alloc_mut, + .manyptr_u8, + .manyptr_const_u8, + .manyptr_const_u8_sentinel_0, + => { + if (b.zigTypeTag() != .Pointer) return false; + + const info_a = a.ptrInfo().data; + const info_b = b.ptrInfo().data; + if (!info_a.pointee_type.eql(info_b.pointee_type)) + return false; + if (info_a.@"align" != info_b.@"align") + return false; + if (info_a.@"addrspace" != info_b.@"addrspace") + return false; + if (info_a.bit_offset != info_b.bit_offset) + return false; + if (info_a.host_size != info_b.host_size) + return false; + if (info_a.@"allowzero" != info_b.@"allowzero") + return false; + if (info_a.mutable != info_b.mutable) + return false; + if (info_a.@"volatile" != info_b.@"volatile") + return false; + if (info_a.size != info_b.size) + return false; + + const sentinel_a = info_a.sentinel; + const sentinel_b = info_b.sentinel; + if (sentinel_a) |sa| { + if (sentinel_b) |sb| { + if (!sa.eql(sb, info_a.pointee_type)) + return false; + } else { + return false; } + } else { + if (sentinel_b != null) + return false; } - return a.tag() == b.tag(); - }, - .Opaque => { - const opaque_obj_a = a.castTag(.@"opaque").?.data; - const opaque_obj_b = b.castTag(.@"opaque").?.data; - return opaque_obj_a == opaque_obj_b; + + return true; }, - .Union => { - if (a.cast(Payload.Union)) |a_payload| { - if (b.cast(Payload.Union)) |b_payload| { - return a_payload.data == b_payload.data; - } - } - return a.tag() == b.tag(); + + .optional, + .optional_single_const_pointer, + .optional_single_mut_pointer, + => { + if (b.zigTypeTag() != .Optional) return false; + + var buf_a: Payload.ElemType = undefined; + var buf_b: Payload.ElemType = undefined; + return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b)); }, - .ErrorUnion => { + + .anyerror_void_error_union, .error_union => { + if (b.zigTypeTag() != .ErrorUnion) return false; + const a_set = a.errorUnionSet(); const b_set = b.errorUnionSet(); if (!a_set.eql(b_set)) return false; @@ -693,34 +743,100 @@ pub const Type = extern union { return true; }, - .ErrorSet => { - // TODO: revisit the language specification for how to evaluate equality - // for error set types. 
- if (a.tag() == .anyerror and b.tag() == .anyerror) { - return true; - } + .anyframe_T => { + if (b.zigTypeTag() != .AnyFrame) return false; + return a.childType().eql(b.childType()); + }, - if (a.tag() == .error_set and b.tag() == .error_set) { - return a.castTag(.error_set).?.data.owner_decl == b.castTag(.error_set).?.data.owner_decl; - } + .empty_struct => { + const a_namespace = a.castTag(.empty_struct).?.data; + const b_namespace = (b.castTag(.empty_struct) orelse return false).data; + return a_namespace == b_namespace; + }, + .@"struct" => { + const a_struct_obj = a.castTag(.@"struct").?.data; + const b_struct_obj = (b.castTag(.@"struct") orelse return false).data; + return a_struct_obj == b_struct_obj; + }, + .tuple, .empty_struct_literal => { + if (!b.isTuple()) return false; - if (a.tag() == .error_set_inferred and b.tag() == .error_set_inferred) { - return a.castTag(.error_set_inferred).?.data == b.castTag(.error_set_inferred).?.data; + const a_tuple = a.tupleFields(); + const b_tuple = b.tupleFields(); + + if (a_tuple.types.len != b_tuple.types.len) return false; + + for (a_tuple.types) |a_ty, i| { + const b_ty = b_tuple.types[i]; + if (!eql(a_ty, b_ty)) return false; } - if (a.tag() == .error_set_single and b.tag() == .error_set_single) { - const a_data = a.castTag(.error_set_single).?.data; - const b_data = b.castTag(.error_set_single).?.data; - return std.mem.eql(u8, a_data, b_data); + for (a_tuple.values) |a_val, i| { + const ty = a_tuple.types[i]; + const b_val = b_tuple.values[i]; + if (a_val.tag() == .unreachable_value) { + if (b_val.tag() == .unreachable_value) { + continue; + } else { + return false; + } + } else { + if (b_val.tag() == .unreachable_value) { + return false; + } else { + if (!Value.eql(a_val, b_val, ty)) return false; + } + } } - return false; + + return true; }, - .Float => return a.tag() == b.tag(), - .BoundFn, - .Frame, - => std.debug.panic("TODO implement Type equality comparison of {} and {}", .{ a, b }), + // we can't compare these based on tags because it wouldn't detect if, + // for example, a was resolved into .@"struct" but b was one of these tags. + .call_options, + .prefetch_options, + .export_options, + .extern_options, + => unreachable, // needed to resolve the type before now + + .enum_full, .enum_nonexhaustive => { + const a_enum_obj = a.cast(Payload.EnumFull).?.data; + const b_enum_obj = (b.cast(Payload.EnumFull) orelse return false).data; + return a_enum_obj == b_enum_obj; + }, + .enum_simple => { + const a_enum_obj = a.cast(Payload.EnumSimple).?.data; + const b_enum_obj = (b.cast(Payload.EnumSimple) orelse return false).data; + return a_enum_obj == b_enum_obj; + }, + .enum_numbered => { + const a_enum_obj = a.cast(Payload.EnumNumbered).?.data; + const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data; + return a_enum_obj == b_enum_obj; + }, + // we can't compare these based on tags because it wouldn't detect if, + // for example, a was resolved into .enum_simple but b was one of these tags. + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + => unreachable, // needed to resolve the type before now + + .@"union", .union_tagged => { + const a_union_obj = a.cast(Payload.Union).?.data; + const b_union_obj = (b.cast(Payload.Union) orelse return false).data; + return a_union_obj == b_union_obj; + }, + // we can't compare these based on tags because it wouldn't detect if, + // for example, a was resolved into .union_tagged but b was one of these tags. 
+ .type_info => unreachable, // needed to resolve the type before now + + .bound_fn => unreachable, + .var_args_param => unreachable, // can be any type } } @@ -730,8 +846,8 @@ pub const Type = extern union { return hasher.final(); } - pub fn hashWithHasher(self: Type, hasher: *std.hash.Wyhash) void { - const zig_type_tag = self.zigTypeTag(); + pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash) void { + const zig_type_tag = ty.zigTypeTag(); std.hash.autoHash(hasher, zig_type_tag); switch (zig_type_tag) { .Type, @@ -745,41 +861,58 @@ pub const Type = extern union { => {}, // The zig type tag is all that is needed to distinguish. .Pointer => { - // TODO implement more pointer type hashing + const info = ty.ptrInfo().data; + hashWithHasher(info.pointee_type, hasher); + hashSentinel(info.sentinel, info.pointee_type, hasher); + std.hash.autoHash(hasher, info.@"align"); + std.hash.autoHash(hasher, info.@"addrspace"); + std.hash.autoHash(hasher, info.bit_offset); + std.hash.autoHash(hasher, info.host_size); + std.hash.autoHash(hasher, info.@"allowzero"); + std.hash.autoHash(hasher, info.mutable); + std.hash.autoHash(hasher, info.@"volatile"); + std.hash.autoHash(hasher, info.size); }, .Int => { // Detect that e.g. u64 != usize, even if the bits match on a particular target. - if (self.isNamedInt()) { - std.hash.autoHash(hasher, self.tag()); + if (ty.isNamedInt()) { + std.hash.autoHash(hasher, ty.tag()); } else { // Remaining cases are arbitrary sized integers. // The target will not be branched upon, because we handled target-dependent cases above. - const info = self.intInfo(@as(Target, undefined)); + const info = ty.intInfo(@as(Target, undefined)); std.hash.autoHash(hasher, info.signedness); std.hash.autoHash(hasher, info.bits); } }, .Array, .Vector => { - std.hash.autoHash(hasher, self.arrayLen()); - std.hash.autoHash(hasher, self.elemType().hash()); - // TODO hash array sentinel + const elem_ty = ty.elemType(); + std.hash.autoHash(hasher, ty.arrayLen()); + hashWithHasher(elem_ty, hasher); + hashSentinel(ty.sentinel(), elem_ty, hasher); }, .Fn => { - std.hash.autoHash(hasher, self.fnReturnType().hash()); - std.hash.autoHash(hasher, self.fnCallingConvention()); - const params_len = self.fnParamLen(); - std.hash.autoHash(hasher, params_len); - var i: usize = 0; - while (i < params_len) : (i += 1) { - std.hash.autoHash(hasher, self.fnParamType(i).hash()); + const fn_info = ty.fnInfo(); + hashWithHasher(fn_info.return_type, hasher); + std.hash.autoHash(hasher, fn_info.alignment); + std.hash.autoHash(hasher, fn_info.cc); + std.hash.autoHash(hasher, fn_info.is_var_args); + std.hash.autoHash(hasher, fn_info.is_generic); + + std.hash.autoHash(hasher, fn_info.param_types.len); + for (fn_info.param_types) |param_ty, i| { + std.hash.autoHash(hasher, fn_info.paramIsComptime(i)); + if (param_ty.tag() == .generic_poison) continue; + hashWithHasher(param_ty, hasher); } - std.hash.autoHash(hasher, self.fnIsVarArgs()); }, .Optional => { var buf: Payload.ElemType = undefined; - std.hash.autoHash(hasher, self.optionalChild(&buf).hash()); + hashWithHasher(ty.optionalChild(&buf), hasher); + }, + .Float => { + std.hash.autoHash(hasher, ty.tag()); }, - .Float, .Struct, .ErrorUnion, .ErrorSet, @@ -796,6 +929,15 @@ pub const Type = extern union { } } + fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash) void { + if (opt_val) |s| { + std.hash.autoHash(hasher, true); + s.hash(ty, hasher); + } else { + std.hash.autoHash(hasher, false); + } + } + pub const HashContext64 = struct { pub fn hash(self: 
@This(), t: Type) u64 { _ = self; @@ -2834,8 +2976,8 @@ pub const Type = extern union { /// For [*]T, returns *T /// For []T, returns *T /// Handles const-ness and address spaces in particular. - pub fn elemPtrType(ptr_ty: Type, arena: Allocator) !Type { - return try Type.ptr(arena, .{ + pub fn elemPtrType(ptr_ty: Type, arena: Allocator, target: Target) !Type { + return try Type.ptr(arena, target, .{ .pointee_type = ptr_ty.elemType2(), .mutable = ptr_ty.ptrIsMutable(), .@"addrspace" = ptr_ty.ptrAddressSpace(), @@ -4635,6 +4777,8 @@ pub const Type = extern union { pointee_type: Type, sentinel: ?Value = null, /// If zero use pointee_type.abiAlignment() + /// When creating pointer types, if alignment is equal to pointee type + /// abi alignment, this value should be set to 0 instead. @"align": u32 = 0, /// See src/target.zig defaultAddressSpace function for how to obtain /// an appropriate value for this field. @@ -4643,6 +4787,8 @@ pub const Type = extern union { /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this /// number of bytes. + /// When host_size=pointee_abi_size and bit_offset=0, this must be + /// represented with host_size=0 instead. host_size: u16 = 0, @"allowzero": bool = false, mutable: bool = true, // TODO rename this to const, not mutable @@ -4739,10 +4885,30 @@ pub const Type = extern union { pub const @"type" = initTag(.type); pub const @"anyerror" = initTag(.anyerror); - pub fn ptr(arena: Allocator, d: Payload.Pointer.Data) !Type { - assert(d.host_size == 0 or d.bit_offset < d.host_size * 8); + pub fn ptr(arena: Allocator, target: Target, data: Payload.Pointer.Data) !Type { + var d = data; + if (d.size == .C) { - assert(d.@"allowzero"); // All C pointers must set allowzero to true. + d.@"allowzero" = true; + } + + // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee + // type, we change it to 0 here. If this causes an assertion trip because the + // pointee type needs to be resolved more, that needs to be done before calling + // this ptr() function. + if (d.@"align" != 0 and d.@"align" == d.pointee_type.abiAlignment(target)) { + d.@"align" = 0; + } + + // Canonicalize host_size. If it matches the bit size of the pointee type, + // we change it to 0 here. If this causes an assertion trip, the pointee type + // needs to be resolved before calling this ptr() function. + if (d.host_size != 0) { + assert(d.bit_offset < d.host_size * 8); + if (d.host_size * 8 == d.pointee_type.bitSize(target)) { + assert(d.bit_offset == 0); + d.host_size = 0; + } } if (d.@"align" == 0 and d.@"addrspace" == .generic and @@ -4789,6 +4955,7 @@ pub const Type = extern union { return Type.initPayload(&type_payload.base); } } + return Type.Tag.pointer.create(arena, d); } |
