From d0e74ffe52d0ae0d876d4e3f7ef5d32b5f5460a5 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Mon, 8 Apr 2024 16:14:39 +0100
Subject: compiler: rework comptime pointer representation and access

We've got a big one here! This commit reworks how we represent pointers
in the InternPool, and rewrites the logic for loading and storing from
them at comptime.

Firstly, the pointer representation. Previously, pointers were
represented in a highly structured manner: pointers to fields, array
elements, etc., were explicitly represented. This works well for simple
cases, but is quite difficult to handle in the cases of unusual
reinterpretations, pointer casts, offsets, etc. Therefore, pointers are
now represented in a more "flat" manner. For types without well-defined
layouts -- such as comptime-only types and automatic-layout aggregates
-- we still use this "hierarchical" structure. However, for types with
well-defined layouts, we use a byte offset associated with the pointer.
This allows the comptime pointer access logic to deal with reinterpreted
pointers far more gracefully, because the "base address" of a pointer --
for instance, a `field` -- is a single value which pointer accesses
cannot exceed, since the parent has undefined layout.

This strategy is also more useful to most backends -- see the updated
logic in `codegen.zig` and `codegen/llvm.zig`. For backends which do
prefer a chain of field and element accesses for lowering pointer
values, such as SPIR-V, there is a helpful function in `Value` which
creates a strategy to derive a pointer value using ideally only field
and element accesses. This is actually more correct than the previous
logic, since it correctly handles pointer casts which, after the dust
has settled, end up referring exactly to an aggregate field or array
element.

In terms of the pointer access code, it has been rewritten from the
ground up. The old logic had become rather a mess of special cases
added whenever bugs were hit, and was still riddled with bugs. The new
logic was written to handle the "difficult" cases correctly, the most
notable of which is restructuring of a comptime-only array (for
instance, converting a `[3][2]comptime_int` to a `[2][3]comptime_int`).
Currently, the logic for loading and storing works somewhat
differently, but a future change will likely improve the loading logic
to bring it more in line with the store strategy. As far as I can tell,
the rewrite has fixed all bugs exposed by #19414.

As a part of this, the comptime bitcast logic has also been rewritten.
Previously, bitcasts simply worked by serializing the entire value into
an in-memory buffer, then deserializing it. This strategy has two key
weaknesses: pointers, and undefined values. Representations of these
values at comptime cannot be easily serialized/deserialized whilst
preserving data, which means many bitcasts would become runtime-known
if pointers were involved, or would turn `undefined` values into
`0xAA`. The new logic works by "flattening" the data structure to be
cast into a sequence of bit-packed atomic values, and then
"unflattening" it, using serialization when necessary, but with special
handling for `undefined` values and for pointers which align in virtual
memory. The resulting code is definitely slower -- more on this later
-- but it is correct.
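As a rough illustration of the "flat" strategy, here is a hypothetical
sketch in Zig (the names `SketchPtr` and `BaseAddr` are invented for
exposition only; the real `InternPool` encoding differs in its details):

    const std = @import("std");

    // Hypothetical sketch: a comptime pointer is a base address plus a
    // byte offset, rather than a chain of field/element selections.
    const SketchPtr = struct {
        base: BaseAddr,
        /// Offset in bytes from `base`; meaningful because the pointee
        /// of `base` has a well-defined layout.
        byte_offset: u64,

        const BaseAddr = union(enum) {
            /// A global declaration; offsets within it are well-defined.
            decl: u32,
            /// A known integer address, e.g. from `@ptrFromInt`.
            int: u64,
            /// A field of a parent *without* a well-defined layout:
            /// accesses cannot cross this base, so the hierarchical
            /// form is kept for it.
            field: struct { parent: *const SketchPtr, index: u32 },
        };
    };

    test "a reinterpreted pointer is just a different byte offset" {
        // e.g. a cast of `&s.buf[4]`, where `s` is the decl with index
        // 42 and `buf: [8]u8` starts at byte 8: one base, one offset,
        // no access chain to re-walk.
        const p: SketchPtr = .{ .base = .{ .decl = 42 }, .byte_offset = 12 };
        try std.testing.expectEqual(@as(u64, 12), p.byte_offset);
    }

Under this sketch, a reinterpreting cast only changes the offset and the
pointer type, and an access can be validated against the layout of the
base alone.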
The pointer access and bitcast logic required some helper functions and
types which are not generally useful elsewhere, so I opted to split them
into separate files `Sema/comptime_ptr_access.zig` and `Sema/bitcast.zig`,
with simple re-exports in `Sema.zig` for their small public APIs.

Whilst working on this branch, I caught various unrelated bugs with
transitive Sema errors and with the handling of `undefined` values. These
bugs have been fixed, and corresponding behavior tests added.

In terms of performance, I anticipate that this commit will regress
performance somewhat, because the new pointer access and bitcast logic is
necessarily more complex. I have not yet taken performance measurements,
but will do so shortly and post the results in this PR. If the performance
regression is severe, I will work to optimize the new logic before merge.

Resolves: #19452
Resolves: #19460
---
 src/codegen/c.zig     | 220 ++++++++++++++++++++++---------------------
 src/codegen/llvm.zig  | 223 +++++++++++++++-----------------------------
 src/codegen/spirv.zig | 132 ++++++++++++++++++------------
 3 files changed, 242 insertions(+), 333 deletions(-)

(limited to 'src/codegen')

diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 818267a8b8..1725658a37 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -646,8 +646,7 @@ pub const DeclGen = struct {
     fn renderAnonDeclValue(
         dg: *DeclGen,
         writer: anytype,
-        ptr_val: Value,
-        anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
+        anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
         location: ValueRenderLocation,
     ) error{ OutOfMemory, AnalysisFail }!void {
         const zcu = dg.zcu;
@@ -657,16 +656,16 @@ pub const DeclGen = struct {
         const decl_ty = decl_val.typeOf(zcu);
 
         // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
-        const ptr_ty = ptr_val.typeOf(zcu);
+        const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
         if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
             return dg.writeCValue(writer, .{ .undef = ptr_ty });
         }
 
         // Chase function values in order to be able to reference the original function.
         if (decl_val.getFunction(zcu)) |func|
-            return dg.renderDeclValue(writer, ptr_val, func.owner_decl, location);
+            return dg.renderDeclValue(writer, func.owner_decl, location);
         if (decl_val.getExternFunc(zcu)) |extern_func|
-            return dg.renderDeclValue(writer, ptr_val, extern_func.decl, location);
+            return dg.renderDeclValue(writer, extern_func.decl, location);
 
         assert(decl_val.getVariable(zcu) == null);
@@ -712,7 +711,6 @@ pub const DeclGen = struct {
     fn renderDeclValue(
         dg: *DeclGen,
         writer: anytype,
-        val: Value,
         decl_index: InternPool.DeclIndex,
         location: ValueRenderLocation,
     ) error{ OutOfMemory, AnalysisFail }!void {
@@ -722,17 +720,17 @@ pub const DeclGen = struct {
         assert(decl.has_tv);
 
         // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
-        const ty = val.typeOf(zcu);
         const decl_ty = decl.typeOf(zcu);
-        if (ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(zcu)) {
-            return dg.writeCValue(writer, .{ .undef = ty });
+        const ptr_ty = try decl.declPtrType(zcu);
+        if (!decl_ty.isFnOrHasRuntimeBits(zcu)) {
+            return dg.writeCValue(writer, .{ .undef = ptr_ty });
         }
 
         // Chase function values in order to be able to reference the original function.
         if (decl.val.getFunction(zcu)) |func| if (func.owner_decl != decl_index)
-            return dg.renderDeclValue(writer, val, func.owner_decl, location);
+            return dg.renderDeclValue(writer, func.owner_decl, location);
         if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index)
-            return dg.renderDeclValue(writer, val, extern_func.decl, location);
+            return dg.renderDeclValue(writer, extern_func.decl, location);
 
         if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative);
@@ -740,7 +738,7 @@ pub const DeclGen = struct {
         // them). The analysis until now should ensure that the C function
         // pointers are compatible. If they are not, then there is a bug
         // somewhere and we should let the C compiler tell us about it.
-        const ctype = try dg.ctypeFromType(ty, .complete);
+        const ctype = try dg.ctypeFromType(ptr_ty, .complete);
         const elem_ctype = ctype.info(ctype_pool).pointer.elem_ctype;
         const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
         const need_cast = !elem_ctype.eql(decl_ctype) and
@@ -755,125 +753,108 @@ pub const DeclGen = struct {
         if (need_cast) try writer.writeByte(')');
     }
 
-    /// Renders a "parent" pointer by recursing to the root decl/variable
-    /// that its contents are defined with respect to.
-    fn renderParentPtr(
+    fn renderPointer(
         dg: *DeclGen,
         writer: anytype,
-        ptr_val: InternPool.Index,
+        derivation: Value.PointerDeriveStep,
         location: ValueRenderLocation,
     ) error{ OutOfMemory, AnalysisFail }!void {
         const zcu = dg.zcu;
-        const ip = &zcu.intern_pool;
-        const ptr_ty = Type.fromInterned(ip.typeOf(ptr_val));
-        const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete);
-        const ptr_child_ctype = ptr_ctype.info(&dg.ctype_pool).pointer.elem_ctype;
-        const ptr = ip.indexToKey(ptr_val).ptr;
-        switch (ptr.addr) {
-            .decl => |d| try dg.renderDeclValue(writer, Value.fromInterned(ptr_val), d, location),
-            .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, Value.fromInterned(ptr_val), anon_decl, location),
+        switch (derivation) {
+            .comptime_alloc_ptr, .comptime_field_ptr => unreachable,
             .int => |int| {
+                const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete);
+                const addr_val = try zcu.intValue(Type.usize, int.addr);
                 try writer.writeByte('(');
                 try dg.renderCType(writer, ptr_ctype);
-                try writer.print("){x}", .{try dg.fmtIntLiteral(Value.fromInterned(int), .Other)});
+                try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)});
             },
-            .eu_payload, .opt_payload => |base| {
-                const ptr_base_ty = Type.fromInterned(ip.typeOf(base));
-                const base_ty = ptr_base_ty.childType(zcu);
-                // Ensure complete type definition is visible before accessing fields.
-                _ = try dg.ctypeFromType(base_ty, .complete);
-                const payload_ty = switch (ptr.addr) {
-                    .eu_payload => base_ty.errorUnionPayload(zcu),
-                    .opt_payload => base_ty.optionalChild(zcu),
-                    else => unreachable,
-                };
-                const payload_ctype = try dg.ctypeFromType(payload_ty, .forward);
-                if (!ptr_child_ctype.eql(payload_ctype)) {
-                    try writer.writeByte('(');
-                    try dg.renderCType(writer, ptr_ctype);
-                    try writer.writeByte(')');
-                }
+
+            .decl_ptr => |decl| try dg.renderDeclValue(writer, decl, location),
+            .anon_decl_ptr => |ad| try dg.renderAnonDeclValue(writer, ad, location),
+
+            inline .eu_payload_ptr, .opt_payload_ptr => |info| {
                 try writer.writeAll("&(");
-                try dg.renderParentPtr(writer, base, location);
+                try dg.renderPointer(writer, info.parent.*, location);
                 try writer.writeAll(")->payload");
             },
-            .elem => |elem| {
-                const ptr_base_ty = Type.fromInterned(ip.typeOf(elem.base));
-                const elem_ty = ptr_base_ty.elemType2(zcu);
-                const elem_ctype = try dg.ctypeFromType(elem_ty, .forward);
-                if (!ptr_child_ctype.eql(elem_ctype)) {
-                    try writer.writeByte('(');
-                    try dg.renderCType(writer, ptr_ctype);
-                    try writer.writeByte(')');
-                }
-                try writer.writeAll("&(");
-                if (ip.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
-                    try writer.writeByte('*');
-                try dg.renderParentPtr(writer, elem.base, location);
-                try writer.print(")[{d}]", .{elem.index});
-            },
-            .field => |field| {
-                const ptr_base_ty = Type.fromInterned(ip.typeOf(field.base));
-                const base_ty = ptr_base_ty.childType(zcu);
+
+            .field_ptr => |field| {
+                const parent_ptr_ty = try field.parent.ptrType(zcu);
+
                 // Ensure complete type definition is available before accessing fields.
-                _ = try dg.ctypeFromType(base_ty, .complete);
-                switch (fieldLocation(ptr_base_ty, ptr_ty, @as(u32, @intCast(field.index)), zcu)) {
+                _ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
+
+                switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
                     .begin => {
-                        const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
-                        if (!ptr_ctype.eql(ptr_base_ctype)) {
-                            try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_ctype);
-                            try writer.writeByte(')');
-                        }
-                        try dg.renderParentPtr(writer, field.base, location);
+                        const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
+                        try writer.writeByte('(');
+                        try dg.renderCType(writer, ptr_ctype);
+                        try writer.writeByte(')');
+                        try dg.renderPointer(writer, field.parent.*, location);
                     },
                     .field => |name| {
-                        const field_ty = switch (ip.indexToKey(base_ty.toIntern())) {
-                            .anon_struct_type,
-                            .struct_type,
-                            .union_type,
-                            => base_ty.structFieldType(@as(usize, @intCast(field.index)), zcu),
-                            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
-                                .One, .Many, .C => unreachable,
-                                .Slice => switch (field.index) {
-                                    Value.slice_ptr_index => base_ty.slicePtrFieldType(zcu),
-                                    Value.slice_len_index => Type.usize,
-                                    else => unreachable,
-                                },
-                            },
-                            else => unreachable,
-                        };
-                        const field_ctype = try dg.ctypeFromType(field_ty, .forward);
-                        if (!ptr_child_ctype.eql(field_ctype)) {
-                            try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_ctype);
-                            try writer.writeByte(')');
-                        }
                         try writer.writeAll("&(");
-                        try dg.renderParentPtr(writer, field.base, location);
+                        try dg.renderPointer(writer, field.parent.*, location);
                         try writer.writeAll(")->");
                         try dg.writeCValue(writer, name);
                     },
                     .byte_offset => |byte_offset| {
-                        const u8_ptr_ty = try zcu.adjustPtrTypeChild(ptr_ty, Type.u8);
-                        const u8_ptr_ctype = try dg.ctypeFromType(u8_ptr_ty, .complete);
-
-                        if (!ptr_ctype.eql(u8_ptr_ctype)) {
-                            try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_ctype);
-                            try writer.writeByte(')');
-                        }
-                        try writer.writeAll("((");
-                        try dg.renderCType(writer, u8_ptr_ctype);
+                        const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
+                        try writer.writeByte('(');
+                        try dg.renderCType(writer, ptr_ctype);
                         try writer.writeByte(')');
-                        try dg.renderParentPtr(writer, field.base, location);
-                        try writer.print(" + {})", .{
-                            try dg.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset), .Other),
-                        });
+                        const offset_val = try zcu.intValue(Type.usize, byte_offset);
+                        try writer.writeAll("((char *)");
+                        try dg.renderPointer(writer, field.parent.*, location);
+                        try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)});
                     },
                 }
             },
-            .comptime_field, .comptime_alloc => unreachable,
+
+            .elem_ptr => |elem| if (!(try elem.parent.ptrType(zcu)).childType(zcu).hasRuntimeBits(zcu)) {
+                // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer.
+                const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
+                try writer.writeByte('(');
+                try dg.renderCType(writer, ptr_ctype);
+                try writer.writeByte(')');
+                try dg.renderPointer(writer, elem.parent.*, location);
+            } else {
+                const index_val = try zcu.intValue(Type.usize, elem.elem_idx);
+                // We want to do pointer arithmetic on a pointer to the element type.
+                // We might have a pointer-to-array. In this case, we must cast first.
+                const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete);
+                const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(zcu), .complete);
+                if (result_ctype.eql(parent_ctype)) {
+                    // The pointer already has an appropriate type - just do the arithmetic.
+                    try writer.writeByte('(');
+                    try dg.renderPointer(writer, elem.parent.*, location);
+                    try writer.print(" + {})", .{try dg.fmtIntLiteral(index_val, .Other)});
+                } else {
+                    // We probably have an array pointer `T (*)[n]`. Cast to an element pointer,
+                    // and *then* apply the index.
+ try writer.writeAll("(("); + try dg.renderCType(writer, result_ctype); + try writer.writeByte(')'); + try dg.renderPointer(writer, elem.parent.*, location); + try writer.print(" + {})", .{try dg.fmtIntLiteral(index_val, .Other)}); + } + }, + + .offset_and_cast => |oac| { + const ptr_ctype = try dg.ctypeFromType(oac.new_ptr_ty, .complete); + try writer.writeByte('('); + try dg.renderCType(writer, ptr_ctype); + try writer.writeByte(')'); + if (oac.byte_offset == 0) { + try dg.renderPointer(writer, oac.parent.*, location); + } else { + const offset_val = try zcu.intValue(Type.usize, oac.byte_offset); + try writer.writeAll("((char *)"); + try dg.renderPointer(writer, oac.parent.*, location); + try writer.print(" + {})", .{try dg.fmtIntLiteral(offset_val, .Other)}); + } + }, } } @@ -1103,20 +1084,11 @@ pub const DeclGen = struct { } try writer.writeByte('}'); }, - .ptr => |ptr| switch (ptr.addr) { - .decl => |d| try dg.renderDeclValue(writer, val, d, location), - .anon_decl => |decl_val| try dg.renderAnonDeclValue(writer, val, decl_val, location), - .int => |int| { - try writer.writeAll("(("); - try dg.renderCType(writer, ctype); - try writer.print("){x})", .{try dg.fmtIntLiteral(Value.fromInterned(int), location)}); - }, - .eu_payload, - .opt_payload, - .elem, - .field, - => try dg.renderParentPtr(writer, val.toIntern(), location), - .comptime_field, .comptime_alloc => unreachable, + .ptr => { + var arena = std.heap.ArenaAllocator.init(zcu.gpa); + defer arena.deinit(); + const derivation = try val.pointerDerivation(arena.allocator(), zcu); + try dg.renderPointer(writer, derivation, location); }, .opt => |opt| switch (ctype.info(ctype_pool)) { .basic => if (ctype.isBool()) try writer.writeAll(switch (opt.val) { @@ -4574,10 +4546,10 @@ fn airCall( break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) { .extern_func => |extern_func| extern_func.decl, .func => |func| func.owner_decl, - .ptr => |ptr| switch (ptr.addr) { + .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => |decl| decl, else => break :known, - }, + } else break :known, else => break :known, }; }; @@ -5147,10 +5119,10 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool 'I' => !target.cpu.arch.isArmOrThumb(), else => switch (value) { .constant => |val| switch (f.object.dg.zcu.intern_pool.indexToKey(val.toIntern())) { - .ptr => |ptr| switch (ptr.addr) { + .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { .decl => false, else => true, - }, + } else true, else => true, }, else => false, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index db0eaa3ce5..8d2bca2f6c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3262,6 +3262,7 @@ pub const Object = struct { try o.lowerType(Type.fromInterned(vector_type.child)), ), .opt_type => |child_ty| { + // Must stay in sync with `opt_payload` logic in `lowerPtr`. if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8; const payload_ty = try o.lowerType(Type.fromInterned(child_ty)); @@ -3281,6 +3282,8 @@ pub const Object = struct { }, .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), .error_union_type => |error_union_type| { + // Must stay in sync with `codegen.errUnionPayloadOffset`. + // See logic in `lowerPtr`. 
                 const error_type = try o.errorIntType();
                 if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod))
                     return error_type;
@@ -3792,17 +3795,7 @@ pub const Object = struct {
                 128 => try o.builder.fp128Const(val.toFloat(f128, mod)),
                 else => unreachable,
             },
-            .ptr => |ptr| return switch (ptr.addr) {
-                .decl => |decl| try o.lowerDeclRefValue(ty, decl),
-                .anon_decl => |anon_decl| try o.lowerAnonDeclRef(ty, anon_decl),
-                .int => |int| try o.lowerIntAsPtr(int),
-                .eu_payload,
-                .opt_payload,
-                .elem,
-                .field,
-                => try o.lowerParentPtr(val),
-                .comptime_field, .comptime_alloc => unreachable,
-            },
+            .ptr => try o.lowerPtr(arg_val, 0),
             .slice => |slice| return o.builder.structConst(try o.lowerType(ty), &.{
                 try o.lowerValue(slice.ptr),
                 try o.lowerValue(slice.len),
@@ -4223,20 +4216,6 @@ pub const Object = struct {
         };
     }
 
-    fn lowerIntAsPtr(o: *Object, val: InternPool.Index) Allocator.Error!Builder.Constant {
-        const mod = o.module;
-        switch (mod.intern_pool.indexToKey(val)) {
-            .undef => return o.builder.undefConst(.ptr),
-            .int => {
-                var bigint_space: Value.BigIntSpace = undefined;
-                const bigint = Value.fromInterned(val).toBigInt(&bigint_space, mod);
-                const llvm_int = try lowerBigInt(o, Type.usize, bigint);
-                return o.builder.castConst(.inttoptr, llvm_int, .ptr);
-            },
-            else => unreachable,
-        }
-    }
-
     fn lowerBigInt(
         o: *Object,
         ty: Type,
@@ -4246,129 +4225,60 @@ pub const Object = struct {
         return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
     }
 
-    fn lowerParentPtrDecl(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
-        const mod = o.module;
-        const decl = mod.declPtr(decl_index);
-        const ptr_ty = try mod.singleMutPtrType(decl.typeOf(mod));
-        return o.lowerDeclRefValue(ptr_ty, decl_index);
-    }
-
-    fn lowerParentPtr(o: *Object, ptr_val: Value) Error!Builder.Constant {
-        const mod = o.module;
-        const ip = &mod.intern_pool;
-        const ptr = ip.indexToKey(ptr_val.toIntern()).ptr;
-        return switch (ptr.addr) {
-            .decl => |decl| try o.lowerParentPtrDecl(decl),
-            .anon_decl => |ad| try o.lowerAnonDeclRef(Type.fromInterned(ad.orig_ty), ad),
-            .int => |int| try o.lowerIntAsPtr(int),
-            .eu_payload => |eu_ptr| {
-                const parent_ptr = try o.lowerParentPtr(Value.fromInterned(eu_ptr));
-
-                const eu_ty = Type.fromInterned(ip.typeOf(eu_ptr)).childType(mod);
-                const payload_ty = eu_ty.errorUnionPayload(mod);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-                    // In this case, we represent pointer to error union the same as pointer
-                    // to the payload.
-                    return parent_ptr;
-                }
-
-                const err_int_ty = try mod.errorIntType();
-                const payload_align = payload_ty.abiAlignment(mod);
-                const err_align = err_int_ty.abiAlignment(mod);
-                const index: u32 = if (payload_align.compare(.gt, err_align)) 2 else 1;
-                return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
-                    .@"0", try o.builder.intConst(.i32, index),
+    fn lowerPtr(
+        o: *Object,
+        ptr_val: InternPool.Index,
+        prev_offset: u64,
+    ) Error!Builder.Constant {
+        const zcu = o.module;
+        const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
+        const offset: u64 = prev_offset + ptr.byte_offset;
+        return switch (ptr.base_addr) {
+            .decl => |decl| {
+                const base_ptr = try o.lowerDeclRefValue(decl);
+                return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
+                    try o.builder.intConst(.i64, offset),
                 });
             },
-            .opt_payload => |opt_ptr| {
-                const parent_ptr = try o.lowerParentPtr(Value.fromInterned(opt_ptr));
-
-                const opt_ty = Type.fromInterned(ip.typeOf(opt_ptr)).childType(mod);
-                const payload_ty = opt_ty.optionalChild(mod);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
-                    payload_ty.optionalReprIsPayload(mod))
-                {
-                    // In this case, we represent pointer to optional the same as pointer
-                    // to the payload.
-                    return parent_ptr;
-                }
-
-                return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{ .@"0", .@"0" });
-            },
-            .comptime_field, .comptime_alloc => unreachable,
-            .elem => |elem_ptr| {
-                const parent_ptr = try o.lowerParentPtr(Value.fromInterned(elem_ptr.base));
-                const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
-
-                return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
-                    try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
+            .anon_decl => |ad| {
+                const base_ptr = try o.lowerAnonDeclRef(ad);
+                return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
+                    try o.builder.intConst(.i64, offset),
                 });
             },
-            .field => |field_ptr| {
-                const parent_ptr = try o.lowerParentPtr(Value.fromInterned(field_ptr.base));
-                const parent_ptr_ty = Type.fromInterned(ip.typeOf(field_ptr.base));
-                const parent_ty = parent_ptr_ty.childType(mod);
-                const field_index: u32 = @intCast(field_ptr.index);
-                switch (parent_ty.zigTypeTag(mod)) {
-                    .Union => {
-                        if (parent_ty.containerLayout(mod) == .@"packed") {
-                            return parent_ptr;
-                        }
-
-                        const layout = parent_ty.unionGetLayout(mod);
-                        if (layout.payload_size == 0) {
-                            // In this case a pointer to the union and a pointer to any
-                            // (void) payload is the same.
-                            return parent_ptr;
-                        }
-
-                        const parent_llvm_ty = try o.lowerType(parent_ty);
-                        return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
-                            .@"0",
-                            try o.builder.intConst(.i32, @intFromBool(
-                                layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align),
-                            )),
-                        });
-                    },
-                    .Struct => {
-                        if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
-                            const ptr_info = Type.fromInterned(ptr.ty).ptrInfo(mod);
-                            if (ptr_info.packed_offset.host_size != 0) return parent_ptr;
-
-                            const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
-                            const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_info.packed_offset.bit_offset;
-                            const llvm_usize = try o.lowerType(Type.usize);
-                            const base_addr = try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
-                            const byte_offset = try o.builder.intConst(llvm_usize, @divExact(bit_offset, 8));
-                            const field_addr = try o.builder.binConst(.add, base_addr, byte_offset);
-                            return o.builder.castConst(.inttoptr, field_addr, .ptr);
-                        }
-
-                        return o.builder.gepConst(
-                            .inbounds,
-                            try o.lowerType(parent_ty),
-                            parent_ptr,
-                            null,
-                            if (o.llvmFieldIndex(parent_ty, field_index)) |llvm_field_index| &.{
-                                .@"0",
-                                try o.builder.intConst(.i32, llvm_field_index),
-                            } else &.{
-                                try o.builder.intConst(.i32, @intFromBool(
-                                    parent_ty.hasRuntimeBitsIgnoreComptime(mod),
-                                )),
-                            },
-                        );
+            .int => try o.builder.castConst(
+                .inttoptr,
+                try o.builder.intConst(try o.lowerType(Type.usize), offset),
+                .ptr,
+            ),
+            .eu_payload => |eu_ptr| try o.lowerPtr(
+                eu_ptr,
+                offset + @import("../codegen.zig").errUnionPayloadOffset(
+                    Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
+                    zcu,
+                ),
+            ),
+            .opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
+            .field => |field| {
+                const agg_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
+                const field_off: u64 = switch (agg_ty.zigTypeTag(zcu)) {
+                    .Pointer => off: {
+                        assert(agg_ty.isSlice(zcu));
+                        break :off switch (field.index) {
+                            Value.slice_ptr_index => 0,
+                            Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
+                            else => unreachable,
+                        };
                     },
-                    .Pointer => {
-                        assert(parent_ty.isSlice(mod));
-                        const parent_llvm_ty = try o.lowerType(parent_ty);
-                        return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
-                            .@"0", try o.builder.intConst(.i32, field_index),
-                        });
+                    .Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
+                        .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
+                        .@"extern", .@"packed" => unreachable,
                     },
                     else => unreachable,
-                }
+                };
+                return o.lowerPtr(field.base, offset + field_off);
             },
+            .arr_elem, .comptime_field, .comptime_alloc => unreachable,
         };
     }
 
@@ -4376,8 +4286,7 @@ pub const Object = struct {
     /// Maybe the logic could be unified.
     fn lowerAnonDeclRef(
         o: *Object,
-        ptr_ty: Type,
-        anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
+        anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
     ) Error!Builder.Constant {
         const mod = o.module;
         const ip = &mod.intern_pool;
@@ -4393,6 +4302,8 @@ pub const Object = struct {
             @panic("TODO");
         }
 
+        const ptr_ty = Type.fromInterned(anon_decl.orig_ty);
+
         const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
         if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
             (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
@@ -4400,9 +4311,8 @@ pub const Object = struct {
         if (is_fn_body)
             @panic("TODO");
 
-        const orig_ty = Type.fromInterned(anon_decl.orig_ty);
-        const llvm_addr_space = toLlvmAddressSpace(orig_ty.ptrAddressSpace(mod), target);
-        const alignment = orig_ty.ptrAlignment(mod);
+        const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target);
+        const alignment = ptr_ty.ptrAlignment(mod);
         const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
 
         const llvm_val = try o.builder.convConst(
@@ -4411,13 +4321,10 @@ pub const Object = struct {
             try o.builder.ptrType(llvm_addr_space),
         );
 
-        return o.builder.convConst(if (ptr_ty.isAbiInt(mod)) switch (ptr_ty.intInfo(mod).signedness) {
-            .signed => .signed,
-            .unsigned => .unsigned,
-        } else .unneeded, llvm_val, try o.lowerType(ptr_ty));
+        return o.builder.convConst(.unneeded, llvm_val, try o.lowerType(ptr_ty));
     }
 
-    fn lowerDeclRefValue(o: *Object, ty: Type, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
+    fn lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant {
         const mod = o.module;
 
         // In the case of something like:
@@ -4428,18 +4335,23 @@ pub const Object = struct {
         const decl = mod.declPtr(decl_index);
         if (decl.val.getFunction(mod)) |func| {
             if (func.owner_decl != decl_index) {
-                return o.lowerDeclRefValue(ty, func.owner_decl);
+                return o.lowerDeclRefValue(func.owner_decl);
             }
         } else if (decl.val.getExternFunc(mod)) |func| {
             if (func.decl != decl_index) {
-                return o.lowerDeclRefValue(ty, func.decl);
+                return o.lowerDeclRefValue(func.decl);
             }
         }
 
         const decl_ty = decl.typeOf(mod);
+        const ptr_ty = try decl.declPtrType(mod);
+
         const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
         if ((!is_fn_body and !decl_ty.hasRuntimeBits(mod)) or
-            (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ty);
+            (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic))
+        {
+            return o.lowerPtrToVoid(ptr_ty);
+        }
 
         const llvm_global = if (is_fn_body)
             (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global
@@ -4452,10 +4364,7 @@ pub const Object = struct {
             try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())),
         );
 
-        return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) {
-            .signed => .signed,
-            .unsigned => .unsigned,
-        } else .unneeded, llvm_val, try o.lowerType(ty));
+        return o.builder.convConst(.unneeded, llvm_val, try o.lowerType(ptr_ty));
     }
 
     fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 53ec59d531..ed04ee475b 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -863,7 +863,7 @@ const DeclGen = struct {
         const result_ty_id = try self.resolveType(ty, repr);
         const ip = &mod.intern_pool;
 
-        log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
+        log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod, null) });
{}", .{ ty.fmt(mod), val.fmtValue(mod, null) }); if (val.isUndefDeep(mod)) { return self.spv.constUndef(result_ty_id); } @@ -983,10 +983,10 @@ const DeclGen = struct { const int_ty = ty.intTagType(mod); break :cache try self.constant(int_ty, int_val, repr); }, - .ptr => return self.constantPtr(ty, val), + .ptr => return self.constantPtr(val), .slice => |slice| { const ptr_ty = ty.slicePtrFieldType(mod); - const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr)); + const ptr_id = try self.constantPtr(Value.fromInterned(slice.ptr)); const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect); return self.constructStruct( ty, @@ -1107,62 +1107,86 @@ const DeclGen = struct { return cacheable_id; } - fn constantPtr(self: *DeclGen, ptr_ty: Type, ptr_val: Value) Error!IdRef { + fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef { // TODO: Caching?? - const result_ty_id = try self.resolveType(ptr_ty, .direct); - const mod = self.module; + const zcu = self.module; + + if (ptr_val.isUndef(zcu)) { + const result_ty = ptr_val.typeOf(zcu); + const result_ty_id = try self.resolveType(result_ty, .direct); + return self.spv.constUndef(result_ty_id); + } - if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_id); + var arena = std.heap.ArenaAllocator.init(self.gpa); + defer arena.deinit(); - switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { - .decl => |decl| return try self.constantDeclRef(ptr_ty, decl), - .anon_decl => |anon_decl| return try self.constantAnonDeclRef(ptr_ty, anon_decl), + const derivation = try ptr_val.pointerDerivation(arena.allocator(), zcu); + return self.derivePtr(derivation); + } + + fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef { + const zcu = self.module; + switch (derivation) { + .comptime_alloc_ptr, .comptime_field_ptr => unreachable, .int => |int| { - const ptr_id = self.spv.allocId(); + const result_ty_id = try self.resolveType(int.ptr_ty, .direct); // TODO: This can probably be an OpSpecConstantOp Bitcast, but // that is not implemented by Mesa yet. Therefore, just generate it // as a runtime operation. + const result_ptr_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = result_ty_id, - .id_result = ptr_id, - .integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct), + .id_result = result_ptr_id, + .integer_value = try self.constant(Type.usize, try zcu.intValue(Type.usize, int.addr), .direct), }); - return ptr_id; + return result_ptr_id; + }, + .decl_ptr => |decl| { + const result_ptr_ty = try zcu.declPtr(decl).declPtrType(zcu); + return self.constantDeclRef(result_ptr_ty, decl); }, - .eu_payload => unreachable, // TODO - .opt_payload => unreachable, // TODO - .comptime_field, .comptime_alloc => unreachable, - .elem => |elem_ptr| { - const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base)); - const parent_ptr_id = try self.constantPtr(parent_ptr_ty, Value.fromInterned(elem_ptr.base)); - const index_id = try self.constInt(Type.usize, elem_ptr.index, .direct); - - const elem_ptr_id = try self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id); - - // TODO: Can we consolidate this in ptrElemPtr? - const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. - const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod))); - - // TODO: Can we remove this ID comparison? 
-                if (elem_ptr_ty_id == result_ty_id) {
-                    return elem_ptr_id;
+            .anon_decl_ptr => |ad| {
+                const result_ptr_ty = Type.fromInterned(ad.orig_ty);
+                return self.constantAnonDeclRef(result_ptr_ty, ad);
+            },
+            .eu_payload_ptr => @panic("TODO"),
+            .opt_payload_ptr => @panic("TODO"),
+            .field_ptr => |field| {
+                const parent_ptr_id = try self.derivePtr(field.parent.*);
+                const parent_ptr_ty = try field.parent.ptrType(zcu);
+                return self.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx);
+            },
+            .elem_ptr => |elem| {
+                const parent_ptr_id = try self.derivePtr(elem.parent.*);
+                const parent_ptr_ty = try elem.parent.ptrType(zcu);
+                const index_id = try self.constInt(Type.usize, elem.elem_idx, .direct);
+                return self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
+            },
+            .offset_and_cast => |oac| {
+                const parent_ptr_id = try self.derivePtr(oac.parent.*);
+                const parent_ptr_ty = try oac.parent.ptrType(zcu);
+                disallow: {
+                    if (oac.byte_offset != 0) break :disallow;
+                    // Allow changing the pointer type child only to restructure arrays.
+                    // e.g. [3][2]T to T is fine, as is [2]T -> [2][1]T.
+                    const src_base_ty = parent_ptr_ty.arrayBase(zcu)[0];
+                    const dest_base_ty = oac.new_ptr_ty.arrayBase(zcu)[0];
+                    if (self.getTarget().os.tag == .vulkan and src_base_ty.toIntern() != dest_base_ty.toIntern()) break :disallow;
+
+                    const result_ty_id = try self.resolveType(oac.new_ptr_ty, .direct);
+                    const result_ptr_id = self.spv.allocId();
+                    try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+                        .id_result_type = result_ty_id,
+                        .id_result = result_ptr_id,
+                        .operand = parent_ptr_id,
+                    });
+                    return result_ptr_id;
                 }
-                // This may happen when we have pointer-to-array and the result is
-                // another pointer-to-array instead of a pointer-to-element.
-                const result_id = self.spv.allocId();
-                try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
-                    .id_result_type = result_ty_id,
-                    .id_result = result_id,
-                    .operand = elem_ptr_id,
+                return self.fail("Cannot perform pointer cast: '{}' to '{}'", .{
+                    parent_ptr_ty.fmt(zcu),
+                    oac.new_ptr_ty.fmt(zcu),
                 });
-                return result_id;
-            },
-            .field => |field| {
-                const base_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
-                const base_ptr = try self.constantPtr(base_ptr_ty, Value.fromInterned(field.base));
-                const field_index: u32 = @intCast(field.index);
-                return try self.structFieldPtr(ptr_ty, base_ptr_ty, base_ptr, field_index);
             },
         }
     }
@@ -1170,7 +1194,7 @@ const DeclGen = struct {
     fn constantAnonDeclRef(
         self: *DeclGen,
         ty: Type,
-        anon_decl: InternPool.Key.Ptr.Addr.AnonDecl,
+        anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
     ) !IdRef {
         // TODO: Merge this function with constantDeclRef.
@@ -4456,16 +4480,20 @@ const DeclGen = struct {
     ) !IdRef {
         const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
 
-        const mod = self.module;
-        const object_ty = object_ptr_ty.childType(mod);
-        switch (object_ty.zigTypeTag(mod)) {
-            .Struct => switch (object_ty.containerLayout(mod)) {
+        const zcu = self.module;
+        const object_ty = object_ptr_ty.childType(zcu);
+        switch (object_ty.zigTypeTag(zcu)) {
+            .Pointer => {
+                assert(object_ty.isSlice(zcu));
+                return self.accessChain(result_ty_id, object_ptr, &.{field_index});
+            },
+            .Struct => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => {
                     return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
                 },
             },
-            .Union => switch (object_ty.containerLayout(mod)) {
+            .Union => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => {
                     const layout = self.unionLayout(object_ty);
@@ -4475,7 +4503,7 @@ const DeclGen = struct {
                         return try self.spv.constUndef(result_ty_id);
                     }
 
-                    const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
+                    const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
                     const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class);
                     const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
--
cgit v1.2.3