aboutsummaryrefslogtreecommitdiff
path: root/src/codegen/spirv.zig
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2024-04-06 13:03:22 -0700
committerGitHub <noreply@github.com>2024-04-06 13:03:22 -0700
commitf45ba7d0c1489a439725a11e5e36aa69fe75e7c3 (patch)
treefe5fea980d6ac7ecc3b1b7a45f82bbea4a0a460a /src/codegen/spirv.zig
parentf668c8bfd65489d1d38716e2973e0ee1cf0e8c52 (diff)
parentac165458959d9db9c9fd0362aeb35f8b983cdf6a (diff)
downloadzig-f45ba7d0c1489a439725a11e5e36aa69fe75e7c3.tar.gz
zig-f45ba7d0c1489a439725a11e5e36aa69fe75e7c3.zip
Merge pull request #19562 from Snektron/spirv-remove-cache
spirv: remove cache
Diffstat (limited to 'src/codegen/spirv.zig')
-rw-r--r--src/codegen/spirv.zig1550
1 files changed, 777 insertions, 773 deletions
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index a47497d89d..2570343763 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -22,8 +22,6 @@ const IdResultType = spec.IdResultType;
const StorageClass = spec.StorageClass;
const SpvModule = @import("spirv/Module.zig");
-const CacheRef = SpvModule.CacheRef;
-const CacheString = SpvModule.CacheString;
const SpvSection = @import("spirv/Section.zig");
const SpvAssembler = @import("spirv/Assembler.zig");
@@ -32,14 +30,11 @@ const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
pub const zig_call_abi_ver = 3;
-/// We want to store some extra facts about types as mapped from Zig to SPIR-V.
-/// This structure is used to keep that extra information, as well as
-/// the cached reference to the type.
-const SpvTypeInfo = struct {
- ty_ref: CacheRef,
-};
-
-const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, SpvTypeInfo);
+const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, DeclGen.Repr }, IdResult);
+const PtrTypeMap = std.AutoHashMapUnmanaged(
+ struct { InternPool.Index, StorageClass },
+ struct { ty_id: IdRef, fwd_emitted: bool },
+);
const ControlFlow = union(enum) {
const Structured = struct {
@@ -162,14 +157,16 @@ pub const Object = struct {
/// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices.
anon_decl_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
- /// A map that maps AIR intern pool indices to SPIR-V cache references (which
- /// is basically the same thing except for SPIR-V).
- /// This map is typically only used for structures that are deemed heavy enough
- /// that it is worth to store them here. The SPIR-V module also interns types,
- /// and so the main purpose of this map is to avoid recomputation and to
- /// cache extra information about the type rather than to aid in validity
- /// of the SPIR-V module.
- type_map: TypeMap = .{},
+ /// A map that maps AIR intern pool indices to SPIR-V result-ids.
+ intern_map: InternMap = .{},
+
+ /// This map serves a dual purpose:
+ /// - It keeps track of pointers that are currently being emitted, so that we can tell
+ /// if they are recursive and need an OpTypeForwardPointer.
+ /// - It caches pointers by child-type. This is required because sometimes we rely on
+ /// ID-equality for pointers, and pointers constructed via `ptrType()` aren't interned
+ /// via the usual `intern_map` mechanism.
+ ptr_types: PtrTypeMap = .{},
pub fn init(gpa: Allocator) Object {
return .{
@@ -182,7 +179,8 @@ pub const Object = struct {
self.spv.deinit();
self.decl_link.deinit(self.gpa);
self.anon_decl_link.deinit(self.gpa);
- self.type_map.deinit(self.gpa);
+ self.intern_map.deinit(self.gpa);
+ self.ptr_types.deinit(self.gpa);
}
fn genDecl(
@@ -204,7 +202,8 @@ pub const Object = struct {
.decl_index = decl_index,
.air = air,
.liveness = liveness,
- .type_map = &self.type_map,
+ .intern_map = &self.intern_map,
+ .ptr_types = &self.ptr_types,
.control_flow = switch (structured_cfg) {
true => .{ .structured = .{} },
false => .{ .unstructured = .{} },
@@ -309,13 +308,12 @@ const DeclGen = struct {
/// A map keeping track of which instruction generated which result-id.
inst_results: InstMap = .{},
- /// A map that maps AIR intern pool indices to SPIR-V cache references.
- /// See Object.type_map
- type_map: *TypeMap,
+ /// A map that maps AIR intern pool indices to SPIR-V result-ids.
+ /// See `Object.intern_map`.
+ intern_map: *InternMap,
- /// Child types of pointers that are currently in progress of being resolved. If a pointer
- /// is already in this map, its recursive.
- wip_pointers: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, CacheRef) = .{},
+ /// Module's pointer types, see `Object.ptr_types`.
+ ptr_types: *PtrTypeMap,
/// This field keeps track of the current state wrt structured or unstructured control flow.
control_flow: ControlFlow,
@@ -402,7 +400,6 @@ const DeclGen = struct {
pub fn deinit(self: *DeclGen) void {
self.args.deinit(self.gpa);
self.inst_results.deinit(self.gpa);
- self.wip_pointers.deinit(self.gpa);
self.control_flow.deinit(self.gpa);
self.func.deinit(self.gpa);
}
@@ -452,7 +449,7 @@ const DeclGen = struct {
const mod = self.module;
const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
- const decl_ptr_ty_ref = try self.ptrType(ty, .Generic);
+ const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
const spv_decl_index = blk: {
const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, .Function });
@@ -460,7 +457,7 @@ const DeclGen = struct {
try self.addFunctionDep(entry.value_ptr.*, .Function);
const result_id = self.spv.declPtr(entry.value_ptr.*).result_id;
- return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
+ return try self.castToGeneric(decl_ptr_ty_id, result_id);
}
const spv_decl_index = try self.spv.allocDecl(.invocation_global);
@@ -488,19 +485,14 @@ const DeclGen = struct {
self.func = .{};
defer self.func.deinit(self.gpa);
- const void_ty_ref = try self.resolveType(Type.void, .direct);
- const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
- .return_type = void_ty_ref,
- .parameters = &.{},
- } });
+ const initializer_proto_ty_id = try self.functionType(Type.void, &.{});
const initializer_id = self.spv.allocId();
-
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(void_ty_ref),
+ .id_result_type = try self.resolveType(Type.void, .direct),
.id_result = initializer_id,
.function_control = .{},
- .function_type = self.typeId(initializer_proto_ty_ref),
+ .function_type = initializer_proto_ty_id,
});
const root_block_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
@@ -520,9 +512,9 @@ const DeclGen = struct {
try self.spv.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
- const fn_decl_ptr_ty_ref = try self.ptrType(ty, .Function);
+ const fn_decl_ptr_ty_id = try self.ptrType(ty, .Function);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(fn_decl_ptr_ty_ref),
+ .id_result_type = fn_decl_ptr_ty_id,
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -530,7 +522,7 @@ const DeclGen = struct {
});
}
- return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
+ return try self.castToGeneric(decl_ptr_ty_id, result_id);
}
fn addFunctionDep(self: *DeclGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void {
@@ -696,14 +688,25 @@ const DeclGen = struct {
/// Emits a bool constant in a particular representation.
fn constBool(self: *DeclGen, value: bool, repr: Repr) !IdRef {
+ // TODO: Cache?
+
+ const section = &self.spv.sections.types_globals_constants;
switch (repr) {
.indirect => {
- const int_ty_ref = try self.intType(.unsigned, 1);
- return self.constInt(int_ty_ref, @intFromBool(value));
+ return try self.constInt(Type.u1, @intFromBool(value), .indirect);
},
.direct => {
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- return self.spv.constBool(bool_ty_ref, value);
+ const result_ty_id = try self.resolveType(Type.bool, .direct);
+ const result_id = self.spv.allocId();
+ const operands = .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ };
+ switch (value) {
+ true => try section.emit(self.spv.gpa, .OpConstantTrue, operands),
+ false => try section.emit(self.spv.gpa, .OpConstantFalse, operands),
+ }
+ return result_id;
},
}
}
@@ -711,68 +714,63 @@ const DeclGen = struct {
/// Emits an integer constant.
/// This function, unlike SpvModule.constInt, takes care to bitcast
/// the value to an unsigned int first for Kernels.
- fn constInt(self: *DeclGen, ty_ref: CacheRef, value: anytype) !IdRef {
- switch (self.spv.cache.lookup(ty_ref)) {
- .vector_type => |vec_type| {
- const elem_ids = try self.gpa.alloc(IdRef, vec_type.component_count);
- defer self.gpa.free(elem_ids);
- const int_value = try self.constInt(vec_type.component_type, value);
- @memset(elem_ids, int_value);
-
- const constituents_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
- .id_result_type = self.typeId(ty_ref),
- .id_result = constituents_id,
- .constituents = elem_ids,
- });
- return constituents_id;
- },
- else => {},
- }
+ fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef {
+ // TODO: Cache?
+ const mod = self.module;
+ const scalar_ty = ty.scalarType(mod);
+ const int_info = scalar_ty.intInfo(mod);
+ // Use backing bits so that negatives are sign extended
+ const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
+
+ const bits: u64 = switch (int_info.signedness) {
+ // Intcast needed to silence compile errors for when the wrong path is compiled.
+ // Lazy fix.
+ .signed => @bitCast(@as(i64, @intCast(value))),
+ .unsigned => @as(u64, @intCast(value)),
+ };
- if (value < 0) {
- const ty = self.spv.cache.lookup(ty_ref).int_type;
- // Manually truncate the value so that the resulting value
- // fits within the unsigned type.
- const bits: u64 = @bitCast(@as(i64, @intCast(value)));
- const truncated_bits = if (ty.bits == 64)
- bits
- else
- bits & (@as(u64, 1) << @intCast(ty.bits)) - 1;
- return try self.spv.constInt(ty_ref, truncated_bits);
- } else {
- return try self.spv.constInt(ty_ref, value);
+ // Manually truncate the value to the right amount of bits.
+ const truncated_bits = if (backing_bits == 64)
+ bits
+ else
+ bits & (@as(u64, 1) << @intCast(backing_bits)) - 1;
+
+ const result_ty_id = try self.resolveType(scalar_ty, repr);
+ const result_id = self.spv.allocId();
+
+ const section = &self.spv.sections.types_globals_constants;
+ switch (backing_bits) {
+ 0 => unreachable, // u0 is comptime
+ 1...32 => try section.emit(self.spv.gpa, .OpConstant, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .value = .{ .uint32 = @truncate(truncated_bits) },
+ }),
+ 33...64 => try section.emit(self.spv.gpa, .OpConstant, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .value = .{ .uint64 = truncated_bits },
+ }),
+ else => unreachable, // TODO: Large integer constants
}
- }
- /// Emits a float constant
- fn constFloat(self: *DeclGen, ty_ref: CacheRef, value: f128) !IdRef {
- switch (self.spv.cache.lookup(ty_ref)) {
- .vector_type => |vec_type| {
- const elem_ids = try self.gpa.alloc(IdRef, vec_type.component_count);
- defer self.gpa.free(elem_ids);
- const int_value = try self.constFloat(vec_type.component_type, value);
- @memset(elem_ids, int_value);
-
- const constituents_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
- .id_result_type = self.typeId(ty_ref),
- .id_result = constituents_id,
- .constituents = elem_ids,
- });
- return constituents_id;
- },
- else => {},
+ if (!ty.isVector(mod)) {
+ return result_id;
}
- const ty = self.spv.cache.lookup(ty_ref).float_type;
- return switch (ty.bits) {
- 16 => try self.spv.resolveId(.{ .float = .{ .ty = ty_ref, .value = .{ .float16 = @floatCast(value) } } }),
- 32 => try self.spv.resolveId(.{ .float = .{ .ty = ty_ref, .value = .{ .float32 = @floatCast(value) } } }),
- 64 => try self.spv.resolveId(.{ .float = .{ .ty = ty_ref, .value = .{ .float64 = @floatCast(value) } } }),
- 80, 128 => unreachable, // TODO
- else => unreachable,
- };
+ const n = ty.vectorLen(mod);
+ const ids = try self.gpa.alloc(IdRef, n);
+ defer self.gpa.free(ids);
+ @memset(ids, result_id);
+
+ const vec_ty_id = try self.resolveType(ty, repr);
+ const vec_result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
+ .id_result_type = vec_ty_id,
+ .id_result = vec_result_id,
+ .constituents = ids,
+ });
+ return vec_result_id;
}
/// Construct a struct at runtime.
@@ -788,8 +786,8 @@ const DeclGen = struct {
// TODO: Make this OpCompositeConstruct when we can
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
for (constituents, types, 0..) |constitent_id, member_ty, index| {
- const ptr_member_ty_ref = try self.ptrType(member_ty, .Function);
- const ptr_id = try self.accessChain(ptr_member_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+ const ptr_member_ty_id = try self.ptrType(member_ty, .Function);
+ const ptr_id = try self.accessChain(ptr_member_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
@@ -810,9 +808,9 @@ const DeclGen = struct {
// TODO: Make this OpCompositeConstruct when we can
const mod = self.module;
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
- const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
+ const ptr_elem_ty_id = try self.ptrType(ty.elemType2(mod), .Function);
for (constituents, 0..) |constitent_id, index| {
- const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+ const ptr_id = try self.accessChain(ptr_elem_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
@@ -834,9 +832,9 @@ const DeclGen = struct {
// TODO: Make this OpCompositeConstruct when we can
const mod = self.module;
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
- const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
+ const ptr_elem_ty_id = try self.ptrType(ty.elemType2(mod), .Function);
for (constituents, 0..) |constitent_id, index| {
- const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+ const ptr_id = try self.accessChain(ptr_elem_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
@@ -852,258 +850,279 @@ const DeclGen = struct {
/// is done by emitting a sequence of instructions that initialize the value.
//
/// This function should only be called during function code generation.
- fn constant(self: *DeclGen, ty: Type, arg_val: Value, repr: Repr) !IdRef {
+ fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
+ // Note: The intern_map may only be used with constants that DO NOT generate any runtime code!!
+ // Ideally that should be all constants in the future, or it should be cleaned up somehow. For
+ // now, only use the intern_map on case-by-case basis by breaking to :cache.
+ if (self.intern_map.get(.{ val.toIntern(), repr })) |id| {
+ return id;
+ }
+
const mod = self.module;
const target = self.getTarget();
- const result_ty_ref = try self.resolveType(ty, repr);
+ const result_ty_id = try self.resolveType(ty, repr);
const ip = &mod.intern_pool;
- const val = arg_val;
-
- log.debug("constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
+ log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
if (val.isUndefDeep(mod)) {
- return self.spv.constUndef(result_ty_ref);
- }
-
- switch (ip.indexToKey(val.toIntern())) {
- .int_type,
- .ptr_type,
- .array_type,
- .vector_type,
- .opt_type,
- .anyframe_type,
- .error_union_type,
- .simple_type,
- .struct_type,
- .anon_struct_type,
- .union_type,
- .opaque_type,
- .enum_type,
- .func_type,
- .error_set_type,
- .inferred_error_set_type,
- => unreachable, // types, not values
-
- .undef => unreachable, // handled above
-
- .variable,
- .extern_func,
- .func,
- .enum_literal,
- .empty_enum_value,
- => unreachable, // non-runtime values
-
- .simple_value => |simple_value| switch (simple_value) {
- .undefined,
- .void,
- .null,
- .empty_struct,
- .@"unreachable",
- .generic_poison,
+ return self.spv.constUndef(result_ty_id);
+ }
+
+ const section = &self.spv.sections.types_globals_constants;
+
+ const cacheable_id = cache: {
+ switch (ip.indexToKey(val.toIntern())) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef => unreachable, // handled above
+
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ .empty_enum_value,
=> unreachable, // non-runtime values
- .false, .true => return try self.constBool(val.toBool(), repr),
- },
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
- .int => {
- if (ty.isSignedInt(mod)) {
- return try self.constInt(result_ty_ref, val.toSignedInt(mod));
- } else {
- return try self.constInt(result_ty_ref, val.toUnsignedInt(mod));
- }
- },
- .float => return switch (ty.floatBits(target)) {
- 16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
- 32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
- 64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
- 80, 128 => unreachable, // TODO
- else => unreachable,
- },
- .err => |err| {
- const value = try mod.getErrorValue(err.name);
- return try self.constInt(result_ty_ref, value);
- },
- .error_union => |error_union| {
- // TODO: Error unions may be constructed with constant instructions if the payload type
- // allows it. For now, just generate it here regardless.
- const err_int_ty = try mod.errorIntType();
- const err_ty = switch (error_union.val) {
- .err_name => ty.errorUnionSet(mod),
- .payload => err_int_ty,
- };
- const err_val = switch (error_union.val) {
- .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{
- .ty = ty.errorUnionSet(mod).toIntern(),
- .name = err_name,
- } }))),
- .payload => try mod.intValue(err_int_ty, 0),
- };
- const payload_ty = ty.errorUnionPayload(mod);
- const eu_layout = self.errorUnionLayout(payload_ty);
- if (!eu_layout.payload_has_bits) {
- // We use the error type directly as the type.
- return try self.constant(err_ty, err_val, .indirect);
- }
-
- const payload_val = Value.fromInterned(switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
- .payload => |payload| payload,
- });
-
- var constituents: [2]IdRef = undefined;
- var types: [2]Type = undefined;
- if (eu_layout.error_first) {
- constituents[0] = try self.constant(err_ty, err_val, .indirect);
- constituents[1] = try self.constant(payload_ty, payload_val, .indirect);
- types = .{ err_ty, payload_ty };
- } else {
- constituents[0] = try self.constant(payload_ty, payload_val, .indirect);
- constituents[1] = try self.constant(err_ty, err_val, .indirect);
- types = .{ payload_ty, err_ty };
- }
-
- return try self.constructStruct(ty, &types, &constituents);
- },
- .enum_tag => {
- const int_val = try val.intFromEnum(ty, mod);
- const int_ty = ty.intTagType(mod);
- return try self.constant(int_ty, int_val, repr);
- },
- .ptr => return self.constantPtr(ty, val),
- .slice => |slice| {
- const ptr_ty = ty.slicePtrFieldType(mod);
- const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr));
- const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
- return self.constructStruct(
- ty,
- &.{ ptr_ty, Type.usize },
- &.{ ptr_id, len_id },
- );
- },
- .opt => {
- const payload_ty = ty.optionalChild(mod);
- const maybe_payload_val = val.optionalValue(mod);
-
- if (!payload_ty.hasRuntimeBits(mod)) {
- return try self.constBool(maybe_payload_val != null, .indirect);
- } else if (ty.optionalReprIsPayload(mod)) {
- // Optional representation is a nullable pointer or slice.
- if (maybe_payload_val) |payload_val| {
- return try self.constant(payload_ty, payload_val, .indirect);
+ .false, .true => break :cache try self.constBool(val.toBool(), repr),
+ },
+ .int => {
+ if (ty.isSignedInt(mod)) {
+ break :cache try self.constInt(ty, val.toSignedInt(mod), repr);
} else {
- const ptr_ty_ref = try self.resolveType(ty, .indirect);
- return self.spv.constNull(ptr_ty_ref);
+ break :cache try self.constInt(ty, val.toUnsignedInt(mod), repr);
+ }
+ },
+ .float => {
+ const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
+ 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, mod))) },
+ 32 => .{ .float32 = val.toFloat(f32, mod) },
+ 64 => .{ .float64 = val.toFloat(f64, mod) },
+ 80, 128 => unreachable, // TODO
+ else => unreachable,
+ };
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpConstant, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .value = lit,
+ });
+ break :cache result_id;
+ },
+ .err => |err| {
+ const value = try mod.getErrorValue(err.name);
+ break :cache try self.constInt(ty, value, repr);
+ },
+ .error_union => |error_union| {
+ // TODO: Error unions may be constructed with constant instructions if the payload type
+ // allows it. For now, just generate it here regardless.
+ const err_int_ty = try mod.errorIntType();
+ const err_ty = switch (error_union.val) {
+ .err_name => ty.errorUnionSet(mod),
+ .payload => err_int_ty,
+ };
+ const err_val = switch (error_union.val) {
+ .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{
+ .ty = ty.errorUnionSet(mod).toIntern(),
+ .name = err_name,
+ } }))),
+ .payload => try mod.intValue(err_int_ty, 0),
+ };
+ const payload_ty = ty.errorUnionPayload(mod);
+ const eu_layout = self.errorUnionLayout(payload_ty);
+ if (!eu_layout.payload_has_bits) {
+ // We use the error type directly as the type.
+ break :cache try self.constant(err_ty, err_val, .indirect);
}
- }
-
- // Optional representation is a structure.
- // { Payload, Bool }
- const has_pl_id = try self.constBool(maybe_payload_val != null, .indirect);
- const payload_id = if (maybe_payload_val) |payload_val|
- try self.constant(payload_ty, payload_val, .indirect)
- else
- try self.spv.constUndef(try self.resolveType(payload_ty, .indirect));
+ const payload_val = Value.fromInterned(switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+ .payload => |payload| payload,
+ });
- return try self.constructStruct(
- ty,
- &.{ payload_ty, Type.bool },
- &.{ payload_id, has_pl_id },
- );
- },
- .aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
- inline .array_type, .vector_type => |array_type, tag| {
- const elem_ty = Type.fromInterned(array_type.child);
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
-
- const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod)));
- defer self.gpa.free(constituents);
-
- switch (aggregate.storage) {
- .bytes => |bytes| {
- // TODO: This is really space inefficient, perhaps there is a better
- // way to do it?
- for (bytes, 0..) |byte, i| {
- constituents[i] = try self.constInt(elem_ty_ref, byte);
- }
- },
- .elems => |elems| {
- for (0..@as(usize, @intCast(array_type.len))) |i| {
- constituents[i] = try self.constant(elem_ty, Value.fromInterned(elems[i]), .indirect);
- }
- },
- .repeated_elem => |elem| {
- const val_id = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
- for (0..@as(usize, @intCast(array_type.len))) |i| {
- constituents[i] = val_id;
- }
- },
+ var constituents: [2]IdRef = undefined;
+ var types: [2]Type = undefined;
+ if (eu_layout.error_first) {
+ constituents[0] = try self.constant(err_ty, err_val, .indirect);
+ constituents[1] = try self.constant(payload_ty, payload_val, .indirect);
+ types = .{ err_ty, payload_ty };
+ } else {
+ constituents[0] = try self.constant(payload_ty, payload_val, .indirect);
+ constituents[1] = try self.constant(err_ty, err_val, .indirect);
+ types = .{ payload_ty, err_ty };
}
- switch (tag) {
- inline .array_type => {
- if (array_type.sentinel != .none) {
- const sentinel = Value.fromInterned(array_type.sentinel);
- constituents[constituents.len - 1] = try self.constant(elem_ty, sentinel, .indirect);
- }
- return self.constructArray(ty, constituents);
- },
- inline .vector_type => return self.constructVector(ty, constituents),
- else => unreachable,
- }
+ return try self.constructStruct(ty, &types, &constituents);
+ },
+ .enum_tag => {
+ const int_val = try val.intFromEnum(ty, mod);
+ const int_ty = ty.intTagType(mod);
+ break :cache try self.constant(int_ty, int_val, repr);
},
- .struct_type => {
- const struct_type = mod.typeToStruct(ty).?;
- if (struct_type.layout == .@"packed") {
- return self.todo("packed struct constants", .{});
+ .ptr => return self.constantPtr(ty, val),
+ .slice => |slice| {
+ const ptr_ty = ty.slicePtrFieldType(mod);
+ const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr));
+ const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
+ return self.constructStruct(
+ ty,
+ &.{ ptr_ty, Type.usize },
+ &.{ ptr_id, len_id },
+ );
+ },
+ .opt => {
+ const payload_ty = ty.optionalChild(mod);
+ const maybe_payload_val = val.optionalValue(mod);
+
+ if (!payload_ty.hasRuntimeBits(mod)) {
+ break :cache try self.constBool(maybe_payload_val != null, .indirect);
+ } else if (ty.optionalReprIsPayload(mod)) {
+ // Optional representation is a nullable pointer or slice.
+ if (maybe_payload_val) |payload_val| {
+ return try self.constant(payload_ty, payload_val, .indirect);
+ } else {
+ break :cache try self.spv.constNull(result_ty_id);
+ }
}
- var types = std.ArrayList(Type).init(self.gpa);
- defer types.deinit();
+ // Optional representation is a structure.
+ // { Payload, Bool }
- var constituents = std.ArrayList(IdRef).init(self.gpa);
- defer constituents.deinit();
+ const has_pl_id = try self.constBool(maybe_payload_val != null, .indirect);
+ const payload_id = if (maybe_payload_val) |payload_val|
+ try self.constant(payload_ty, payload_val, .indirect)
+ else
+ try self.spv.constUndef(try self.resolveType(payload_ty, .indirect));
+
+ return try self.constructStruct(
+ ty,
+ &.{ payload_ty, Type.bool },
+ &.{ payload_id, has_pl_id },
+ );
+ },
+ .aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
+ inline .array_type, .vector_type => |array_type, tag| {
+ const elem_ty = Type.fromInterned(array_type.child);
+
+ const constituents = try self.gpa.alloc(IdRef, @as(u32, @intCast(ty.arrayLenIncludingSentinel(mod))));
+ defer self.gpa.free(constituents);
+
+ switch (aggregate.storage) {
+ .bytes => |bytes| {
+ // TODO: This is really space inefficient, perhaps there is a better
+ // way to do it?
+ for (bytes, 0..) |byte, i| {
+ constituents[i] = try self.constInt(elem_ty, byte, .indirect);
+ }
+ },
+ .elems => |elems| {
+ for (0..@as(usize, @intCast(array_type.len))) |i| {
+ constituents[i] = try self.constant(elem_ty, Value.fromInterned(elems[i]), .indirect);
+ }
+ },
+ .repeated_elem => |elem| {
+ const val_id = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
+ for (0..@as(usize, @intCast(array_type.len))) |i| {
+ constituents[i] = val_id;
+ }
+ },
+ }
- var it = struct_type.iterateRuntimeOrder(ip);
- while (it.next()) |field_index| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- // This is a zero-bit field - we only needed it for the alignment.
- continue;
+ switch (tag) {
+ inline .array_type => {
+ if (array_type.sentinel != .none) {
+ const sentinel = Value.fromInterned(array_type.sentinel);
+ constituents[constituents.len - 1] = try self.constant(elem_ty, sentinel, .indirect);
+ }
+ return self.constructArray(ty, constituents);
+ },
+ inline .vector_type => return self.constructVector(ty, constituents),
+ else => unreachable,
+ }
+ },
+ .struct_type => {
+ const struct_type = mod.typeToStruct(ty).?;
+ if (struct_type.layout == .@"packed") {
+ return self.todo("packed struct constants", .{});
}
- // TODO: Padding?
- const field_val = try val.fieldValue(mod, field_index);
- const field_id = try self.constant(field_ty, field_val, .indirect);
+ var types = std.ArrayList(Type).init(self.gpa);
+ defer types.deinit();
- try types.append(field_ty);
- try constituents.append(field_id);
- }
+ var constituents = std.ArrayList(IdRef).init(self.gpa);
+ defer constituents.deinit();
- return try self.constructStruct(ty, types.items, constituents.items);
+ var it = struct_type.iterateRuntimeOrder(ip);
+ while (it.next()) |field_index| {
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ // This is a zero-bit field - we only needed it for the alignment.
+ continue;
+ }
+
+ // TODO: Padding?
+ const field_val = try val.fieldValue(mod, field_index);
+ const field_id = try self.constant(field_ty, field_val, .indirect);
+
+ try types.append(field_ty);
+ try constituents.append(field_id);
+ }
+
+ return try self.constructStruct(ty, types.items, constituents.items);
+ },
+ .anon_struct_type => unreachable, // TODO
+ else => unreachable,
},
- .anon_struct_type => unreachable, // TODO
- else => unreachable,
- },
- .un => |un| {
- const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
- const union_obj = mod.typeToUnion(ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
- const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
- try self.constant(field_ty, Value.fromInterned(un.val), .direct)
- else
- null;
- return try self.unionInit(ty, active_field, payload);
- },
- .memoized_call => unreachable,
- }
+ .un => |un| {
+ const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
+ const union_obj = mod.typeToUnion(ty).?;
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
+ try self.constant(field_ty, Value.fromInterned(un.val), .direct)
+ else
+ null;
+ return try self.unionInit(ty, active_field, payload);
+ },
+ .memoized_call => unreachable,
+ }
+ };
+
+ try self.intern_map.putNoClobber(self.gpa, .{ val.toIntern(), repr }, cacheable_id);
+
+ return cacheable_id;
}
fn constantPtr(self: *DeclGen, ptr_ty: Type, ptr_val: Value) Error!IdRef {
- const result_ty_ref = try self.resolveType(ptr_ty, .direct);
+ // TODO: Caching??
+
+ const result_ty_id = try self.resolveType(ptr_ty, .direct);
const mod = self.module;
- if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_ref);
+ if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_id);
switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
.decl => |decl| return try self.constantDeclRef(ptr_ty, decl),
@@ -1114,7 +1133,7 @@ const DeclGen = struct {
// that is not implemented by Mesa yet. Therefore, just generate it
// as a runtime operation.
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = ptr_id,
.integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct),
});
@@ -1126,23 +1145,23 @@ const DeclGen = struct {
.elem => |elem_ptr| {
const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base));
const parent_ptr_id = try self.constantPtr(parent_ptr_ty, Value.fromInterned(elem_ptr.base));
- const size_ty_ref = try self.sizeType();
- const index_id = try self.constInt(size_ty_ref, elem_ptr.index);
+ const index_id = try self.constInt(Type.usize, elem_ptr.index, .direct);
const elem_ptr_id = try self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
// TODO: Can we consolidate this in ptrElemPtr?
const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
- const elem_ptr_ty_ref = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
- if (elem_ptr_ty_ref == result_ty_ref) {
+ // TODO: Can we remove this ID comparison?
+ if (elem_ptr_ty_id == result_ty_id) {
return elem_ptr_id;
}
// This may happen when we have pointer-to-array and the result is
// another pointer-to-array instead of a pointer-to-element.
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.operand = elem_ptr_id,
});
@@ -1166,7 +1185,7 @@ const DeclGen = struct {
const mod = self.module;
const ip = &mod.intern_pool;
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
const decl_val = anon_decl.val;
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
@@ -1181,7 +1200,7 @@ const DeclGen = struct {
// const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
// Pointer to nothing - return undefoined
- return self.spv.constUndef(ty_ref);
+ return self.spv.constUndef(ty_id);
}
if (decl_ty.zigTypeTag(mod) == .Fn) {
@@ -1190,14 +1209,14 @@ const DeclGen = struct {
// Anon decl refs are always generic.
assert(ty.ptrAddressSpace(mod) == .generic);
- const decl_ptr_ty_ref = try self.ptrType(decl_ty, .Generic);
+ const decl_ptr_ty_id = try self.ptrType(decl_ty, .Generic);
const ptr_id = try self.resolveAnonDecl(decl_val);
- if (decl_ptr_ty_ref != ty_ref) {
+ if (decl_ptr_ty_id != ty_id) {
// Differing pointer types, insert a cast.
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = ty_id,
.id_result = casted_ptr_id,
.operand = ptr_id,
});
@@ -1209,15 +1228,14 @@ const DeclGen = struct {
fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef {
const mod = self.module;
- const ty_ref = try self.resolveType(ty, .direct);
- const ty_id = self.typeId(ty_ref);
+ const ty_id = try self.resolveType(ty, .direct);
const decl = mod.declPtr(decl_index);
switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
.func => {
// TODO: Properly lower function pointers. For now we are going to hack around it and
// just generate an empty pointer. Function pointers are represented by a pointer to usize.
- return try self.spv.constUndef(ty_ref);
+ return try self.spv.constUndef(ty_id);
},
.extern_func => unreachable, // TODO
else => {},
@@ -1225,7 +1243,7 @@ const DeclGen = struct {
if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
// Pointer to nothing - return undefined.
- return self.spv.constUndef(ty_ref);
+ return self.spv.constUndef(ty_id);
}
const spv_decl_index = try self.object.resolveDecl(mod, decl_index);
@@ -1239,14 +1257,14 @@ const DeclGen = struct {
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
try self.addFunctionDep(spv_decl_index, final_storage_class);
- const decl_ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
+ const decl_ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
const ptr_id = switch (final_storage_class) {
- .Generic => try self.castToGeneric(self.typeId(decl_ptr_ty_ref), decl_id),
+ .Generic => try self.castToGeneric(decl_ptr_ty_id, decl_id),
else => decl_id,
};
- if (decl_ptr_ty_ref != ty_ref) {
+ if (decl_ptr_ty_id != ty_id) {
// Differing pointer types, insert a cast.
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
@@ -1261,28 +1279,18 @@ const DeclGen = struct {
}
// Turn a Zig type's name into a cache reference.
- fn resolveTypeName(self: *DeclGen, ty: Type) !CacheString {
+ fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 {
var name = std.ArrayList(u8).init(self.gpa);
defer name.deinit();
try ty.print(name.writer(), self.module);
- return try self.spv.resolveString(name.items);
- }
-
- /// Turn a Zig type into a SPIR-V Type, and return its type result-id.
- fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
- const type_ref = try self.resolveType(ty, .direct);
- return self.spv.resultId(type_ref);
- }
-
- fn typeId(self: *DeclGen, ty_ref: CacheRef) IdRef {
- return self.spv.resultId(ty_ref);
+ return try name.toOwnedSlice();
}
/// Create an integer type suitable for storing at least 'bits' bits.
/// The integer type that is returned by this function is the type that is used to perform
/// actual operations (as well as store) a Zig type of a particular number of bits. To create
/// a type with an exact size, use SpvModule.intType.
- fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
+ fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !IdRef {
const backing_bits = self.backingIntBits(bits) orelse {
// TODO: Integers too big for any native type are represented as "composite integers":
// An array of largestSupportedIntBits.
@@ -1297,36 +1305,69 @@ const DeclGen = struct {
return self.spv.intType(.unsigned, backing_bits);
}
- /// Create an integer type that represents 'usize'.
- fn sizeType(self: *DeclGen) !CacheRef {
- return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
+ fn arrayType(self: *DeclGen, len: u32, child_ty: IdRef) !IdRef {
+ // TODO: Cache??
+ const len_id = try self.constInt(Type.u32, len, .direct);
+ const result_id = self.spv.allocId();
+
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeArray, .{
+ .id_result = result_id,
+ .element_type = child_ty,
+ .length = len_id,
+ });
+ return result_id;
}
- fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !CacheRef {
+ fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !IdRef {
const key = .{ child_ty.toIntern(), storage_class };
- const entry = try self.wip_pointers.getOrPut(self.gpa, key);
+ const entry = try self.ptr_types.getOrPut(self.gpa, key);
if (entry.found_existing) {
- const fwd_ref = entry.value_ptr.*;
- try self.spv.cache.recursive_ptrs.put(self.spv.gpa, fwd_ref, {});
- return fwd_ref;
+ const fwd_id = entry.value_ptr.ty_id;
+ if (!entry.value_ptr.fwd_emitted) {
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeForwardPointer, .{
+ .pointer_type = fwd_id,
+ .storage_class = storage_class,
+ });
+ entry.value_ptr.fwd_emitted = true;
+ }
+ return fwd_id;
}
- const fwd_ref = try self.spv.resolve(.{ .fwd_ptr_type = .{
- .zig_child_type = child_ty.toIntern(),
- .storage_class = storage_class,
- } });
- entry.value_ptr.* = fwd_ref;
+ const result_id = self.spv.allocId();
+ entry.value_ptr.* = .{
+ .ty_id = result_id,
+ .fwd_emitted = false,
+ };
- const child_ty_ref = try self.resolveType(child_ty, .indirect);
- _ = try self.spv.resolve(.{ .ptr_type = .{
+ const child_ty_id = try self.resolveType(child_ty, .indirect);
+
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
+ .id_result = result_id,
.storage_class = storage_class,
- .child_type = child_ty_ref,
- .fwd = fwd_ref,
- } });
+ .type = child_ty_id,
+ });
+
+ return result_id;
+ }
+
+ fn functionType(self: *DeclGen, return_ty: Type, param_types: []const Type) !IdRef {
+ // TODO: Cache??
- assert(self.wip_pointers.remove(key));
+ const param_ids = try self.gpa.alloc(IdRef, param_types.len);
+ defer self.gpa.free(param_ids);
- return fwd_ref;
+ for (param_types, param_ids) |param_ty, *param_id| {
+ param_id.* = try self.resolveType(param_ty, .direct);
+ }
+
+ const ty_id = self.spv.allocId();
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeFunction, .{
+ .id_result = ty_id,
+ .return_type = try self.resolveFnReturnType(return_ty),
+ .id_ref_2 = param_ids,
+ });
+
+ return ty_id;
}
/// Generate a union type. Union types are always generated with the
@@ -1347,7 +1388,7 @@ const DeclGen = struct {
/// padding: [padding_size]u8,
/// }
/// If any of the fields' size is 0, it will be omitted.
- fn resolveUnionType(self: *DeclGen, ty: Type) !CacheRef {
+ fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef {
const mod = self.module;
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
@@ -1362,48 +1403,43 @@ const DeclGen = struct {
return try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
}
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
+ var member_types: [4]IdRef = undefined;
+ var member_names: [4][]const u8 = undefined;
- var member_types: [4]CacheRef = undefined;
- var member_names: [4]CacheString = undefined;
-
- const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled?
+ const u8_ty_id = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled?
if (layout.tag_size != 0) {
- const tag_ty_ref = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
- member_types[layout.tag_index] = tag_ty_ref;
- member_names[layout.tag_index] = try self.spv.resolveString("(tag)");
+ const tag_ty_id = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
+ member_types[layout.tag_index] = tag_ty_id;
+ member_names[layout.tag_index] = "(tag)";
}
if (layout.payload_size != 0) {
- const payload_ty_ref = try self.resolveType(layout.payload_ty, .indirect);
- member_types[layout.payload_index] = payload_ty_ref;
- member_names[layout.payload_index] = try self.spv.resolveString("(payload)");
+ const payload_ty_id = try self.resolveType(layout.payload_ty, .indirect);
+ member_types[layout.payload_index] = payload_ty_id;
+ member_names[layout.payload_index] = "(payload)";
}
if (layout.payload_padding_size != 0) {
- const payload_padding_ty_ref = try self.spv.arrayType(@intCast(layout.payload_padding_size), u8_ty_ref);
- member_types[layout.payload_padding_index] = payload_padding_ty_ref;
- member_names[layout.payload_padding_index] = try self.spv.resolveString("(payload padding)");
+ const payload_padding_ty_id = try self.arrayType(@intCast(layout.payload_padding_size), u8_ty_id);
+ member_types[layout.payload_padding_index] = payload_padding_ty_id;
+ member_names[layout.payload_padding_index] = "(payload padding)";
}
if (layout.padding_size != 0) {
- const padding_ty_ref = try self.spv.arrayType(@intCast(layout.padding_size), u8_ty_ref);
- member_types[layout.padding_index] = padding_ty_ref;
- member_names[layout.padding_index] = try self.spv.resolveString("(padding)");
+ const padding_ty_id = try self.arrayType(@intCast(layout.padding_size), u8_ty_id);
+ member_types[layout.padding_index] = padding_ty_id;
+ member_names[layout.padding_index] = "(padding)";
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = member_types[0..layout.total_fields],
- .member_names = member_names[0..layout.total_fields],
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ const result_id = try self.spv.structType(member_types[0..layout.total_fields], member_names[0..layout.total_fields]);
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+ try self.spv.debugName(result_id, type_name);
+ return result_id;
}
- fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !CacheRef {
+ fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef {
const mod = self.module;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
@@ -1420,26 +1456,46 @@ const DeclGen = struct {
}
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
- fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
+ fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
+ if (self.intern_map.get(.{ ty.toIntern(), repr })) |id| {
+ return id;
+ }
+
+ const id = try self.resolveTypeInner(ty, repr);
+ try self.intern_map.put(self.gpa, .{ ty.toIntern(), repr }, id);
+ return id;
+ }
+
+ fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
const mod = self.module;
const ip = &mod.intern_pool;
log.debug("resolveType: ty = {}", .{ty.fmt(mod)});
const target = self.getTarget();
+
+ const section = &self.spv.sections.types_globals_constants;
+
switch (ty.zigTypeTag(mod)) {
.NoReturn => {
assert(repr == .direct);
- return try self.spv.resolve(.void_type);
+ return try self.spv.voidType();
},
.Void => switch (repr) {
- .direct => return try self.spv.resolve(.void_type),
+ .direct => {
+ return try self.spv.voidType();
+ },
// Pointers to void
- .indirect => return try self.spv.resolve(.{ .opaque_type = .{
- .name = try self.spv.resolveString("void"),
- } }),
+ .indirect => {
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = "void",
+ });
+ return result_id;
+ },
},
.Bool => switch (repr) {
- .direct => return try self.spv.resolve(.bool_type),
- .indirect => return try self.intType(.unsigned, 1),
+ .direct => return try self.spv.boolType(),
+ .indirect => return try self.resolveType(Type.u1, .indirect),
},
.Int => {
const int_info = ty.intInfo(mod);
@@ -1447,15 +1503,18 @@ const DeclGen = struct {
// Some times, the backend will be asked to generate a pointer to i0. OpTypeInt
// with 0 bits is invalid, so return an opaque type in this case.
assert(repr == .indirect);
- return try self.spv.resolve(.{ .opaque_type = .{
- .name = try self.spv.resolveString("u0"),
- } });
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = "u0",
+ });
+ return result_id;
}
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
const tag_ty = ty.intTagType(mod);
- return self.resolveType(tag_ty, repr);
+ return try self.resolveType(tag_ty, repr);
},
.Float => {
// We can (and want) not really emulate floating points with other floating point types like with the integer types,
@@ -1473,27 +1532,29 @@ const DeclGen = struct {
return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
- return try self.spv.resolve(.{ .float_type = .{ .bits = bits } });
+ return try self.spv.floatType(bits);
},
.Array => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const elem_ty = ty.childType(mod);
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
+ const elem_ty_id = try self.resolveType(elem_ty, .indirect);
const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
};
- const ty_ref = if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
+
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// The size of the array would be 0, but that is not allowed in SPIR-V.
// This path can be reached when the backend is asked to generate a pointer to
// an array of some zero-bit type. This should always be an indirect path.
assert(repr == .indirect);
// We cannot use the child type here, so just use an opaque type.
- break :blk try self.spv.resolve(.{ .opaque_type = .{
- .name = try self.spv.resolveString("zero-sized array"),
- } });
- } else if (total_len == 0) blk: {
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = "zero-sized array",
+ });
+ return result_id;
+ } else if (total_len == 0) {
// The size of the array would be 0, but that is not allowed in SPIR-V.
// This path can be reached for example when there is a slicing of a pointer
// that produces a zero-length array. In all cases where this type can be generated,
@@ -1503,16 +1564,13 @@ const DeclGen = struct {
// In this case, we have an array of a non-zero sized type. In this case,
// generate an array of 1 element instead, so that ptr_elem_ptr instructions
// can be lowered to ptrAccessChain instead of manually performing the math.
- break :blk try self.spv.arrayType(1, elem_ty_ref);
- } else try self.spv.arrayType(total_len, elem_ty_ref);
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return try self.arrayType(1, elem_ty_id);
+ } else {
+ return try self.arrayType(total_len, elem_ty_id);
+ }
},
.Fn => switch (repr) {
.direct => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const fn_info = mod.typeToFunc(ty).?;
comptime assert(zig_call_abi_ver == 3);
@@ -1525,75 +1583,67 @@ const DeclGen = struct {
if (fn_info.is_var_args)
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
- const param_ty_refs = try self.gpa.alloc(CacheRef, fn_info.param_types.len);
- defer self.gpa.free(param_ty_refs);
+ // Note: Logic is different from functionType().
+ const param_ty_ids = try self.gpa.alloc(IdRef, fn_info.param_types.len);
+ defer self.gpa.free(param_ty_ids);
var param_index: usize = 0;
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- param_ty_refs[param_index] = try self.resolveType(param_ty, .direct);
+ param_ty_ids[param_index] = try self.resolveType(param_ty, .direct);
param_index += 1;
}
- const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
- const ty_ref = try self.spv.resolve(.{ .function_type = .{
- .return_type = return_ty_ref,
- .parameters = param_ty_refs[0..param_index],
- } });
+ const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
+
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeFunction, .{
+ .id_result = result_id,
+ .return_type = return_ty_id,
+ .id_ref_2 = param_ty_ids[0..param_index],
+ });
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return result_id;
},
.indirect => {
// TODO: Represent function pointers properly.
// For now, just use an usize type.
- return try self.sizeType();
+ return try self.resolveType(Type.usize, .indirect);
},
},
.Pointer => {
const ptr_info = ty.ptrInfo(mod);
- // Note: Don't cache this pointer type, it would mess up the recursive pointer functionality
- // in ptrType()!
-
const storage_class = self.spvStorageClass(ptr_info.flags.address_space);
- const ptr_ty_ref = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
+ const ptr_ty_id = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
if (ptr_info.flags.size != .Slice) {
- return ptr_ty_ref;
+ return ptr_ty_id;
}
- const size_ty_ref = try self.sizeType();
- return self.spv.resolve(.{ .struct_type = .{
- .member_types = &.{ ptr_ty_ref, size_ty_ref },
- .member_names = &.{
- try self.spv.resolveString("ptr"),
- try self.spv.resolveString("len"),
- },
- } });
+ const size_ty_id = try self.resolveType(Type.usize, .direct);
+ return self.spv.structType(
+ &.{ ptr_ty_id, size_ty_id },
+ &.{ "ptr", "len" },
+ );
},
.Vector => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const elem_ty = ty.childType(mod);
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
+ // TODO: Make `.direct`.
+ const elem_ty_id = try self.resolveType(elem_ty, .indirect);
const len = ty.vectorLen(mod);
- const ty_ref = if (self.isVector(ty))
- try self.spv.vectorType(len, elem_ty_ref)
- else
- try self.spv.arrayType(len, elem_ty_ref);
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ if (self.isVector(ty)) {
+ return try self.spv.vectorType(len, elem_ty_id);
+ } else {
+ return try self.arrayType(len, elem_ty_id);
+ }
},
.Struct => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
- const member_types = try self.gpa.alloc(CacheRef, tuple.values.len);
+ const member_types = try self.gpa.alloc(IdRef, tuple.values.len);
defer self.gpa.free(member_types);
var member_index: usize = 0;
@@ -1604,13 +1654,11 @@ const DeclGen = struct {
member_index += 1;
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = member_types[0..member_index],
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ const result_id = try self.spv.structType(member_types[0..member_index], null);
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+ try self.spv.debugName(result_id, type_name);
+ return result_id;
},
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
@@ -1620,10 +1668,10 @@ const DeclGen = struct {
return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
}
- var member_types = std.ArrayList(CacheRef).init(self.gpa);
+ var member_types = std.ArrayList(IdRef).init(self.gpa);
defer member_types.deinit();
- var member_names = std.ArrayList(CacheString).init(self.gpa);
+ var member_names = std.ArrayList([]const u8).init(self.gpa);
defer member_names.deinit();
var it = struct_type.iterateRuntimeOrder(ip);
@@ -1637,17 +1685,14 @@ const DeclGen = struct {
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index});
try member_types.append(try self.resolveType(field_ty, .indirect));
- try member_names.append(try self.spv.resolveString(ip.stringToSlice(field_name)));
+ try member_names.append(ip.stringToSlice(field_name));
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = member_types.items,
- .member_names = member_names.items,
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ const result_id = try self.spv.structType(member_types.items, member_names.items);
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+ try self.spv.debugName(result_id, type_name);
+ return result_id;
},
.Optional => {
const payload_ty = ty.optionalChild(mod);
@@ -1658,77 +1703,58 @@ const DeclGen = struct {
return try self.resolveType(Type.bool, .indirect);
}
- const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
+ const payload_ty_id = try self.resolveType(payload_ty, .indirect);
if (ty.optionalReprIsPayload(mod)) {
// Optional is actually a pointer or a slice.
- return payload_ty_ref;
+ return payload_ty_id;
}
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
- const bool_ty_ref = try self.resolveType(Type.bool, .indirect);
+ const bool_ty_id = try self.resolveType(Type.bool, .indirect);
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .member_types = &.{ payload_ty_ref, bool_ty_ref },
- .member_names = &.{
- try self.spv.resolveString("payload"),
- try self.spv.resolveString("valid"),
- },
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return try self.spv.structType(
+ &.{ payload_ty_id, bool_ty_id },
+ &.{ "payload", "valid" },
+ );
},
.Union => return try self.resolveUnionType(ty),
- .ErrorSet => return try self.intType(.unsigned, 16),
+ .ErrorSet => return try self.resolveType(Type.u16, repr),
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(mod);
- const error_ty_ref = try self.resolveType(Type.anyerror, .indirect);
+ const error_ty_id = try self.resolveType(Type.anyerror, .indirect);
const eu_layout = self.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
- return error_ty_ref;
+ return error_ty_id;
}
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
+ const payload_ty_id = try self.resolveType(payload_ty, .indirect);
- const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
-
- var member_types: [2]CacheRef = undefined;
- var member_names: [2]CacheString = undefined;
+ var member_types: [2]IdRef = undefined;
+ var member_names: [2][]const u8 = undefined;
if (eu_layout.error_first) {
// Put the error first
- member_types = .{ error_ty_ref, payload_ty_ref };
- member_names = .{
- try self.spv.resolveString("error"),
- try self.spv.resolveString("payload"),
- };
+ member_types = .{ error_ty_id, payload_ty_id };
+ member_names = .{ "error", "payload" };
// TODO: ABI padding?
} else {
// Put the payload first.
- member_types = .{ payload_ty_ref, error_ty_ref };
- member_names = .{
- try self.spv.resolveString("payload"),
- try self.spv.resolveString("error"),
- };
+ member_types = .{ payload_ty_id, error_ty_id };
+ member_names = .{ "payload", "error" };
// TODO: ABI padding?
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = &member_types,
- .member_names = &member_names,
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return try self.spv.structType(&member_types, &member_names);
},
.Opaque => {
- return try self.spv.resolve(.{
- .opaque_type = .{
- .name = .none, // TODO
- },
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = type_name,
});
+ return result_id;
},
.Null,
@@ -1736,9 +1762,10 @@ const DeclGen = struct {
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
+ .Type,
=> unreachable, // Must be comptime.
- else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
+ .Frame, .AnyFrame => unreachable, // TODO
}
}
@@ -1887,7 +1914,6 @@ const DeclGen = struct {
result_ty: Type,
ty: Type,
/// Always in direct representation.
- ty_ref: CacheRef,
ty_id: IdRef,
/// True if the input is an array type.
is_array: bool,
@@ -1947,14 +1973,13 @@ const DeclGen = struct {
@memset(results, undefined);
const ty = if (is_array) result_ty.scalarType(mod) else result_ty;
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
return .{
.dg = self,
.result_ty = result_ty,
.ty = ty,
- .ty_ref = ty_ref,
- .ty_id = self.typeId(ty_ref),
+ .ty_id = ty_id,
.is_array = is_array,
.results = results,
};
@@ -1981,16 +2006,13 @@ const DeclGen = struct {
/// TODO is to also write out the error as a function call parameter, and to somehow fetch
/// the name of an error in the text executor.
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
- const anyerror_ty_ref = try self.resolveType(Type.anyerror, .direct);
- const ptr_anyerror_ty_ref = try self.ptrType(Type.anyerror, .CrossWorkgroup);
- const void_ty_ref = try self.resolveType(Type.void, .direct);
-
- const kernel_proto_ty_ref = try self.spv.resolve(.{
- .function_type = .{
- .return_type = void_ty_ref,
- .parameters = &.{ptr_anyerror_ty_ref},
- },
+ const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
+ const ptr_anyerror_ty = try self.module.ptrType(.{
+ .child = Type.anyerror.toIntern(),
+ .flags = .{ .address_space = .global },
});
+ const ptr_anyerror_ty_id = try self.resolveType(ptr_anyerror_ty, .direct);
+ const kernel_proto_ty_id = try self.functionType(Type.void, &.{ptr_anyerror_ty});
const test_id = self.spv.declPtr(spv_test_decl_index).result_id;
@@ -2002,20 +2024,20 @@ const DeclGen = struct {
const section = &self.spv.sections.functions;
try section.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(void_ty_ref),
+ .id_result_type = try self.resolveType(Type.void, .direct),
.id_result = kernel_id,
.function_control = .{},
- .function_type = self.typeId(kernel_proto_ty_ref),
+ .function_type = kernel_proto_ty_id,
});
try section.emit(self.spv.gpa, .OpFunctionParameter, .{
- .id_result_type = self.typeId(ptr_anyerror_ty_ref),
+ .id_result_type = ptr_anyerror_ty_id,
.id_result = p_error_id,
});
try section.emit(self.spv.gpa, .OpLabel, .{
.id_result = self.spv.allocId(),
});
try section.emit(self.spv.gpa, .OpFunctionCall, .{
- .id_result_type = self.typeId(anyerror_ty_ref),
+ .id_result_type = anyerror_ty_id,
.id_result = error_id,
.function = test_id,
});
@@ -2047,17 +2069,17 @@ const DeclGen = struct {
.func => {
assert(decl.typeOf(mod).zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
- const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
+ const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
- const prototype_ty_ref = try self.resolveType(decl.typeOf(mod), .direct);
+ const prototype_ty_id = try self.resolveType(decl.typeOf(mod), .direct);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(return_ty_ref),
+ .id_result_type = return_ty_id,
.id_result = result_id,
.function_control = switch (fn_info.cc) {
.Inline => .{ .Inline = true },
else => .{},
},
- .function_type = self.typeId(prototype_ty_ref),
+ .function_type = prototype_ty_id,
});
comptime assert(zig_call_abi_ver == 3);
@@ -2066,7 +2088,7 @@ const DeclGen = struct {
const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const param_type_id = try self.resolveTypeId(param_ty);
+ const param_type_id = try self.resolveType(param_ty, .direct);
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
@@ -2122,10 +2144,10 @@ const DeclGen = struct {
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
assert(final_storage_class != .Generic); // These should be instance globals
- const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
+ const ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{
- .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result_type = ptr_ty_id,
.id_result = result_id,
.storage_class = final_storage_class,
});
@@ -2145,22 +2167,18 @@ const DeclGen = struct {
try self.spv.declareDeclDeps(spv_decl_index, &.{});
- const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), .Function);
+ const ptr_ty_id = try self.ptrType(decl.typeOf(mod), .Function);
if (maybe_init_val) |init_val| {
// TODO: Combine with resolveAnonDecl?
- const void_ty_ref = try self.resolveType(Type.void, .direct);
- const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
- .return_type = void_ty_ref,
- .parameters = &.{},
- } });
+ const initializer_proto_ty_id = try self.functionType(Type.void, &.{});
const initializer_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(void_ty_ref),
+ .id_result_type = try self.resolveType(Type.void, .direct),
.id_result = initializer_id,
.function_control = .{},
- .function_type = self.typeId(initializer_proto_ty_ref),
+ .function_type = initializer_proto_ty_id,
});
const root_block_id = self.spv.allocId();
@@ -2183,7 +2201,7 @@ const DeclGen = struct {
try self.spv.debugNameFmt(initializer_id, "initializer of {s}", .{fqn});
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result_type = ptr_ty_id,
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -2191,7 +2209,7 @@ const DeclGen = struct {
});
} else {
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result_type = ptr_ty_id,
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -2202,12 +2220,12 @@ const DeclGen = struct {
}
}
- fn intFromBool(self: *DeclGen, result_ty_ref: CacheRef, condition_id: IdRef) !IdRef {
- const zero_id = try self.constInt(result_ty_ref, 0);
- const one_id = try self.constInt(result_ty_ref, 1);
+ fn intFromBool(self: *DeclGen, ty: Type, condition_id: IdRef) !IdRef {
+ const zero_id = try self.constInt(ty, 0, .direct);
+ const one_id = try self.constInt(ty, 1, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpSelect, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = result_id,
.condition = condition_id,
.object_1 = one_id,
@@ -2222,15 +2240,12 @@ const DeclGen = struct {
const mod = self.module;
return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
- const direct_bool_ty_ref = try self.resolveType(ty, .direct);
- const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
- const zero_id = try self.constInt(indirect_bool_ty_ref, 0);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
- .id_result_type = self.typeId(direct_bool_ty_ref),
+ .id_result_type = try self.resolveType(Type.bool, .direct),
.id_result = result_id,
.operand_1 = operand_id,
- .operand_2 = zero_id,
+ .operand_2 = try self.constBool(false, .indirect),
});
break :blk result_id;
},
@@ -2243,20 +2258,17 @@ const DeclGen = struct {
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
const mod = self.module;
return switch (ty.zigTypeTag(mod)) {
- .Bool => blk: {
- const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
- break :blk self.intFromBool(indirect_bool_ty_ref, operand_id);
- },
+ .Bool => try self.intFromBool(Type.u1, operand_id),
else => operand_id,
};
}
fn extractField(self: *DeclGen, result_ty: Type, object: IdRef, field: u32) !IdRef {
- const result_ty_ref = try self.resolveType(result_ty, .indirect);
+ const result_ty_id = try self.resolveType(result_ty, .indirect);
const result_id = self.spv.allocId();
const indexes = [_]u32{field};
try self.func.body.emit(self.spv.gpa, .OpCompositeExtract, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.composite = object,
.indexes = &indexes,
@@ -2270,13 +2282,13 @@ const DeclGen = struct {
};
fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
- const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect);
+ const indirect_value_ty_id = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
.Volatile = options.is_volatile,
};
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
- .id_result_type = self.typeId(indirect_value_ty_ref),
+ .id_result_type = indirect_value_ty_id,
.id_result = result_id,
.pointer = ptr_id,
.memory_access = access,
@@ -2488,7 +2500,8 @@ const DeclGen = struct {
const result_ty = self.typeOfIndex(inst);
const shift_ty = self.typeOf(bin_op.rhs);
- const shift_ty_ref = try self.resolveType(shift_ty, .direct);
+ const scalar_result_ty_id = try self.resolveType(result_ty.scalarType(mod), .direct);
+ const scalar_shift_ty_id = try self.resolveType(shift_ty.scalarType(mod), .direct);
const info = self.arithmeticTypeInfo(result_ty);
switch (info.class) {
@@ -2505,7 +2518,7 @@ const DeclGen = struct {
// Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that,
// so just manually upcast it if required.
- const shift_id = if (shift_ty_ref != wip.ty_ref) blk: {
+ const shift_id = if (scalar_shift_ty_id != scalar_result_ty_id) blk: {
const shift_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
.id_result_type = wip.ty_id,
@@ -2529,7 +2542,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, unsigned, args);
}
- result_id.* = try self.normalize(wip.ty_ref, value_id, info);
+ result_id.* = try self.normalize(wip.ty, value_id, info);
}
return try wip.finalize();
}
@@ -2622,7 +2635,7 @@ const DeclGen = struct {
/// - Signed integers are also sign extended if they are negative.
/// All other values are returned unmodified (this makes strange integer
/// wrapping easier to use in generic operations).
- fn normalize(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, info: ArithmeticTypeInfo) !IdRef {
+ fn normalize(self: *DeclGen, ty: Type, value_id: IdRef, info: ArithmeticTypeInfo) !IdRef {
switch (info.class) {
.integer, .bool, .float => return value_id,
.composite_integer => unreachable, // TODO
@@ -2630,9 +2643,9 @@ const DeclGen = struct {
.unsigned => {
const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1;
const result_id = self.spv.allocId();
- const mask_id = try self.constInt(ty_ref, mask_value);
+ const mask_id = try self.constInt(ty, mask_value, .direct);
try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = result_id,
.operand_1 = value_id,
.operand_2 = mask_id,
@@ -2641,17 +2654,17 @@ const DeclGen = struct {
},
.signed => {
// Shift left and right so that we can copy the sight bit that way.
- const shift_amt_id = try self.constInt(ty_ref, info.backing_bits - info.bits);
+ const shift_amt_id = try self.constInt(ty, info.backing_bits - info.bits, .direct);
const left_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpShiftLeftLogical, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = left_id,
.base = value_id,
.shift = shift_amt_id,
});
const right_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpShiftRightArithmetic, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = right_id,
.base = left_id,
.shift = shift_amt_id,
@@ -2667,13 +2680,13 @@ const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const ty = self.typeOfIndex(inst);
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
const info = self.arithmeticTypeInfo(ty);
switch (info.class) {
.composite_integer => unreachable, // TODO
.integer, .strange_integer => {
- const zero_id = try self.constInt(ty_ref, 0);
- const one_id = try self.constInt(ty_ref, 1);
+ const zero_id = try self.constInt(ty, 0, .direct);
+ const one_id = try self.constInt(ty, 1, .direct);
// (a ^ b) > 0
const bin_bitwise_id = try self.binOpSimple(ty, lhs_id, rhs_id, .OpBitwiseXor);
@@ -2696,14 +2709,14 @@ const DeclGen = struct {
const negative_div_id = try self.arithOp(ty, negative_div_lhs, rhs_abs, .OpFDiv, .OpSDiv, .OpUDiv);
const negated_negative_div_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = ty_id,
.id_result = negated_negative_div_id,
.operand = negative_div_id,
});
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpSelect, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = ty_id,
.id_result = result_id,
.condition = is_positive_id,
.object_1 = positive_div_id,
@@ -2728,7 +2741,7 @@ const DeclGen = struct {
fn floor(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
const target = self.getTarget();
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
const ext_inst: Word = switch (target.os.tag) {
.opencl => 25,
.vulkan => 8,
@@ -2742,7 +2755,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = ty_id,
.id_result = result_id,
.set = set_id,
.instruction = .{ .inst = ext_inst },
@@ -2819,7 +2832,7 @@ const DeclGen = struct {
// TODO: Trap on overflow? Probably going to be annoying.
// TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.
- result_id.* = try self.normalize(wip.ty_ref, value_id, info);
+ result_id.* = try self.normalize(wip.ty, value_id, info);
}
return try wip.finalize();
@@ -2897,11 +2910,12 @@ const DeclGen = struct {
const operand_ty = self.typeOf(extra.lhs);
const ov_ty = result_ty.structFieldType(1, self.module);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- const cmp_ty_ref = if (self.isVector(operand_ty))
- try self.spv.vectorType(operand_ty.vectorLen(mod), bool_ty_ref)
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
+ const cmp_ty_id = if (self.isVector(operand_ty))
+ // TODO: Resolving a vector type with .direct should return a SPIR-V vector
+ try self.spv.vectorType(operand_ty.vectorLen(mod), try self.resolveType(Type.bool, .direct))
else
- bool_ty_ref;
+ bool_ty_id;
const info = self.arithmeticTypeInfo(operand_ty);
switch (info.class) {
@@ -2929,7 +2943,7 @@ const DeclGen = struct {
});
// Normalize the result so that the comparisons go well
- result_id.* = try self.normalize(wip_result.ty_ref, value_id, info);
+ result_id.* = try self.normalize(wip_result.ty, value_id, info);
const overflowed_id = switch (info.signedness) {
.unsigned => blk: {
@@ -2937,7 +2951,7 @@ const DeclGen = struct {
// For subtraction the conditions need to be swapped.
const overflowed_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, ucmp, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = overflowed_id,
.operand_1 = result_id.*,
.operand_2 = lhs_elem_id,
@@ -2963,9 +2977,9 @@ const DeclGen = struct {
// = (rhs < 0) == (lhs > value)
const rhs_lt_zero_id = self.spv.allocId();
- const zero_id = try self.constInt(wip_result.ty_ref, 0);
+ const zero_id = try self.constInt(wip_result.ty, 0, .direct);
try self.func.body.emit(self.spv.gpa, .OpSLessThan, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = rhs_lt_zero_id,
.operand_1 = rhs_elem_id,
.operand_2 = zero_id,
@@ -2973,7 +2987,7 @@ const DeclGen = struct {
const value_gt_lhs_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, scmp, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = value_gt_lhs_id,
.operand_1 = lhs_elem_id,
.operand_2 = result_id.*,
@@ -2981,7 +2995,7 @@ const DeclGen = struct {
const overflowed_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalEqual, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = overflowed_id,
.operand_1 = rhs_lt_zero_id,
.operand_2 = value_gt_lhs_id,
@@ -2990,7 +3004,7 @@ const DeclGen = struct {
},
};
- ov_id.* = try self.intFromBool(wip_ov.ty_ref, overflowed_id);
+ ov_id.* = try self.intFromBool(wip_ov.ty, overflowed_id);
}
return try self.constructStruct(
@@ -3022,9 +3036,9 @@ const DeclGen = struct {
var wip_ov = try self.elementWise(ov_ty, true);
defer wip_ov.deinit();
- const zero_id = try self.constInt(wip_result.ty_ref, 0);
- const zero_ov_id = try self.constInt(wip_ov.ty_ref, 0);
- const one_ov_id = try self.constInt(wip_ov.ty_ref, 1);
+ const zero_id = try self.constInt(wip_result.ty, 0, .direct);
+ const zero_ov_id = try self.constInt(wip_ov.ty, 0, .direct);
+ const one_ov_id = try self.constInt(wip_ov.ty, 1, .direct);
for (wip_result.results, wip_ov.results, 0..) |*result_id, *ov_id, i| {
const lhs_elem_id = try wip_result.elementAt(operand_ty, lhs, i);
@@ -3065,15 +3079,17 @@ const DeclGen = struct {
const result_ty = self.typeOfIndex(inst);
const operand_ty = self.typeOf(extra.lhs);
const shift_ty = self.typeOf(extra.rhs);
- const shift_ty_ref = try self.resolveType(shift_ty, .direct);
+ const scalar_shift_ty_id = try self.resolveType(shift_ty.scalarType(mod), .direct);
+ const scalar_operand_ty_id = try self.resolveType(operand_ty.scalarType(mod), .direct);
const ov_ty = result_ty.structFieldType(1, self.module);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- const cmp_ty_ref = if (self.isVector(operand_ty))
- try self.spv.vectorType(operand_ty.vectorLen(mod), bool_ty_ref)
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
+ const cmp_ty_id = if (self.isVector(operand_ty))
+ // TODO: Resolving a vector type with .direct should return a SPIR-V vector
+ try self.spv.vectorType(operand_ty.vectorLen(mod), try self.resolveType(Type.bool, .direct))
else
- bool_ty_ref;
+ bool_ty_id;
const info = self.arithmeticTypeInfo(operand_ty);
switch (info.class) {
@@ -3092,7 +3108,7 @@ const DeclGen = struct {
// Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that,
// so just manually upcast it if required.
- const shift_id = if (shift_ty_ref != wip_result.ty_ref) blk: {
+ const shift_id = if (scalar_shift_ty_id != scalar_operand_ty_id) blk: {
const shift_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
.id_result_type = wip_result.ty_id,
@@ -3109,7 +3125,7 @@ const DeclGen = struct {
.base = lhs_elem_id,
.shift = shift_id,
});
- result_id.* = try self.normalize(wip_result.ty_ref, value_id, info);
+ result_id.* = try self.normalize(wip_result.ty, value_id, info);
const right_shift_id = self.spv.allocId();
switch (info.signedness) {
@@ -3133,13 +3149,13 @@ const DeclGen = struct {
const overflowed_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = overflowed_id,
.operand_1 = lhs_elem_id,
.operand_2 = right_shift_id,
});
- ov_id.* = try self.intFromBool(wip_ov.ty_ref, overflowed_id);
+ ov_id.* = try self.intFromBool(wip_ov.ty, overflowed_id);
}
return try self.constructStruct(
@@ -3204,8 +3220,7 @@ const DeclGen = struct {
defer wip.deinit();
const elem_ty = if (wip.is_array) operand_ty.scalarType(mod) else operand_ty;
- const elem_ty_ref = try self.resolveType(elem_ty, .direct);
- const elem_ty_id = self.typeId(elem_ty_ref);
+ const elem_ty_id = try self.resolveType(elem_ty, .direct);
for (wip.results, 0..) |*result_id, i| {
const elem = try wip.elementAt(operand_ty, operand, i);
@@ -3230,6 +3245,8 @@ const DeclGen = struct {
.id_ref_4 = &.{elem},
});
+ // TODO: Comparison should be removed..
+ // Its valid because SpvModule caches numeric types
if (wip.ty_id == elem_ty_id) {
result_id.* = tmp;
continue;
@@ -3276,8 +3293,7 @@ const DeclGen = struct {
const operand = try self.resolve(reduce.operand);
const operand_ty = self.typeOf(reduce.operand);
const scalar_ty = operand_ty.scalarType(mod);
- const scalar_ty_ref = try self.resolveType(scalar_ty, .direct);
- const scalar_ty_id = self.typeId(scalar_ty_ref);
+ const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
const info = self.arithmeticTypeInfo(operand_ty);
@@ -3351,7 +3367,7 @@ const DeclGen = struct {
for (wip.results, 0..) |*result_id, i| {
const elem = try mask.elemValue(mod, i);
if (elem.isUndef(mod)) {
- result_id.* = try self.spv.constUndef(wip.ty_ref);
+ result_id.* = try self.spv.constUndef(wip.ty_id);
continue;
}
@@ -3366,11 +3382,10 @@ const DeclGen = struct {
}
fn indicesToIds(self: *DeclGen, indices: []const u32) ![]IdRef {
- const index_ty_ref = try self.intType(.unsigned, 32);
const ids = try self.gpa.alloc(IdRef, indices.len);
errdefer self.gpa.free(ids);
for (indices, ids) |index, *id| {
- id.* = try self.constInt(index_ty_ref, index);
+ id.* = try self.constInt(Type.u32, index, .direct);
}
return ids;
@@ -3378,13 +3393,13 @@ const DeclGen = struct {
fn accessChainId(
self: *DeclGen,
- result_ty_ref: CacheRef,
+ result_ty_id: IdRef,
base: IdRef,
indices: []const IdRef,
) !IdRef {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpInBoundsAccessChain, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.base = base,
.indexes = indices,
@@ -3398,18 +3413,18 @@ const DeclGen = struct {
/// is the latter and PtrAccessChain is the former.
fn accessChain(
self: *DeclGen,
- result_ty_ref: CacheRef,
+ result_ty_id: IdRef,
base: IdRef,
indices: []const u32,
) !IdRef {
const ids = try self.indicesToIds(indices);
defer self.gpa.free(ids);
- return try self.accessChainId(result_ty_ref, base, ids);
+ return try self.accessChainId(result_ty_id, base, ids);
}
fn ptrAccessChain(
self: *DeclGen,
- result_ty_ref: CacheRef,
+ result_ty_id: IdRef,
base: IdRef,
element: IdRef,
indices: []const u32,
@@ -3419,7 +3434,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.base = base,
.element = element,
@@ -3430,21 +3445,21 @@ const DeclGen = struct {
fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
const mod = self.module;
- const result_ty_ref = try self.resolveType(result_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ty, .direct);
switch (ptr_ty.ptrSize(mod)) {
.One => {
// Pointer to array
// TODO: Is this correct?
- return try self.accessChainId(result_ty_ref, ptr_id, &.{offset_id});
+ return try self.accessChainId(result_ty_id, ptr_id, &.{offset_id});
},
.C, .Many => {
- return try self.ptrAccessChain(result_ty_ref, ptr_id, offset_id, &.{});
+ return try self.ptrAccessChain(result_ty_id, ptr_id, offset_id, &.{});
},
.Slice => {
// TODO: This is probably incorrect. A slice should be returned here, though this is what llvm does.
const slice_ptr_id = try self.extractField(result_ty, ptr_id, 0);
- return try self.ptrAccessChain(result_ty_ref, slice_ptr_id, offset_id, &.{});
+ return try self.ptrAccessChain(result_ty_id, slice_ptr_id, offset_id, &.{});
},
}
}
@@ -3467,12 +3482,12 @@ const DeclGen = struct {
const ptr_ty = self.typeOf(bin_op.lhs);
const offset_id = try self.resolve(bin_op.rhs);
const offset_ty = self.typeOf(bin_op.rhs);
- const offset_ty_ref = try self.resolveType(offset_ty, .direct);
+ const offset_ty_id = try self.resolveType(offset_ty, .direct);
const result_ty = self.typeOfIndex(inst);
const negative_offset_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
- .id_result_type = self.typeId(offset_ty_ref),
+ .id_result_type = offset_ty_id,
.id_result = negative_offset_id,
.operand = offset_id,
});
@@ -3490,7 +3505,7 @@ const DeclGen = struct {
const mod = self.module;
var cmp_lhs_id = lhs_id;
var cmp_rhs_id = rhs_id;
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
const op_ty = switch (ty.zigTypeTag(mod)) {
.Int, .Bool, .Float => ty,
.Enum => ty.intTagType(mod),
@@ -3502,7 +3517,7 @@ const DeclGen = struct {
cmp_lhs_id = self.spv.allocId();
cmp_rhs_id = self.spv.allocId();
- const usize_ty_id = self.typeId(try self.sizeType());
+ const usize_ty_id = try self.resolveType(Type.usize, .direct);
try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
.id_result_type = usize_ty_id,
@@ -3564,20 +3579,20 @@ const DeclGen = struct {
const pl_eq_id = try self.cmp(op, Type.bool, payload_ty, lhs_pl_id, rhs_pl_id);
const lhs_not_valid_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = lhs_not_valid_id,
.operand = lhs_valid_id,
});
const impl_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = impl_id,
.operand_1 = lhs_not_valid_id,
.operand_2 = pl_eq_id,
});
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand_1 = valid_eq_id,
.operand_2 = impl_id,
@@ -3590,14 +3605,14 @@ const DeclGen = struct {
const impl_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = impl_id,
.operand_1 = lhs_valid_id,
.operand_2 = pl_neq_id,
});
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand_1 = valid_neq_id,
.operand_2 = impl_id,
@@ -3665,7 +3680,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emitRaw(self.spv.gpa, opcode, 4);
- self.func.body.writeOperand(spec.IdResultType, self.typeId(bool_ty_ref));
+ self.func.body.writeOperand(spec.IdResultType, bool_ty_id);
self.func.body.writeOperand(spec.IdResult, result_id);
self.func.body.writeOperand(spec.IdResultType, cmp_lhs_id);
self.func.body.writeOperand(spec.IdResultType, cmp_rhs_id);
@@ -3698,6 +3713,7 @@ const DeclGen = struct {
return try self.cmp(op, result_ty, ty, lhs_id, rhs_id);
}
+ /// Bitcast one type to another. Note: both types, input, output are expected in **direct** representation.
fn bitCast(
self: *DeclGen,
dst_ty: Type,
@@ -3705,13 +3721,11 @@ const DeclGen = struct {
src_id: IdRef,
) !IdRef {
const mod = self.module;
- const src_ty_ref = try self.resolveType(src_ty, .direct);
- const dst_ty_ref = try self.resolveType(dst_ty, .direct);
- const src_key = self.spv.cache.lookup(src_ty_ref);
- const dst_key = self.spv.cache.lookup(dst_ty_ref);
+ const src_ty_id = try self.resolveType(src_ty, .direct);
+ const dst_ty_id = try self.resolveType(dst_ty, .direct);
const result_id = blk: {
- if (src_ty_ref == dst_ty_ref) {
+ if (src_ty_id == dst_ty_id) {
break :blk src_id;
}
@@ -3721,7 +3735,7 @@ const DeclGen = struct {
if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
- .id_result_type = self.typeId(dst_ty_ref),
+ .id_result_type = dst_ty_id,
.id_result = result_id,
.integer_value = src_id,
});
@@ -3731,10 +3745,11 @@ const DeclGen = struct {
// We can only use OpBitcast for specific conversions: between numerical types, and
// between pointers. If the resolved spir-v types fall into this category then emit OpBitcast,
// otherwise use a temporary and perform a pointer cast.
- if ((src_key.isNumericalType() and dst_key.isNumericalType()) or (src_key == .ptr_type and dst_key == .ptr_type)) {
+ const can_bitcast = (src_ty.isNumeric(mod) and dst_ty.isNumeric(mod)) or (src_ty.isPtrAtRuntime(mod) and dst_ty.isPtrAtRuntime(mod));
+ if (can_bitcast) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(dst_ty_ref),
+ .id_result_type = dst_ty_id,
.id_result = result_id,
.operand = src_id,
});
@@ -3742,13 +3757,13 @@ const DeclGen = struct {
break :blk result_id;
}
- const dst_ptr_ty_ref = try self.ptrType(dst_ty, .Function);
+ const dst_ptr_ty_id = try self.ptrType(dst_ty, .Function);
const tmp_id = try self.alloc(src_ty, .{ .storage_class = .Function });
try self.store(src_ty, tmp_id, src_id, .{});
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(dst_ptr_ty_ref),
+ .id_result_type = dst_ptr_ty_id,
.id_result = casted_ptr_id,
.operand = tmp_id,
});
@@ -3761,7 +3776,7 @@ const DeclGen = struct {
// should we change the representation of strange integers?
if (dst_ty.zigTypeTag(mod) == .Int) {
const info = self.arithmeticTypeInfo(dst_ty);
- return try self.normalize(dst_ty_ref, result_id, info);
+ return try self.normalize(dst_ty, result_id, info);
}
return result_id;
@@ -3811,7 +3826,7 @@ const DeclGen = struct {
// type, we don't need to normalize when growing the type. The
// representation is already the same.
if (dst_info.bits < src_info.bits) {
- result_id.* = try self.normalize(wip.ty_ref, value_id, dst_info);
+ result_id.* = try self.normalize(wip.ty, value_id, dst_info);
} else {
result_id.* = value_id;
}
@@ -3820,7 +3835,7 @@ const DeclGen = struct {
}
fn intFromPtr(self: *DeclGen, operand_id: IdRef) !IdRef {
- const result_type_id = try self.resolveTypeId(Type.usize);
+ const result_type_id = try self.resolveType(Type.usize, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
.id_result_type = result_type_id,
@@ -3841,21 +3856,21 @@ const DeclGen = struct {
const operand_ty = self.typeOf(ty_op.operand);
const operand_id = try self.resolve(ty_op.operand);
const result_ty = self.typeOfIndex(inst);
- const result_ty_ref = try self.resolveType(result_ty, .direct);
- return try self.floatFromInt(result_ty_ref, operand_ty, operand_id);
+ return try self.floatFromInt(result_ty, operand_ty, operand_id);
}
- fn floatFromInt(self: *DeclGen, result_ty_ref: CacheRef, operand_ty: Type, operand_id: IdRef) !IdRef {
+ fn floatFromInt(self: *DeclGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef {
const operand_info = self.arithmeticTypeInfo(operand_ty);
const result_id = self.spv.allocId();
+ const result_ty_id = try self.resolveType(result_ty, .direct);
switch (operand_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpConvertSToF, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.signed_value = operand_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertUToF, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.unsigned_value = operand_id,
}),
@@ -3872,16 +3887,16 @@ const DeclGen = struct {
fn intFromFloat(self: *DeclGen, result_ty: Type, operand_id: IdRef) !IdRef {
const result_info = self.arithmeticTypeInfo(result_ty);
- const result_ty_ref = try self.resolveType(result_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ty, .direct);
const result_id = self.spv.allocId();
switch (result_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpConvertFToS, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.float_value = operand_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertFToU, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.float_value = operand_id,
}),
@@ -3898,7 +3913,7 @@ const DeclGen = struct {
defer wip.deinit();
for (wip.results, 0..) |*result_id, i| {
const elem_id = try wip.elementAt(Type.bool, operand_id, i);
- result_id.* = try self.intFromBool(wip.ty_ref, elem_id);
+ result_id.* = try self.intFromBool(wip.ty, elem_id);
}
return try wip.finalize();
}
@@ -3907,7 +3922,7 @@ const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
- const dest_ty_id = try self.resolveTypeId(dest_ty);
+ const dest_ty_id = try self.resolveType(dest_ty, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpFConvert, .{
@@ -3957,18 +3972,17 @@ const DeclGen = struct {
const slice_ty = self.typeOfIndex(inst);
const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
- const elem_ptr_ty_ref = try self.resolveType(elem_ptr_ty, .direct);
- const size_ty_ref = try self.sizeType();
+ const elem_ptr_ty_id = try self.resolveType(elem_ptr_ty, .direct);
const array_ptr_id = try self.resolve(ty_op.operand);
- const len_id = try self.constInt(size_ty_ref, array_ty.arrayLen(mod));
+ const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct);
const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(mod))
// Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
else
// Convert the pointer-to-array to a pointer to the first element.
- try self.accessChain(elem_ptr_ty_ref, array_ptr_id, &.{0});
+ try self.accessChain(elem_ptr_ty_id, array_ptr_id, &.{0});
return try self.constructStruct(
slice_ty,
@@ -4092,8 +4106,8 @@ const DeclGen = struct {
const array_ty = ty.childType(mod);
const elem_ty = array_ty.childType(mod);
const abi_size = elem_ty.abiSize(mod);
- const usize_ty_ref = try self.resolveType(Type.usize, .direct);
- return self.spv.constInt(usize_ty_ref, array_ty.arrayLenIncludingSentinel(mod) * abi_size);
+ const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size;
+ return try self.constInt(Type.usize, size, .direct);
},
.Many, .C => unreachable,
}
@@ -4142,10 +4156,10 @@ const DeclGen = struct {
const index_id = try self.resolve(bin_op.rhs);
const ptr_ty = self.typeOfIndex(inst);
- const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+ const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
- return try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
+ return try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
}
fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -4158,10 +4172,10 @@ const DeclGen = struct {
const index_id = try self.resolve(bin_op.rhs);
const ptr_ty = slice_ty.slicePtrFieldType(mod);
- const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+ const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
- const elem_ptr = try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
+ const elem_ptr = try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
}
@@ -4169,14 +4183,14 @@ const DeclGen = struct {
const mod = self.module;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
- const elem_ptr_ty_ref = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
if (ptr_ty.isSinglePointer(mod)) {
// Pointer-to-array. In this case, the resulting pointer is not of the same type
// as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
- return try self.accessChainId(elem_ptr_ty_ref, ptr_id, &.{index_id});
+ return try self.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id});
} else {
// Resulting pointer type is the same as the ptr_ty, so use ptrAccessChain
- return try self.ptrAccessChain(elem_ptr_ty_ref, ptr_id, index_id, &.{});
+ return try self.ptrAccessChain(elem_ptr_ty_id, ptr_id, index_id, &.{});
}
}
@@ -4209,11 +4223,11 @@ const DeclGen = struct {
// For now, just generate a temporary and use that.
// TODO: This backend probably also should use isByRef from llvm...
- const elem_ptr_ty_ref = try self.ptrType(elem_ty, .Function);
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, .Function);
const tmp_id = try self.alloc(array_ty, .{ .storage_class = .Function });
try self.store(array_ty, tmp_id, array_id, .{});
- const elem_ptr_id = try self.accessChainId(elem_ptr_ty_ref, tmp_id, &.{index_id});
+ const elem_ptr_id = try self.accessChainId(elem_ptr_ty_id, tmp_id, &.{index_id});
return try self.load(elem_ty, elem_ptr_id, .{});
}
@@ -4238,13 +4252,13 @@ const DeclGen = struct {
const scalar_ty = vector_ty.scalarType(mod);
const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(mod));
- const scalar_ptr_ty_ref = try self.ptrType(scalar_ty, storage_class);
+ const scalar_ptr_ty_id = try self.ptrType(scalar_ty, storage_class);
const vector_ptr = try self.resolve(data.vector_ptr);
const index = try self.resolve(extra.lhs);
const operand = try self.resolve(extra.rhs);
- const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_ref, vector_ptr, &.{index});
+ const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
try self.store(scalar_ty, elem_ptr_id, operand, .{
.is_volatile = vector_ptr_ty.isVolatilePtr(mod),
});
@@ -4260,7 +4274,7 @@ const DeclGen = struct {
if (layout.tag_size == 0) return;
const tag_ty = un_ty.unionTagTypeSafety(mod).?;
- const tag_ptr_ty_ref = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
+ const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
const union_ptr_id = try self.resolve(bin_op.lhs);
const new_tag_id = try self.resolve(bin_op.rhs);
@@ -4268,7 +4282,7 @@ const DeclGen = struct {
if (!layout.has_payload) {
try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
} else {
- const ptr_id = try self.accessChain(tag_ptr_ty_ref, union_ptr_id, &.{layout.tag_index});
+ const ptr_id = try self.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index});
try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
}
}
@@ -4298,6 +4312,8 @@ const DeclGen = struct {
// union type, then get the field pointer and pointer-cast it to the
// right type to store it. Finally load the entire union.
+ // Note: The result here is not cached, because it generates runtime code.
+
const mod = self.module;
const ip = &mod.intern_pool;
const union_ty = mod.typeToUnion(ty).?;
@@ -4316,28 +4332,26 @@ const DeclGen = struct {
} else 0;
if (!layout.has_payload) {
- const tag_ty_ref = try self.resolveType(tag_ty, .direct);
- return try self.constInt(tag_ty_ref, tag_int);
+ return try self.constInt(tag_ty, tag_int, .direct);
}
const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
if (layout.tag_size != 0) {
- const tag_ty_ref = try self.resolveType(tag_ty, .direct);
- const tag_ptr_ty_ref = try self.ptrType(tag_ty, .Function);
- const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
- const tag_id = try self.constInt(tag_ty_ref, tag_int);
+ const tag_ptr_ty_id = try self.ptrType(tag_ty, .Function);
+ const ptr_id = try self.accessChain(tag_ptr_ty_id, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
+ const tag_id = try self.constInt(tag_ty, tag_int, .direct);
try self.store(tag_ty, ptr_id, tag_id, .{});
}
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
- const active_pl_ptr_ty_ref = try self.ptrType(payload_ty, .Function);
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
+ const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
+ const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function);
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(active_pl_ptr_ty_ref),
+ .id_result_type = active_pl_ptr_ty_id,
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
@@ -4396,13 +4410,13 @@ const DeclGen = struct {
const tmp_id = try self.alloc(object_ty, .{ .storage_class = .Function });
try self.store(object_ty, tmp_id, object_id, .{});
- const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
+ const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
- const active_pl_ptr_ty_ref = try self.ptrType(field_ty, .Function);
+ const active_pl_ptr_ty_id = try self.ptrType(field_ty, .Function);
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(active_pl_ptr_ty_ref),
+ .id_result_type = active_pl_ptr_ty_id,
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
@@ -4419,9 +4433,7 @@ const DeclGen = struct {
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const parent_ty = ty_pl.ty.toType().childType(mod);
- const res_ty = try self.resolveType(ty_pl.ty.toType(), .indirect);
- const usize_ty = Type.usize;
- const usize_ty_ref = try self.resolveType(usize_ty, .direct);
+ const result_ty_id = try self.resolveType(ty_pl.ty.toType(), .indirect);
const field_ptr = try self.resolve(extra.field_ptr);
const field_ptr_int = try self.intFromPtr(field_ptr);
@@ -4430,13 +4442,13 @@ const DeclGen = struct {
const base_ptr_int = base_ptr_int: {
if (field_offset == 0) break :base_ptr_int field_ptr_int;
- const field_offset_id = try self.constInt(usize_ty_ref, field_offset);
- break :base_ptr_int try self.binOpSimple(usize_ty, field_ptr_int, field_offset_id, .OpISub);
+ const field_offset_id = try self.constInt(Type.usize, field_offset, .direct);
+ break :base_ptr_int try self.binOpSimple(Type.usize, field_ptr_int, field_offset_id, .OpISub);
};
const base_ptr = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
- .id_result_type = self.spv.resultId(res_ty),
+ .id_result_type = result_ty_id,
.id_result = base_ptr,
.integer_value = base_ptr_int,
});
@@ -4451,7 +4463,7 @@ const DeclGen = struct {
object_ptr: IdRef,
field_index: u32,
) !IdRef {
- const result_ty_ref = try self.resolveType(result_ptr_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
const mod = self.module;
const object_ty = object_ptr_ty.childType(mod);
@@ -4459,7 +4471,7 @@ const DeclGen = struct {
.Struct => switch (object_ty.containerLayout(mod)) {
.@"packed" => unreachable, // TODO
else => {
- return try self.accessChain(result_ty_ref, object_ptr, &.{field_index});
+ return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
},
.Union => switch (object_ty.containerLayout(mod)) {
@@ -4469,16 +4481,16 @@ const DeclGen = struct {
if (!layout.has_payload) {
// Asked to get a pointer to a zero-sized field. Just lower this
// to undefined, there is no reason to make it be a valid pointer.
- return try self.spv.constUndef(result_ty_ref);
+ return try self.spv.constUndef(result_ty_id);
}
const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
- const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, storage_class);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, object_ptr, &.{layout.payload_index});
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class);
+ const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
@@ -4506,7 +4518,7 @@ const DeclGen = struct {
};
// Allocate a function-local variable, with possible initializer.
- // This function returns a pointer to a variable of type `ty_ref`,
+ // This function returns a pointer to a variable of type `ty`,
// which is in the Generic address space. The variable is actually
// placed in the Function address space.
fn alloc(
@@ -4514,13 +4526,13 @@ const DeclGen = struct {
ty: Type,
options: AllocOptions,
) !IdRef {
- const ptr_fn_ty_ref = try self.ptrType(ty, .Function);
+ const ptr_fn_ty_id = try self.ptrType(ty, .Function);
// SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
// directly generate them into func.prologue instead of the body.
const var_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
- .id_result_type = self.typeId(ptr_fn_ty_ref),
+ .id_result_type = ptr_fn_ty_id,
.id_result = var_id,
.storage_class = .Function,
.initializer = options.initializer,
@@ -4533,9 +4545,9 @@ const DeclGen = struct {
switch (options.storage_class) {
.Generic => {
- const ptr_gn_ty_ref = try self.ptrType(ty, .Generic);
+ const ptr_gn_ty_id = try self.ptrType(ty, .Generic);
// Convert to a generic pointer
- return self.castToGeneric(self.typeId(ptr_gn_ty_ref), var_id);
+ return self.castToGeneric(ptr_gn_ty_id, var_id);
},
.Function => return var_id,
else => unreachable,
@@ -4563,9 +4575,9 @@ const DeclGen = struct {
assert(self.control_flow == .structured);
const result_id = self.spv.allocId();
- const block_id_ty_ref = try self.intType(.unsigned, 32);
+ const block_id_ty_id = try self.resolveType(Type.u32, .direct);
try self.func.body.emitRaw(self.spv.gpa, .OpPhi, @intCast(2 + incoming.len * 2)); // result type + result + variable/parent...
- self.func.body.writeOperand(spec.IdResultType, self.typeId(block_id_ty_ref));
+ self.func.body.writeOperand(spec.IdResultType, block_id_ty_id);
self.func.body.writeOperand(spec.IdRef, result_id);
for (incoming) |incoming_block| {
@@ -4663,8 +4675,8 @@ const DeclGen = struct {
// Make sure that we are still in a block when exiting the function.
// TODO: Can we get rid of that?
try self.beginSpvBlock(self.spv.allocId());
- const block_id_ty_ref = try self.intType(.unsigned, 32);
- return try self.spv.constUndef(block_id_ty_ref);
+ const block_id_ty_id = try self.resolveType(Type.u32, .direct);
+ return try self.spv.constUndef(block_id_ty_id);
}
// The top-most merge actually only has a single source, the
@@ -4745,7 +4757,7 @@ const DeclGen = struct {
assert(block.label != null);
const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(ty);
+ const result_type_id = try self.resolveType(ty, .direct);
try self.func.body.emitRaw(
self.spv.gpa,
@@ -4781,12 +4793,11 @@ const DeclGen = struct {
assert(cf.block_stack.items.len > 0);
// Check if the target of the branch was this current block.
- const block_id_ty_ref = try self.intType(.unsigned, 32);
- const this_block = try self.constInt(block_id_ty_ref, @intFromEnum(inst));
+ const this_block = try self.constInt(Type.u32, @intFromEnum(inst), .direct);
const jump_to_this_block_id = self.spv.allocId();
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
try self.func.body.emit(self.spv.gpa, .OpIEqual, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = jump_to_this_block_id,
.operand_1 = next_block,
.operand_2 = this_block,
@@ -4862,8 +4873,7 @@ const DeclGen = struct {
try self.store(operand_ty, block_result_var_id, operand_id, .{});
}
- const block_id_ty_ref = try self.intType(.unsigned, 32);
- const next_block = try self.constInt(block_id_ty_ref, @intFromEnum(br.block_inst));
+ const next_block = try self.constInt(Type.u32, @intFromEnum(br.block_inst), .direct);
try self.structuredBreak(next_block);
},
.unstructured => |cf| {
@@ -5026,8 +5036,7 @@ const DeclGen = struct {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
- const no_err_id = try self.constInt(err_ty_ref, 0);
+ const no_err_id = try self.constInt(Type.anyerror, 0, .direct);
return try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = no_err_id });
} else {
return try self.func.body.emit(self.spv.gpa, .OpReturn, {});
@@ -5051,8 +5060,7 @@ const DeclGen = struct {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
- const no_err_id = try self.constInt(err_ty_ref, 0);
+ const no_err_id = try self.constInt(Type.anyerror, 0, .direct);
return try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = no_err_id });
} else {
return try self.func.body.emit(self.spv.gpa, .OpReturn, {});
@@ -5076,8 +5084,7 @@ const DeclGen = struct {
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -5087,10 +5094,10 @@ const DeclGen = struct {
else
err_union_id;
- const zero_id = try self.constInt(err_ty_ref, 0);
+ const zero_id = try self.constInt(Type.anyerror, 0, .direct);
const is_err_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = is_err_id,
.operand_1 = err_id,
.operand_2 = zero_id,
@@ -5142,11 +5149,11 @@ const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const err_union_ty = self.typeOf(ty_op.operand);
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+ const err_ty_id = try self.resolveType(Type.anyerror, .direct);
if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
// No error possible, so just return undefined.
- return try self.spv.constUndef(err_ty_ref);
+ return try self.spv.constUndef(err_ty_id);
}
const payload_ty = err_union_ty.errorUnionPayload(mod);
@@ -5185,11 +5192,11 @@ const DeclGen = struct {
return operand_id;
}
- const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
+ const payload_ty_id = try self.resolveType(payload_ty, .indirect);
var members: [2]IdRef = undefined;
members[eu_layout.errorFieldIndex()] = operand_id;
- members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(payload_ty_ref);
+ members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(payload_ty_id);
var types: [2]Type = undefined;
types[eu_layout.errorFieldIndex()] = Type.anyerror;
@@ -5203,15 +5210,14 @@ const DeclGen = struct {
const err_union_ty = self.typeOfIndex(inst);
const operand_id = try self.resolve(ty_op.operand);
const payload_ty = self.typeOf(ty_op.operand);
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
const eu_layout = self.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
- return try self.constInt(err_ty_ref, 0);
+ return try self.constInt(Type.anyerror, 0, .direct);
}
var members: [2]IdRef = undefined;
- members[eu_layout.errorFieldIndex()] = try self.constInt(err_ty_ref, 0);
+ members[eu_layout.errorFieldIndex()] = try self.constInt(Type.anyerror, 0, .direct);
members[eu_layout.payloadFieldIndex()] = try self.convertToIndirect(payload_ty, operand_id);
var types: [2]Type = undefined;
@@ -5229,7 +5235,7 @@ const DeclGen = struct {
const optional_ty = if (is_pointer) operand_ty.childType(mod) else operand_ty;
const payload_ty = optional_ty.optionalChild(mod);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
if (optional_ty.optionalReprIsPayload(mod)) {
// Pointer payload represents nullability: pointer or slice.
@@ -5248,8 +5254,8 @@ const DeclGen = struct {
else
loaded_id;
- const payload_ty_ref = try self.resolveType(ptr_ty, .direct);
- const null_id = try self.spv.constNull(payload_ty_ref);
+ const payload_ty_id = try self.resolveType(ptr_ty, .direct);
+ const null_id = try self.spv.constNull(payload_ty_id);
const op: std.math.CompareOperator = switch (pred) {
.is_null => .eq,
.is_non_null => .neq,
@@ -5261,8 +5267,8 @@ const DeclGen = struct {
if (is_pointer) {
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod));
- const bool_ptr_ty = try self.ptrType(Type.bool, storage_class);
- const tag_ptr_id = try self.accessChain(bool_ptr_ty, operand_id, &.{1});
+ const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class);
+ const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1});
break :blk try self.load(Type.bool, tag_ptr_id, .{});
}
@@ -5283,7 +5289,7 @@ const DeclGen = struct {
// Invert condition
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand = is_non_null_id,
});
@@ -5305,8 +5311,7 @@ const DeclGen = struct {
const payload_ty = err_union_ty.errorUnionPayload(mod);
const eu_layout = self.errorUnionLayout(payload_ty);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
const error_id = if (!eu_layout.payload_has_bits)
operand_id
@@ -5315,10 +5320,10 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
const operands = .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand_1 = error_id,
- .operand_2 = try self.constInt(err_ty_ref, 0),
+ .operand_2 = try self.constInt(Type.anyerror, 0, .direct),
};
switch (pred) {
.is_err => try self.func.body.emit(self.spv.gpa, .OpINotEqual, operands),
@@ -5351,7 +5356,7 @@ const DeclGen = struct {
const optional_ty = operand_ty.childType(mod);
const payload_ty = optional_ty.optionalChild(mod);
const result_ty = self.typeOfIndex(inst);
- const result_ty_ref = try self.resolveType(result_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ty, .direct);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// There is no payload, but we still need to return a valid pointer.
@@ -5364,7 +5369,7 @@ const DeclGen = struct {
return try self.bitCast(result_ty, operand_ty, operand_id);
}
- return try self.accessChain(result_ty_ref, operand_id, &.{0});
+ return try self.accessChain(result_ty_id, operand_id, &.{0});
}
fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -5440,7 +5445,7 @@ const DeclGen = struct {
};
// First, pre-allocate the labels for the cases.
- const first_case_label = self.spv.allocIds(num_cases);
+ const case_labels = self.spv.allocIds(num_cases);
// We always need the default case - if zig has none, we will generate unreachable there.
const default = self.spv.allocId();
@@ -5471,7 +5476,7 @@ const DeclGen = struct {
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
- const label: IdRef = @enumFromInt(@intFromEnum(first_case_label) + case_i);
+ const label = case_labels.at(case_i);
for (items) |item| {
const value = (try self.air.value(item, mod)) orelse unreachable;
@@ -5511,7 +5516,7 @@ const DeclGen = struct {
const case_body: []const Air.Inst.Index = @ptrCast(self.air.extra[case.end + items.len ..][0..case.data.body_len]);
extra_index = case.end + case.data.items_len + case_body.len;
- const label: IdResult = @enumFromInt(@intFromEnum(first_case_label) + case_i);
+ const label = case_labels.at(case_i);
try self.beginSpvBlock(label);
@@ -5566,9 +5571,8 @@ const DeclGen = struct {
const mod = self.module;
const decl = mod.declPtr(self.decl_index);
const path = decl.getFileScope(mod).sub_file_path;
- const src_fname_id = try self.spv.resolveSourceFileName(path);
try self.func.body.emit(self.spv.gpa, .OpLine, .{
- .file = src_fname_id,
+ .file = try self.spv.resolveString(path),
.line = self.base_line + dbg_stmt.line + 1,
.column = dbg_stmt.column + 1,
});
@@ -5737,7 +5741,7 @@ const DeclGen = struct {
const fn_info = mod.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
- const result_type_ref = try self.resolveFnReturnType(Type.fromInterned(return_type));
+ const result_type_id = try self.resolveFnReturnType(Type.fromInterned(return_type));
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
@@ -5758,7 +5762,7 @@ const DeclGen = struct {
}
try self.func.body.emit(self.spv.gpa, .OpFunctionCall, .{
- .id_result_type = self.typeId(result_type_ref),
+ .id_result_type = result_type_id,
.id_result = result_id,
.function = callee_id,
.id_ref_3 = params[0..n_params],