Diffstat (limited to 'src/codegen/spirv.zig')
 src/codegen/spirv.zig | 1263 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 953 insertions(+), 310 deletions(-)
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index a4d3ec7f46..0f0adf5646 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -52,18 +52,136 @@ const Block = struct {
const BlockMap = std.AutoHashMapUnmanaged(Air.Inst.Index, *Block);
-/// Maps Zig decl indices to linking SPIR-V linking information.
-pub const DeclLinkMap = std.AutoHashMap(Module.Decl.Index, SpvModule.Decl.Index);
+/// This structure holds information that is relevant to the entire compilation,
+/// in contrast to `DeclGen`, which only holds relevant information about a
+/// single decl.
+pub const Object = struct {
+ /// A general-purpose allocator that can be used for any allocation for this Object.
+ gpa: Allocator,
+
+    /// The SPIR-V module that represents the final binary.
+ spv: SpvModule,
+
+    /// A map of Zig decl indices to SPIR-V decl indices.
+    decl_link: std.AutoHashMapUnmanaged(Decl.Index, SpvModule.Decl.Index) = .{},
+
+ /// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices.
+ anon_decl_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
+
+    /// A map from AIR intern pool indices to SPIR-V cache references (which
+    /// are essentially the SPIR-V analogue of interned types).
+    /// This map is typically only used for structures that are deemed heavy
+    /// enough to be worth storing here. The SPIR-V module also interns types,
+    /// so the main purpose of this map is to avoid recomputation and to
+    /// cache extra information about the type, rather than to aid the validity
+    /// of the SPIR-V module.
+ type_map: TypeMap = .{},
+
+ pub fn init(gpa: Allocator) Object {
+ return .{
+ .gpa = gpa,
+ .spv = SpvModule.init(gpa),
+ };
+ }
+
+ pub fn deinit(self: *Object) void {
+ self.spv.deinit();
+ self.decl_link.deinit(self.gpa);
+ self.anon_decl_link.deinit(self.gpa);
+ self.type_map.deinit(self.gpa);
+ }
+
+ fn genDecl(
+ self: *Object,
+ mod: *Module,
+ decl_index: Decl.Index,
+ air: Air,
+ liveness: Liveness,
+ ) !void {
+ var decl_gen = DeclGen{
+ .gpa = self.gpa,
+ .object = self,
+ .module = mod,
+ .spv = &self.spv,
+ .decl_index = decl_index,
+ .air = air,
+ .liveness = liveness,
+ .type_map = &self.type_map,
+ .current_block_label_id = undefined,
+ };
+ defer decl_gen.deinit();
+
+ decl_gen.genDecl() catch |err| switch (err) {
+ error.CodegenFail => {
+ try mod.failed_decls.put(mod.gpa, decl_index, decl_gen.error_msg.?);
+ },
+ else => |other| {
+                // There might be an error that happened *after* decl_gen.error_msg
+                // was already allocated, so be sure to free it.
+ if (decl_gen.error_msg) |error_msg| {
+ error_msg.deinit(mod.gpa);
+ }
+
+ return other;
+ },
+ };
+ }
+
+ pub fn updateFunc(
+ self: *Object,
+ mod: *Module,
+ func_index: InternPool.Index,
+ air: Air,
+ liveness: Liveness,
+ ) !void {
+ const decl_index = mod.funcInfo(func_index).owner_decl;
+ // TODO: Separate types for generating decls and functions?
+ try self.genDecl(mod, decl_index, air, liveness);
+ }
+
+ pub fn updateDecl(
+ self: *Object,
+ mod: *Module,
+ decl_index: Decl.Index,
+ ) !void {
+ try self.genDecl(mod, decl_index, undefined, undefined);
+ }
+
+ /// Fetch or allocate a result id for decl index. This function also marks the decl as alive.
+    /// Note: This function does not actually generate the decl; it only allocates an index.
+ pub fn resolveDecl(self: *Object, mod: *Module, decl_index: Decl.Index) !SpvModule.Decl.Index {
+ const decl = mod.declPtr(decl_index);
+ try mod.markDeclAlive(decl);
+
+ const entry = try self.decl_link.getOrPut(self.gpa, decl_index);
+ if (!entry.found_existing) {
+ // TODO: Extern fn?
+ const kind: SpvModule.DeclKind = if (decl.val.isFuncBody(mod))
+ .func
+ else
+ .global;
+
+ entry.value_ptr.* = try self.spv.allocDecl(kind);
+ }
+
+ return entry.value_ptr.*;
+ }
+};
/// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that.
-pub const DeclGen = struct {
+const DeclGen = struct {
/// A general-purpose allocator that can be used for any allocations for this DeclGen.
gpa: Allocator,
+ /// The object that this decl is generated into.
+ object: *Object,
+
/// The Zig module that we are generating decls for.
module: *Module,
/// The SPIR-V module that instructions should be emitted into.
+    /// This is the same as `&self.object.spv`, repeated here for brevity.
spv: *SpvModule,
/// The decl we are currently generating code for.
@@ -77,27 +195,19 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
- /// Maps Zig Decl indices to SPIR-V globals.
- decl_link: *DeclLinkMap,
-
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
    /// A counter to keep track of how many `arg` instructions we've seen so far.
- next_arg_index: u32,
+ next_arg_index: u32 = 0,
/// A map keeping track of which instruction generated which result-id.
inst_results: InstMap = .{},
- /// A map that maps AIR intern pool indices to SPIR-V cache references (which
- /// is basically the same thing except for SPIR-V).
- /// This map is typically only used for structures that are deemed heavy enough
- /// that it is worth to store them here. The SPIR-V module also interns types,
- /// and so the main purpose of this map is to avoid recomputation and to
- /// cache extra information about the type rather than to aid in validity
- /// of the SPIR-V module.
- type_map: TypeMap = .{},
+    /// Maps AIR intern pool indices to SPIR-V cache references.
+    /// See `Object.type_map`.
+ type_map: *TypeMap,
/// We need to keep track of result ids for block labels, as well as the 'incoming'
/// blocks for a block.
@@ -115,7 +225,7 @@ pub const DeclGen = struct {
/// If `gen` returned `Error.CodegenFail`, this contains an explanatory message.
/// Memory is owned by `module.gpa`.
- error_msg: ?*Module.ErrorMsg,
+ error_msg: ?*Module.ErrorMsg = null,
/// Possible errors the `genDecl` function may return.
const Error = error{ CodegenFail, OutOfMemory };
@@ -154,6 +264,12 @@ pub const DeclGen = struct {
/// This is the actual number of bits of the type, not the size of the backing integer.
bits: u16,
+ /// The number of bits required to store the type.
+ /// For `integer` and `float`, this is equal to `bits`.
+ /// For `strange_integer` and `bool` this is the size of the backing integer.
+ /// For `composite_integer` this is 0 (TODO)
+ backing_bits: u16,
+
/// Whether the type is a vector.
is_vector: bool,
@@ -175,65 +291,10 @@ pub const DeclGen = struct {
indirect,
};
- /// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
- /// only set when `gen` is called.
- pub fn init(
- allocator: Allocator,
- module: *Module,
- spv: *SpvModule,
- decl_link: *DeclLinkMap,
- ) DeclGen {
- return .{
- .gpa = allocator,
- .module = module,
- .spv = spv,
- .decl_index = undefined,
- .air = undefined,
- .liveness = undefined,
- .decl_link = decl_link,
- .next_arg_index = undefined,
- .current_block_label_id = undefined,
- .error_msg = undefined,
- };
- }
-
- /// Generate the code for `decl`. If a reportable error occurred during code generation,
- /// a message is returned by this function. Callee owns the memory. If this function
- /// returns such a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
- // Reset internal resources, we don't want to re-allocate these.
- self.decl_index = decl_index;
- self.air = air;
- self.liveness = liveness;
- self.args.items.len = 0;
- self.next_arg_index = 0;
- self.inst_results.clearRetainingCapacity();
- self.blocks.clearRetainingCapacity();
- self.current_block_label_id = undefined;
- self.func.reset();
- self.base_line_stack.items.len = 0;
- self.error_msg = null;
-
- self.genDecl() catch |err| switch (err) {
- error.CodegenFail => return self.error_msg,
- else => |others| {
- // There might be an error that happened *after* self.error_msg
- // was already allocated, so be sure to free it.
- if (self.error_msg) |error_msg| {
- error_msg.deinit(self.module.gpa);
- }
- return others;
- },
- };
-
- return null;
- }
-
/// Free resources owned by the DeclGen.
pub fn deinit(self: *DeclGen) void {
self.args.deinit(self.gpa);
self.inst_results.deinit(self.gpa);
- self.type_map.deinit(self.gpa);
self.blocks.deinit(self.gpa);
self.func.deinit(self.gpa);
self.base_line_stack.deinit(self.gpa);
@@ -269,7 +330,7 @@ pub const DeclGen = struct {
.func => |func| func.owner_decl,
else => unreachable,
};
- const spv_decl_index = try self.resolveDecl(fn_decl_index);
+ const spv_decl_index = try self.object.resolveDecl(mod, fn_decl_index);
try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
return self.spv.declPtr(spv_decl_index).result_id;
}
@@ -280,25 +341,87 @@ pub const DeclGen = struct {
return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
}
- /// Fetch or allocate a result id for decl index. This function also marks the decl as alive.
- /// Note: Function does not actually generate the decl.
- fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index {
+ fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index, storage_class: StorageClass) !IdRef {
+ // TODO: This cannot be a function at this point, but it should probably be handled anyway.
+ const spv_decl_index = blk: {
+ const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, storage_class });
+ if (entry.found_existing) {
+ try self.func.decl_deps.put(self.spv.gpa, entry.value_ptr.*, {});
+ return self.spv.declPtr(entry.value_ptr.*).result_id;
+ }
+
+ const spv_decl_index = try self.spv.allocDecl(.global);
+ try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
+ entry.value_ptr.* = spv_decl_index;
+ break :blk spv_decl_index;
+ };
+
const mod = self.module;
- const decl = mod.declPtr(decl_index);
- try mod.markDeclAlive(decl);
+ const ty = mod.intern_pool.typeOf(val).toType();
+ const ty_ref = try self.resolveType(ty, .indirect);
+ const ptr_ty_ref = try self.spv.ptrType(ty_ref, storage_class);
- const entry = try self.decl_link.getOrPut(decl_index);
- if (!entry.found_existing) {
- // TODO: Extern fn?
- const kind: SpvModule.DeclKind = if (decl.val.isFuncBody(mod))
- .func
- else
- .global;
+ const var_id = self.spv.declPtr(spv_decl_index).result_id;
- entry.value_ptr.* = try self.spv.allocDecl(kind);
- }
+ const section = &self.spv.sections.types_globals_constants;
+ try section.emit(self.spv.gpa, .OpVariable, .{
+ .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result = var_id,
+ .storage_class = storage_class,
+ });
- return entry.value_ptr.*;
+        // TODO: At some point we will be able to generate all of this as constants here,
+        // but then constant() will need to be implemented such that it doesn't generate
+        // any at-runtime code.
+        // NOTE: Because this is a global, we really only want to initialize it once.
+        // Therefore the constant lowering of this value will need to be deferred to some
+        // other function, which is then added to the list of initializers using endGlobal().
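+        //
+        // Roughly, the initializer function generated below has the shape:
+        //   %initializer = OpFunction %void None %proto
+        //   %entry       = OpLabel
+        //                  ...constant lowering of the value...
+        //                  OpStore %var %value_id
+        //                  OpReturn
+        //                  OpFunctionEnd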
+
+ // Save the current state so that we can temporarily generate into a different function.
+ // TODO: This should probably be made a little more robust.
+ const func = self.func;
+ defer self.func = func;
+ const block_label_id = self.current_block_label_id;
+ defer self.current_block_label_id = block_label_id;
+
+ self.func = .{};
+
+ // TODO: Merge this with genDecl?
+ const begin = self.spv.beginGlobal();
+
+ const void_ty_ref = try self.resolveType(Type.void, .direct);
+ const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
+ .return_type = void_ty_ref,
+ .parameters = &.{},
+ } });
+
+ const initializer_id = self.spv.allocId();
+ try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
+ .id_result_type = self.typeId(void_ty_ref),
+ .id_result = initializer_id,
+ .function_control = .{},
+ .function_type = self.typeId(initializer_proto_ty_ref),
+ });
+ const root_block_id = self.spv.allocId();
+ try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
+ .id_result = root_block_id,
+ });
+ self.current_block_label_id = root_block_id;
+
+ const val_id = try self.constant(ty, val.toValue(), .indirect);
+ try self.func.body.emit(self.spv.gpa, .OpStore, .{
+ .pointer = var_id,
+ .object = val_id,
+ });
+
+ self.spv.endGlobal(spv_decl_index, begin, var_id, initializer_id);
+ try self.func.body.emit(self.spv.gpa, .OpReturn, {});
+ try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
+ try self.spv.addFunction(spv_decl_index, self.func);
+
+ try self.spv.debugNameFmt(var_id, "__anon_{d}", .{@intFromEnum(val)});
+ try self.spv.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
+
+ return var_id;
}
    /// Start a new SPIR-V block. Emits the label of the new block, and stores which
@@ -376,12 +499,14 @@ pub const DeclGen = struct {
return switch (ty.zigTypeTag(mod)) {
.Bool => ArithmeticTypeInfo{
.bits = 1, // Doesn't matter for this class.
+ .backing_bits = self.backingIntBits(1).?,
.is_vector = false,
.signedness = .unsigned, // Technically, but doesn't matter for this class.
.class = .bool,
},
.Float => ArithmeticTypeInfo{
.bits = ty.floatBits(target),
+ .backing_bits = ty.floatBits(target), // TODO: F80?
.is_vector = false,
.signedness = .signed, // Technically, but doesn't matter for this class.
.class = .float,
@@ -392,6 +517,7 @@ pub const DeclGen = struct {
const maybe_backing_bits = self.backingIntBits(int_info.bits);
break :blk ArithmeticTypeInfo{
.bits = int_info.bits,
+ .backing_bits = maybe_backing_bits orelse 0,
.is_vector = false,
.signedness = int_info.signedness,
.class = if (maybe_backing_bits) |backing_bits|
@@ -403,8 +529,19 @@ pub const DeclGen = struct {
.composite_integer,
};
},
+ .Enum => return self.arithmeticTypeInfo(ty.intTagType(mod)),
// As of yet, there is no vector support in the self-hosted compiler.
- .Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
+ .Vector => blk: {
+ const child_type = ty.childType(mod);
+ const child_ty_info = try self.arithmeticTypeInfo(child_type);
+ break :blk ArithmeticTypeInfo{
+ .bits = child_ty_info.bits,
+ .backing_bits = child_ty_info.backing_bits,
+ .is_vector = true,
+ .signedness = child_ty_info.signedness,
+ .class = child_ty_info.class,
+ };
+ },
// TODO: For which types is this the case?
// else => self.todo("implement arithmeticTypeInfo for {}", .{ty.fmt(self.module)}),
else => unreachable,
@@ -482,7 +619,7 @@ pub const DeclGen = struct {
return result_id;
}
- /// Construct a struct at runtime.
+ /// Construct an array at runtime.
/// result_ty_ref must be an array type.
/// Constituents should be in `indirect` representation (as the elements of an array should be).
/// Result is in `direct` representation.
@@ -521,59 +658,6 @@ pub const DeclGen = struct {
return result_id;
}
- fn constructDeclRef(self: *DeclGen, ty: Type, decl_index: Decl.Index) !IdRef {
- const mod = self.module;
- const ty_ref = try self.resolveType(ty, .direct);
- const ty_id = self.typeId(ty_ref);
- const decl = mod.declPtr(decl_index);
- const spv_decl_index = try self.resolveDecl(decl_index);
- switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
- .func => {
- // TODO: Properly lower function pointers. For now we are going to hack around it and
- // just generate an empty pointer. Function pointers are represented by a pointer to usize.
- // TODO: Add dependency
- return try self.spv.constNull(ty_ref);
- },
- .extern_func => unreachable, // TODO
- else => {
- const decl_id = self.spv.declPtr(spv_decl_index).result_id;
- try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
-
- const final_storage_class = spvStorageClass(decl.@"addrspace");
-
- const decl_ty_ref = try self.resolveType(decl.ty, .indirect);
- const decl_ptr_ty_ref = try self.spv.ptrType(decl_ty_ref, final_storage_class);
-
- const ptr_id = switch (final_storage_class) {
- .Generic => blk: {
- // Pointer should be Generic, but is actually placed in CrossWorkgroup.
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
- .id_result_type = self.typeId(decl_ptr_ty_ref),
- .id_result = result_id,
- .pointer = decl_id,
- });
- break :blk result_id;
- },
- else => decl_id,
- };
-
- if (decl_ptr_ty_ref != ty_ref) {
- // Differing pointer types, insert a cast.
- const casted_ptr_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = ty_id,
- .id_result = casted_ptr_id,
- .operand = ptr_id,
- });
- return casted_ptr_id;
- } else {
- return ptr_id;
- }
- },
- }
- }
-
/// This function generates a load for a constant in direct (ie, non-memory) representation.
/// When the constant is simple, it can be generated directly using OpConstant instructions.
/// When the constant is more complicated however, it needs to be constructed using multiple values. This
@@ -738,7 +822,7 @@ pub const DeclGen = struct {
return try self.constructStruct(result_ty_ref, &.{ payload_id, has_pl_id });
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
- .array_type => |array_type| {
+ inline .array_type, .vector_type => |array_type, tag| {
const elem_ty = array_type.child.toType();
const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
@@ -765,9 +849,14 @@ pub const DeclGen = struct {
}
},
}
- if (array_type.sentinel != .none) {
- constituents[constituents.len - 1] = try self.constant(elem_ty, array_type.sentinel.toValue(), .indirect);
+
+ switch (tag) {
+ inline .array_type => if (array_type.sentinel != .none) {
+ constituents[constituents.len - 1] = try self.constant(elem_ty, array_type.sentinel.toValue(), .indirect);
+ },
+ else => {},
}
+
return try self.constructArray(result_ty_ref, constituents);
},
.struct_type => {
@@ -796,7 +885,7 @@ pub const DeclGen = struct {
return try self.constructStruct(result_ty_ref, constituents.items);
},
- .vector_type, .anon_struct_type => unreachable, // TODO
+ .anon_struct_type => unreachable, // TODO
else => unreachable,
},
.un => |un| {
@@ -817,9 +906,9 @@ pub const DeclGen = struct {
const result_ty_ref = try self.resolveType(ptr_ty, .direct);
const mod = self.module;
switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
- .decl => |decl| return try self.constructDeclRef(ptr_ty, decl),
- .anon_decl => @panic("TODO"),
- .mut_decl => |decl_mut| return try self.constructDeclRef(ptr_ty, decl_mut.decl),
+ .decl => |decl| return try self.constantDeclRef(ptr_ty, decl),
+ .mut_decl => |decl_mut| return try self.constantDeclRef(ptr_ty, decl_mut.decl),
+ .anon_decl => |anon_decl| return try self.constantAnonDeclRef(ptr_ty, anon_decl),
.int => |int| {
const ptr_id = self.spv.allocId();
// TODO: This can probably be an OpSpecConstantOp Bitcast, but
@@ -836,16 +925,156 @@ pub const DeclGen = struct {
.opt_payload => unreachable, // TODO
.comptime_field => unreachable,
.elem => |elem_ptr| {
- const elem_ptr_ty = mod.intern_pool.typeOf(elem_ptr.base).toType();
- const parent_ptr_id = try self.constantPtr(elem_ptr_ty, elem_ptr.base.toValue());
+ const parent_ptr_ty = mod.intern_pool.typeOf(elem_ptr.base).toType();
+ const parent_ptr_id = try self.constantPtr(parent_ptr_ty, elem_ptr.base.toValue());
const size_ty_ref = try self.sizeType();
const index_id = try self.constInt(size_ty_ref, elem_ptr.index);
- return self.ptrAccessChain(result_ty_ref, parent_ptr_id, index_id, &.{});
+
+ const elem_ptr_id = try self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
+
+ // TODO: Can we consolidate this in ptrElemPtr?
+            const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType2() so that we get T for *[N]T.
+ const elem_ty_ref = try self.resolveType(elem_ty, .direct);
+ const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
+
+ if (elem_ptr_ty_ref == result_ty_ref) {
+ return elem_ptr_id;
+ }
+ // This may happen when we have pointer-to-array and the result is
+ // another pointer-to-array instead of a pointer-to-element.
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+ .id_result_type = self.typeId(result_ty_ref),
+ .id_result = result_id,
+ .operand = elem_ptr_id,
+ });
+ return result_id;
},
.field => unreachable, // TODO
}
}
+ fn constantAnonDeclRef(self: *DeclGen, ty: Type, decl_val: InternPool.Index) !IdRef {
+ // TODO: Merge this function with constantDeclRef.
+
+ const mod = self.module;
+ const ip = &mod.intern_pool;
+ const ty_ref = try self.resolveType(ty, .direct);
+ const decl_ty = ip.typeOf(decl_val).toType();
+
+ if (decl_val.toValue().getFunction(mod)) |func| {
+ _ = func;
+ unreachable; // TODO
+ } else if (decl_val.toValue().getExternFunc(mod)) |func| {
+ _ = func;
+ unreachable;
+ }
+
+ // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
+ if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+            // Pointer to nothing - return undefined.
+ return self.spv.constUndef(ty_ref);
+ }
+
+ if (decl_ty.zigTypeTag(mod) == .Fn) {
+ unreachable; // TODO
+ }
+
+ const final_storage_class = spvStorageClass(ty.ptrAddressSpace(mod));
+ const actual_storage_class = switch (final_storage_class) {
+ .Generic => .CrossWorkgroup,
+ else => |other| other,
+ };
+
+ const decl_id = try self.resolveAnonDecl(decl_val, actual_storage_class);
+ const decl_ty_ref = try self.resolveType(decl_ty, .indirect);
+ const decl_ptr_ty_ref = try self.spv.ptrType(decl_ty_ref, final_storage_class);
+
+ const ptr_id = switch (final_storage_class) {
+ .Generic => blk: {
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
+ .id_result_type = self.typeId(decl_ptr_ty_ref),
+ .id_result = result_id,
+ .pointer = decl_id,
+ });
+ break :blk result_id;
+ },
+ else => decl_id,
+ };
+
+ if (decl_ptr_ty_ref != ty_ref) {
+ // Differing pointer types, insert a cast.
+ const casted_ptr_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+ .id_result_type = self.typeId(ty_ref),
+ .id_result = casted_ptr_id,
+ .operand = ptr_id,
+ });
+ return casted_ptr_id;
+ } else {
+ return ptr_id;
+ }
+ }
+
+ fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: Decl.Index) !IdRef {
+ const mod = self.module;
+ const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = self.typeId(ty_ref);
+ const decl = mod.declPtr(decl_index);
+ switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+ .func => {
+            // TODO: Properly lower function pointers. For now we are going to hack around it and
+            // just generate an undefined pointer. Function pointers are represented by a pointer to usize.
+ return try self.spv.constUndef(ty_ref);
+ },
+ .extern_func => unreachable, // TODO
+ else => {},
+ }
+
+ if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ // Pointer to nothing - return undefined.
+ return self.spv.constUndef(ty_ref);
+ }
+
+ const spv_decl_index = try self.object.resolveDecl(mod, decl_index);
+
+ const decl_id = self.spv.declPtr(spv_decl_index).result_id;
+ try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
+
+ const final_storage_class = spvStorageClass(decl.@"addrspace");
+
+ const decl_ty_ref = try self.resolveType(decl.ty, .indirect);
+ const decl_ptr_ty_ref = try self.spv.ptrType(decl_ty_ref, final_storage_class);
+
+ const ptr_id = switch (final_storage_class) {
+ .Generic => blk: {
+ // Pointer should be Generic, but is actually placed in CrossWorkgroup.
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
+ .id_result_type = self.typeId(decl_ptr_ty_ref),
+ .id_result = result_id,
+ .pointer = decl_id,
+ });
+ break :blk result_id;
+ },
+ else => decl_id,
+ };
+
+ if (decl_ptr_ty_ref != ty_ref) {
+ // Differing pointer types, insert a cast.
+ const casted_ptr_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+ .id_result_type = ty_id,
+ .id_result = casted_ptr_id,
+ .operand = ptr_id,
+ });
+ return casted_ptr_id;
+ } else {
+ return ptr_id;
+ }
+ }
+
// Turn a Zig type's name into a cache reference.
fn resolveTypeName(self: *DeclGen, ty: Type) !CacheString {
var name = std.ArrayList(u8).init(self.gpa);
@@ -968,6 +1197,22 @@ pub const DeclGen = struct {
return ty_ref;
}
+ fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !CacheRef {
+ const mod = self.module;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            // If the return type is an error set or an error union, then we lower this
+            // to an anyerror return type instead, so that it can be coerced into a function
+            // pointer type which has anyerror as the return type.
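+            // For example, an `error{}!void` return type has no runtime bits, but
+            // would presumably still be lowered to an anyerror return type here, so
+            // that the function type remains coercible.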
+ if (ret_ty.isError(mod)) {
+ return self.resolveType(Type.anyerror, .direct);
+ } else {
+ return self.resolveType(Type.void, .direct);
+ }
+ }
+
+ return try self.resolveType(ret_ty, .direct);
+ }
+
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
const mod = self.module;
@@ -975,14 +1220,31 @@ pub const DeclGen = struct {
log.debug("resolveType: ty = {}", .{ty.fmt(mod)});
const target = self.getTarget();
switch (ty.zigTypeTag(mod)) {
- .Void, .NoReturn => return try self.spv.resolve(.void_type),
+ .NoReturn => {
+ assert(repr == .direct);
+ return try self.spv.resolve(.void_type);
+ },
+ .Void => switch (repr) {
+ .direct => return try self.spv.resolve(.void_type),
+ // Pointers to void
+ .indirect => return try self.spv.resolve(.{ .opaque_type = .{
+ .name = try self.spv.resolveString("void"),
+ } }),
+ },
.Bool => switch (repr) {
.direct => return try self.spv.resolve(.bool_type),
.indirect => return try self.intType(.unsigned, 1),
},
.Int => {
const int_info = ty.intInfo(mod);
- // TODO: Integers in OpenCL kernels are always unsigned.
+ if (int_info.bits == 0) {
+                    // Sometimes the backend will be asked to generate a pointer to i0. OpTypeInt
+ // with 0 bits is invalid, so return an opaque type in this case.
+ assert(repr == .indirect);
+ return try self.spv.resolve(.{ .opaque_type = .{
+ .name = try self.spv.resolveString("u0"),
+ } });
+ }
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
@@ -1012,10 +1274,32 @@ pub const DeclGen = struct {
const elem_ty = ty.childType(mod);
const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
- const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
+ var total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
};
- const ty_ref = try self.spv.arrayType(total_len, elem_ty_ref);
+ const ty_ref = if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
+ // The size of the array would be 0, but that is not allowed in SPIR-V.
+ // This path can be reached when the backend is asked to generate a pointer to
+ // an array of some zero-bit type. This should always be an indirect path.
+ assert(repr == .indirect);
+
+ // We cannot use the child type here, so just use an opaque type.
+ break :blk try self.spv.resolve(.{ .opaque_type = .{
+ .name = try self.spv.resolveString("zero-sized array"),
+ } });
+ } else if (total_len == 0) blk: {
+ // The size of the array would be 0, but that is not allowed in SPIR-V.
+ // This path can be reached for example when there is a slicing of a pointer
+ // that produces a zero-length array. In all cases where this type can be generated,
+ // this should be an indirect path.
+ assert(repr == .indirect);
+
+                // Here we have a zero-length array of a non-zero-sized type. Generate
+                // an array of 1 element instead, so that ptr_elem_ptr instructions can
+                // be lowered to ptrAccessChain instead of manually performing the math.
+ break :blk try self.spv.arrayType(1, elem_ty_ref);
+ } else try self.spv.arrayType(total_len, elem_ty_ref);
+
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
},
@@ -1030,14 +1314,19 @@ pub const DeclGen = struct {
const param_ty_refs = try self.gpa.alloc(CacheRef, fn_info.param_types.len);
defer self.gpa.free(param_ty_refs);
- for (param_ty_refs, fn_info.param_types.get(ip)) |*param_type, fn_param_type| {
- param_type.* = try self.resolveType(fn_param_type.toType(), .direct);
+ var param_index: usize = 0;
+ for (fn_info.param_types.get(ip)) |param_ty_index| {
+ const param_ty = param_ty_index.toType();
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ param_ty_refs[param_index] = try self.resolveType(param_ty, .direct);
+ param_index += 1;
}
- const return_ty_ref = try self.resolveType(fn_info.return_type.toType(), .direct);
+ const return_ty_ref = try self.resolveFnReturnType(fn_info.return_type.toType());
const ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = return_ty_ref,
- .parameters = param_ty_refs,
+ .parameters = param_ty_refs[0..param_index],
} });
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
@@ -1072,19 +1361,14 @@ pub const DeclGen = struct {
} });
},
.Vector => {
- // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
- // which work on them), so simply use those.
- // Note: SPIR-V vectors only support bools, ints and floats, so pointer vectors need to be supported another way.
- // "composite integers" (larger than the largest supported native type) can probably be represented by an array of vectors.
- // TODO: The SPIR-V spec mentions that vector sizes may be quite restricted! look into which we can use, and whether OpTypeVector
- // is adequate at all for this.
-
- // TODO: Properly verify sizes and child type.
-
- return try self.spv.resolve(.{ .vector_type = .{
- .component_type = try self.resolveType(ty.childType(mod), repr),
- .component_count = @as(u32, @intCast(ty.vectorLen(mod))),
- } });
+ if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
+
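+                // Note: Zig vectors are lowered to arrays rather than to OpTypeVector
+                // here: SPIR-V vectors are limited to 2, 3, or 4 components (8 or 16
+                // with the Vector16 capability) and to scalar component types, while
+                // arrays carry no such restrictions.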
+ const elem_ty = ty.childType(mod);
+ const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
+
+ const ty_ref = try self.spv.arrayType(ty.vectorLen(mod), elem_ty_ref);
+ try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
+ return ty_ref;
},
.Struct => {
if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
@@ -1126,10 +1410,16 @@ pub const DeclGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
- const field_ty = struct_type.field_types.get(ip)[field_index];
- const field_name = ip.stringToSlice(struct_type.field_names.get(ip)[field_index]);
- try member_types.append(try self.resolveType(field_ty.toType(), .indirect));
- try member_names.append(try self.spv.resolveString(field_name));
+ const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ // This is a zero-bit field - we only needed it for the alignment.
+ continue;
+ }
+
+ const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
+ try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index});
+ try member_types.append(try self.resolveType(field_ty, .indirect));
+ try member_names.append(try self.spv.resolveString(ip.stringToSlice(field_name)));
}
const ty_ref = try self.spv.resolve(.{ .struct_type = .{
@@ -1215,6 +1505,13 @@ pub const DeclGen = struct {
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
},
+ .Opaque => {
+ return try self.spv.resolve(.{
+ .opaque_type = .{
+ .name = .none, // TODO
+ },
+ });
+ },
.Null,
.Undefined,
@@ -1431,7 +1728,7 @@ pub const DeclGen = struct {
const mod = self.module;
const ip = &mod.intern_pool;
const decl = mod.declPtr(self.decl_index);
- const spv_decl_index = try self.resolveDecl(self.decl_index);
+ const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index);
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
@@ -1439,20 +1736,23 @@ pub const DeclGen = struct {
if (decl.val.getFunction(mod)) |_| {
assert(decl.ty.zigTypeTag(mod) == .Fn);
+ const fn_info = mod.typeToFunc(decl.ty).?;
+ const return_ty_ref = try self.resolveFnReturnType(fn_info.return_type.toType());
+
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)),
+ .id_result_type = self.typeId(return_ty_ref),
.id_result = decl_id,
.function_control = .{}, // TODO: We can set inline here if the type requires it.
.function_type = prototype_id,
});
- const fn_info = mod.typeToFunc(decl.ty).?;
-
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
- for (0..fn_info.param_types.len) |i| {
- const param_type = fn_info.param_types.get(ip)[i];
- const param_type_id = try self.resolveTypeId(param_type.toType());
+ for (fn_info.param_types.get(ip)) |param_ty_index| {
+ const param_ty = param_ty_index.toType();
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const param_type_id = try self.resolveTypeId(param_ty);
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
@@ -1693,11 +1993,15 @@ pub const DeclGen = struct {
.shl => try self.airShift(inst, .OpShiftLeftLogical),
+ .min => try self.airMinMax(inst, .lt),
+ .max => try self.airMinMax(inst, .gt),
+
.bitcast => try self.airBitCast(inst),
.intcast, .trunc => try self.airIntCast(inst),
.int_from_ptr => try self.airIntFromPtr(inst),
.float_from_int => try self.airFloatFromInt(inst),
.int_from_float => try self.airIntFromFloat(inst),
+ .fpext, .fptrunc => try self.airFloatCast(inst),
.not => try self.airNot(inst),
.array_to_slice => try self.airArrayToSlice(inst),
@@ -1723,12 +2027,13 @@ pub const DeclGen = struct {
.struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3),
- .cmp_eq => try self.airCmp(inst, .eq),
- .cmp_neq => try self.airCmp(inst, .neq),
- .cmp_gt => try self.airCmp(inst, .gt),
- .cmp_gte => try self.airCmp(inst, .gte),
- .cmp_lt => try self.airCmp(inst, .lt),
- .cmp_lte => try self.airCmp(inst, .lte),
+ .cmp_eq => try self.airCmp(inst, .eq),
+ .cmp_neq => try self.airCmp(inst, .neq),
+ .cmp_gt => try self.airCmp(inst, .gt),
+ .cmp_gte => try self.airCmp(inst, .gte),
+ .cmp_lt => try self.airCmp(inst, .lt),
+ .cmp_lte => try self.airCmp(inst, .lte),
+ .cmp_vector => try self.airVectorCmp(inst),
.arg => self.airArg(),
.alloc => try self.airAlloc(inst),
@@ -1784,13 +2089,30 @@ pub const DeclGen = struct {
try self.inst_results.putNoClobber(self.gpa, inst, result_id);
}
- fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, comptime opcode: Opcode) !?IdRef {
- if (self.liveness.isUnused(inst)) return null;
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const lhs_id = try self.resolve(bin_op.lhs);
- const rhs_id = try self.resolve(bin_op.rhs);
+ fn binOpSimple(self: *DeclGen, ty: Type, lhs_id: IdRef, rhs_id: IdRef, comptime opcode: Opcode) !IdRef {
+ const mod = self.module;
+
+ if (ty.isVector(mod)) {
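+            // Vectors are lowered as arrays (see resolveType), so there is no single
+            // SPIR-V instruction for this: unroll the operation element-wise and
+            // construct a new array from the results.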
+ const child_ty = ty.childType(mod);
+ const vector_len = ty.vectorLen(mod);
+
+ var constituents = try self.gpa.alloc(IdRef, vector_len);
+ defer self.gpa.free(constituents);
+
+ for (constituents, 0..) |*constituent, i| {
+ const lhs_index_id = try self.extractField(child_ty, lhs_id, @intCast(i));
+ const rhs_index_id = try self.extractField(child_ty, rhs_id, @intCast(i));
+ const result_id = try self.binOpSimple(child_ty, lhs_index_id, rhs_index_id, opcode);
+ constituent.* = try self.convertToIndirect(child_ty, result_id);
+ }
+
+        const elem_ty_ref = try self.resolveType(child_ty, .indirect);
+        const result_ty_ref = try self.spv.arrayType(vector_len, elem_ty_ref);
+        return try self.constructArray(result_ty_ref, constituents);
+ }
+
const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst));
+ const result_type_id = try self.resolveTypeId(ty);
try self.func.body.emit(self.spv.gpa, opcode, .{
.id_result_type = result_type_id,
.id_result = result_id,
@@ -1800,6 +2122,17 @@ pub const DeclGen = struct {
return result_id;
}
+ fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, comptime opcode: Opcode) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
+ const ty = self.typeOf(bin_op.lhs);
+
+ return try self.binOpSimple(ty, lhs_id, rhs_id, opcode);
+ }
+
fn airShift(self: *DeclGen, inst: Air.Inst.Index, comptime opcode: Opcode) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1825,19 +2158,99 @@ pub const DeclGen = struct {
return result_id;
}
- fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef {
- const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(bits))) - 1;
+ fn airMinMax(self: *DeclGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
+ const result_ty = self.typeOfIndex(inst);
+ const result_ty_ref = try self.resolveType(result_ty, .direct);
+
+ const info = try self.arithmeticTypeInfo(result_ty);
+ // TODO: Use fmin for OpenCL
+ const cmp_id = try self.cmp(op, result_ty, lhs_id, rhs_id);
+ const selection_id = switch (info.class) {
+ .float => blk: {
+                // cmp uses the OpFOrd* instructions, which return false when either
+                // operand is NaN. For `0 [<>] nan` we want the result to pick lhs,
+                // so we also have to check whether rhs is NaN. We don't need to care
+                // about the result when both are NaN.
+ const rhs_is_nan_id = self.spv.allocId();
+ const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ try self.func.body.emit(self.spv.gpa, .OpIsNan, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = rhs_is_nan_id,
+ .x = rhs_id,
+ });
+ const float_cmp_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = float_cmp_id,
+ .operand_1 = cmp_id,
+ .operand_2 = rhs_is_nan_id,
+ });
+ break :blk float_cmp_id;
+ },
+ else => cmp_id,
+ };
+
const result_id = self.spv.allocId();
- const mask_id = try self.constInt(ty_ref, mask_value);
- try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
- .id_result_type = self.typeId(ty_ref),
+ try self.func.body.emit(self.spv.gpa, .OpSelect, .{
+ .id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
- .operand_1 = value_id,
- .operand_2 = mask_id,
+ .condition = selection_id,
+ .object_1 = lhs_id,
+ .object_2 = rhs_id,
});
return result_id;
}
+    /// This function canonicalizes a "strange" integer value:
+    /// For unsigned integers, the value is masked so that only the relevant bits
+    /// can contain nonzero values.
+    /// For signed integers, the value is masked and sign-extended into the backing integer.
+ fn normalizeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, info: ArithmeticTypeInfo) !IdRef {
+ assert(info.class != .composite_integer); // TODO
+ if (info.bits == info.backing_bits) {
+ return value_id;
+ }
+
+ switch (info.signedness) {
+ .unsigned => {
+ const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1;
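+                // For example, for a u5 this produces the mask 0b1_1111.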
+ const result_id = self.spv.allocId();
+ const mask_id = try self.constInt(ty_ref, mask_value);
+ try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
+ .id_result_type = self.typeId(ty_ref),
+ .id_result = result_id,
+ .operand_1 = value_id,
+ .operand_2 = mask_id,
+ });
+ return result_id;
+ },
+ .signed => {
+            // Shift left and then right so that we can copy the sign bit that way.
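+            // For example, an i3 stored in an i8 backing integer with garbage in
+            // the upper bits (0bxxxx_x101, i.e. -3) becomes 0b1010_0000 after the
+            // left shift and 0b1111_1101 (-3 as an i8) after the arithmetic right shift.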
+ const shift_amt_id = try self.constInt(ty_ref, info.backing_bits - info.bits);
+ const left_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpShiftLeftLogical, .{
+ .id_result_type = self.typeId(ty_ref),
+ .id_result = left_id,
+ .base = value_id,
+ .shift = shift_amt_id,
+ });
+ const right_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpShiftRightArithmetic, .{
+ .id_result_type = self.typeId(ty_ref),
+ .id_result = right_id,
+ .base = left_id,
+ .shift = shift_amt_id,
+ });
+ return right_id;
+ },
+ }
+ }
+
fn airArithOp(
self: *DeclGen,
inst: Air.Inst.Index,
@@ -1848,18 +2261,52 @@ pub const DeclGen = struct {
comptime modular: bool,
) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+
// LHS and RHS are guaranteed to have the same type, and AIR guarantees
// the result to be the same as the LHS and RHS, which matches SPIR-V.
const ty = self.typeOfIndex(inst);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- var lhs_id = try self.resolve(bin_op.lhs);
- var rhs_id = try self.resolve(bin_op.rhs);
-
- const result_ty_ref = try self.resolveType(ty, .direct);
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
assert(self.typeOf(bin_op.lhs).eql(ty, self.module));
assert(self.typeOf(bin_op.rhs).eql(ty, self.module));
+ return try self.arithOp(ty, lhs_id, rhs_id, fop, sop, uop, modular);
+ }
+
+ fn arithOp(
+ self: *DeclGen,
+ ty: Type,
+ lhs_id_: IdRef,
+ rhs_id_: IdRef,
+ comptime fop: Opcode,
+ comptime sop: Opcode,
+ comptime uop: Opcode,
+ /// true if this operation holds under modular arithmetic.
+ comptime modular: bool,
+ ) !IdRef {
+ var rhs_id = rhs_id_;
+ var lhs_id = lhs_id_;
+
+ const mod = self.module;
+ const result_ty_ref = try self.resolveType(ty, .direct);
+
+ if (ty.isVector(mod)) {
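+            // As in binOpSimple, vectors are lowered as arrays, so the operation
+            // is unrolled element-wise.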
+ const child_ty = ty.childType(mod);
+ const vector_len = ty.vectorLen(mod);
+ var constituents = try self.gpa.alloc(IdRef, vector_len);
+ defer self.gpa.free(constituents);
+
+ for (constituents, 0..) |*constituent, i| {
+ const lhs_index_id = try self.extractField(child_ty, lhs_id, @intCast(i));
+ const rhs_index_id = try self.extractField(child_ty, rhs_id, @intCast(i));
+ constituent.* = try self.arithOp(child_ty, lhs_index_id, rhs_index_id, fop, sop, uop, modular);
+ }
+
+ return self.constructArray(result_ty_ref, constituents);
+ }
+
// Binary operations are generally applicable to both scalar and vector operations
// in SPIR-V, but int and float versions of operations require different opcodes.
const info = try self.arithmeticTypeInfo(ty);
@@ -1870,8 +2317,8 @@ pub const DeclGen = struct {
},
.strange_integer => blk: {
if (!modular) {
- lhs_id = try self.maskStrangeInt(result_ty_ref, lhs_id, info.bits);
- rhs_id = try self.maskStrangeInt(result_ty_ref, rhs_id, info.bits);
+ lhs_id = try self.normalizeInt(result_ty_ref, lhs_id, info);
+ rhs_id = try self.normalizeInt(result_ty_ref, rhs_id, info);
}
break :blk switch (info.signedness) {
.signed => @as(usize, 1),
@@ -2174,8 +2621,7 @@ pub const DeclGen = struct {
fn cmp(
self: *DeclGen,
- comptime op: std.math.CompareOperator,
- bool_ty_id: IdRef,
+ op: std.math.CompareOperator,
ty: Type,
lhs_id: IdRef,
rhs_id: IdRef,
@@ -2183,38 +2629,104 @@ pub const DeclGen = struct {
const mod = self.module;
var cmp_lhs_id = lhs_id;
var cmp_rhs_id = rhs_id;
- const opcode: Opcode = opcode: {
- const op_ty = switch (ty.zigTypeTag(mod)) {
- .Int, .Bool, .Float => ty,
- .Enum => ty.intTagType(mod),
- .ErrorSet => Type.u16,
- .Pointer => blk: {
- // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
- // currently not implemented in the SPIR-V LLVM translator. Thus, we emit these using
- // OpConvertPtrToU...
- cmp_lhs_id = self.spv.allocId();
- cmp_rhs_id = self.spv.allocId();
-
- const usize_ty_id = self.typeId(try self.sizeType());
-
- try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
- .id_result_type = usize_ty_id,
- .id_result = cmp_lhs_id,
- .pointer = lhs_id,
- });
+ const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const op_ty = switch (ty.zigTypeTag(mod)) {
+ .Int, .Bool, .Float => ty,
+ .Enum => ty.intTagType(mod),
+ .ErrorSet => Type.u16,
+ .Pointer => blk: {
+ // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
+ // currently not implemented in the SPIR-V LLVM translator. Thus, we emit these using
+ // OpConvertPtrToU...
+ cmp_lhs_id = self.spv.allocId();
+ cmp_rhs_id = self.spv.allocId();
+
+ const usize_ty_id = self.typeId(try self.sizeType());
+
+ try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
+ .id_result_type = usize_ty_id,
+ .id_result = cmp_lhs_id,
+ .pointer = lhs_id,
+ });
- try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
- .id_result_type = usize_ty_id,
- .id_result = cmp_rhs_id,
- .pointer = rhs_id,
- });
+ try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
+ .id_result_type = usize_ty_id,
+ .id_result = cmp_rhs_id,
+ .pointer = rhs_id,
+ });
- break :blk Type.usize;
- },
- .Optional => unreachable, // TODO
- else => unreachable,
- };
+ break :blk Type.usize;
+ },
+ .Optional => {
+ const payload_ty = ty.optionalChild(mod);
+ if (ty.optionalReprIsPayload(mod)) {
+ assert(payload_ty.hasRuntimeBitsIgnoreComptime(mod));
+ assert(!payload_ty.isSlice(mod));
+ return self.cmp(op, payload_ty, lhs_id, rhs_id);
+ }
+
+ const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ try self.extractField(Type.bool, lhs_id, 1)
+ else
+ try self.convertToDirect(Type.bool, lhs_id);
+
+ const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ try self.extractField(Type.bool, rhs_id, 1)
+ else
+ try self.convertToDirect(Type.bool, rhs_id);
+
+ const valid_cmp_id = try self.cmp(op, Type.bool, lhs_valid_id, rhs_valid_id);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ return valid_cmp_id;
+ }
+
+ // TODO: Should we short circuit here? It shouldn't affect correctness, but
+ // perhaps it will generate more efficient code.
+
+ const lhs_pl_id = try self.extractField(payload_ty, lhs_id, 0);
+ const rhs_pl_id = try self.extractField(payload_ty, rhs_id, 0);
+
+ const pl_cmp_id = try self.cmp(op, payload_ty, lhs_pl_id, rhs_pl_id);
+ // op == .eq => lhs_valid == rhs_valid && lhs_pl == rhs_pl
+ // op == .neq => lhs_valid != rhs_valid || lhs_pl != rhs_pl
+
+ const result_id = self.spv.allocId();
+ const args = .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = result_id,
+ .operand_1 = valid_cmp_id,
+ .operand_2 = pl_cmp_id,
+ };
+ switch (op) {
+ .eq => try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, args),
+ .neq => try self.func.body.emit(self.spv.gpa, .OpLogicalOr, args),
+ else => unreachable,
+ }
+ return result_id;
+ },
+ .Vector => {
+ const child_ty = ty.childType(mod);
+ const vector_len = ty.vectorLen(mod);
+ const bool_ty_ref_indirect = try self.resolveType(Type.bool, .indirect);
+
+ var constituents = try self.gpa.alloc(IdRef, vector_len);
+ defer self.gpa.free(constituents);
+
+ for (constituents, 0..) |*constituent, i| {
+ const lhs_index_id = try self.extractField(child_ty, cmp_lhs_id, @intCast(i));
+ const rhs_index_id = try self.extractField(child_ty, cmp_rhs_id, @intCast(i));
+ const result_id = try self.cmp(op, child_ty, lhs_index_id, rhs_index_id);
+ constituent.* = try self.convertToIndirect(Type.bool, result_id);
+ }
+
+ const result_ty_ref = try self.spv.arrayType(vector_len, bool_ty_ref_indirect);
+ return try self.constructArray(result_ty_ref, constituents);
+ },
+ else => unreachable,
+ };
+
+ const opcode: Opcode = opcode: {
const info = try self.arithmeticTypeInfo(op_ty);
const signedness = switch (info.class) {
.composite_integer => {
@@ -2222,7 +2734,7 @@ pub const DeclGen = struct {
},
.float => break :opcode switch (op) {
.eq => .OpFOrdEqual,
- .neq => .OpFOrdNotEqual,
+ .neq => .OpFUnordNotEqual,
.lt => .OpFOrdLessThan,
.lte => .OpFOrdLessThanEqual,
.gt => .OpFOrdGreaterThan,
@@ -2236,8 +2748,8 @@ pub const DeclGen = struct {
.strange_integer => sign: {
const op_ty_ref = try self.resolveType(op_ty, .direct);
// Mask operands before performing comparison.
- cmp_lhs_id = try self.maskStrangeInt(op_ty_ref, cmp_lhs_id, info.bits);
- cmp_rhs_id = try self.maskStrangeInt(op_ty_ref, cmp_rhs_id, info.bits);
+ cmp_lhs_id = try self.normalizeInt(op_ty_ref, cmp_lhs_id, info);
+ cmp_rhs_id = try self.normalizeInt(op_ty_ref, cmp_rhs_id, info);
break :sign info.signedness;
},
.integer => info.signedness,
@@ -2265,7 +2777,7 @@ pub const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emitRaw(self.spv.gpa, opcode, 4);
- self.func.body.writeOperand(spec.IdResultType, bool_ty_id);
+ self.func.body.writeOperand(spec.IdResultType, self.typeId(bool_ty_ref));
self.func.body.writeOperand(spec.IdResult, result_id);
self.func.body.writeOperand(spec.IdResultType, cmp_lhs_id);
self.func.body.writeOperand(spec.IdResultType, cmp_rhs_id);
@@ -2281,11 +2793,22 @@ pub const DeclGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
- const bool_ty_id = try self.resolveTypeId(Type.bool);
const ty = self.typeOf(bin_op.lhs);
- assert(ty.eql(self.typeOf(bin_op.rhs), self.module));
- return try self.cmp(op, bool_ty_id, ty, lhs_id, rhs_id);
+ return try self.cmp(op, ty, lhs_id, rhs_id);
+ }
+
+ fn airVectorCmp(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const vec_cmp = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ const lhs_id = try self.resolve(vec_cmp.lhs);
+ const rhs_id = try self.resolve(vec_cmp.rhs);
+ const op = vec_cmp.compareOperator();
+ const ty = self.typeOf(vec_cmp.lhs);
+
+ return try self.cmp(op, ty, lhs_id, rhs_id);
}
fn bitCast(
@@ -2295,26 +2818,58 @@ pub const DeclGen = struct {
src_id: IdRef,
) !IdRef {
const mod = self.module;
+ const src_ty_ref = try self.resolveType(src_ty, .direct);
const dst_ty_ref = try self.resolveType(dst_ty, .direct);
- const result_id = self.spv.allocId();
+ if (src_ty_ref == dst_ty_ref) {
+ return src_id;
+ }
// TODO: Some more cases are missing here
// See fn bitCast in llvm.zig
if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
+ const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = self.typeId(dst_ty_ref),
.id_result = result_id,
.integer_value = src_id,
});
- } else {
+ return result_id;
+ }
+
+        // We can only use OpBitcast for specific conversions: between numerical types, and
+        // between pointers. If the resolved SPIR-V types fall into this category then emit
+        // OpBitcast, otherwise use a temporary and perform a pointer cast.
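+        // For example, bitcasting between a Zig vector (lowered here as an array)
+        // and an integer is neither of those, so it would take the temporary path below.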
+ const src_key = self.spv.cache.lookup(src_ty_ref);
+ const dst_key = self.spv.cache.lookup(dst_ty_ref);
+
+ if ((src_key.isNumericalType() and dst_key.isNumericalType()) or (src_key == .ptr_type and dst_key == .ptr_type)) {
+ const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = self.typeId(dst_ty_ref),
.id_result = result_id,
.operand = src_id,
});
+ return result_id;
}
- return result_id;
+
+ const src_ptr_ty_ref = try self.spv.ptrType(src_ty_ref, .Function);
+ const dst_ptr_ty_ref = try self.spv.ptrType(dst_ty_ref, .Function);
+
+ const tmp_id = self.spv.allocId();
+ try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
+ .id_result_type = self.typeId(src_ptr_ty_ref),
+ .id_result = tmp_id,
+ .storage_class = .Function,
+ });
+ try self.store(src_ty, tmp_id, src_id, false);
+ const casted_ptr_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+ .id_result_type = self.typeId(dst_ptr_ty_ref),
+ .id_result = casted_ptr_id,
+ .operand = tmp_id,
+ });
+ return try self.load(dst_ty, casted_ptr_id, false);
}
fn airBitCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -2331,25 +2886,33 @@ pub const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
- const dest_ty = self.typeOfIndex(inst);
- const dest_ty_id = try self.resolveTypeId(dest_ty);
+ const src_ty = self.typeOf(ty_op.operand);
+ const dst_ty = self.typeOfIndex(inst);
+ const src_ty_ref = try self.resolveType(src_ty, .direct);
+ const dst_ty_ref = try self.resolveType(dst_ty, .direct);
- const mod = self.module;
- const dest_info = dest_ty.intInfo(mod);
+ const src_info = try self.arithmeticTypeInfo(src_ty);
+ const dst_info = try self.arithmeticTypeInfo(dst_ty);
- // TODO: Masking?
+        // While intcast promises that the value already fits, the upper bits of a
+        // strange integer may contain garbage. Therefore, mask or sign-extend it first.
+ const src_id = try self.normalizeInt(src_ty_ref, operand_id, src_info);
+
+ if (src_info.backing_bits == dst_info.backing_bits) {
+ return src_id;
+ }
const result_id = self.spv.allocId();
- switch (dest_info.signedness) {
+ switch (dst_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpSConvert, .{
- .id_result_type = dest_ty_id,
+ .id_result_type = self.typeId(dst_ty_ref),
.id_result = result_id,
- .signed_value = operand_id,
+ .signed_value = src_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
- .id_result_type = dest_ty_id,
+ .id_result_type = self.typeId(dst_ty_ref),
.id_result = result_id,
- .unsigned_value = operand_id,
+ .unsigned_value = src_id,
}),
}
return result_id;
@@ -2422,6 +2985,23 @@ pub const DeclGen = struct {
return result_id;
}
+ fn airFloatCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
+ const dest_ty = self.typeOfIndex(inst);
+ const dest_ty_id = try self.resolveTypeId(dest_ty);
+
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpFConvert, .{
+ .id_result_type = dest_ty_id,
+ .id_result = result_id,
+ .float_value = operand_id,
+ });
+ return result_id;
+ }
+
fn airNot(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -2461,22 +3041,23 @@ pub const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const array_ptr_ty = self.typeOf(ty_op.operand);
const array_ty = array_ptr_ty.childType(mod);
- const elem_ty = array_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
- const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(array_ptr_ty.ptrAddressSpace(mod)));
const slice_ty = self.typeOfIndex(inst);
+ const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
+
+ const elem_ptr_ty_ref = try self.resolveType(elem_ptr_ty, .direct);
const slice_ty_ref = try self.resolveType(slice_ty, .direct);
const size_ty_ref = try self.sizeType();
const array_ptr_id = try self.resolve(ty_op.operand);
const len_id = try self.constInt(size_ty_ref, array_ty.arrayLen(mod));
- if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- unreachable; // TODO
- }
+ const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(mod))
+ // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
+ try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
+ else
+ // Convert the pointer-to-array to a pointer to the first element.
+ try self.accessChain(elem_ptr_ty_ref, array_ptr_id, &.{0});
- // Convert the pointer-to-array to a pointer to the first element.
- const elem_ptr_id = try self.accessChain(elem_ptr_ty_ref, array_ptr_id, &.{0});
return try self.constructStruct(slice_ty_ref, &.{ elem_ptr_id, len_id });
}
@@ -2500,6 +3081,7 @@ pub const DeclGen = struct {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
+ const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.typeOfIndex(inst);
const result_ty_ref = try self.resolveType(result_ty, .direct);
@@ -2508,15 +3090,53 @@ pub const DeclGen = struct {
switch (result_ty.zigTypeTag(mod)) {
.Vector => unreachable, // TODO
- .Struct => unreachable, // TODO
+ .Struct => {
+ if (mod.typeToPackedStruct(result_ty)) |struct_type| {
+ _ = struct_type;
+ unreachable; // TODO
+ }
+
+ const constituents = try self.gpa.alloc(IdRef, elements.len);
+ defer self.gpa.free(constituents);
+ var index: usize = 0;
+
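+            // Comptime-known fields take no runtime slot, so `index` may end up
+            // smaller than `elements.len`; only `constituents[0..index]` is used.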
+ switch (ip.indexToKey(result_ty.toIntern())) {
+ .anon_struct_type => |tuple| {
+ for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
+ if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
+ assert(field_ty.toType().hasRuntimeBits(mod));
+
+ const id = try self.resolve(element);
+ constituents[index] = try self.convertToIndirect(field_ty.toType(), id);
+ index += 1;
+ }
+ },
+ .struct_type => |struct_type| {
+ var it = struct_type.iterateRuntimeOrder(ip);
+ for (elements, 0..) |element, i| {
+ const field_index = it.next().?;
+ if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
+ const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ assert(field_ty.hasRuntimeBitsIgnoreComptime(mod));
+
+ const id = try self.resolve(element);
+ constituents[index] = try self.convertToIndirect(field_ty, id);
+ index += 1;
+ }
+ },
+ else => unreachable,
+ }
+
+ return try self.constructStruct(result_ty_ref, constituents[0..index]);
+ },
.Array => {
const array_info = result_ty.arrayInfo(mod);
const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(mod));
const elem_ids = try self.gpa.alloc(IdRef, n_elems);
defer self.gpa.free(elem_ids);
- for (elements, 0..) |elem_inst, i| {
- const id = try self.resolve(elem_inst);
+ for (elements, 0..) |element, i| {
+ const id = try self.resolve(element);
elem_ids[i] = try self.convertToIndirect(array_info.elem_type, id);
}
@@ -2540,7 +3160,8 @@ pub const DeclGen = struct {
fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
const mod = self.module;
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
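+        // slice_elem_ptr is a ty_pl instruction; its lhs/rhs operands live in
+        // Air.Bin extra data rather than in an inline bin_op payload.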
const slice_ty = self.typeOf(bin_op.lhs);
if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
@@ -2593,14 +3214,17 @@ pub const DeclGen = struct {
const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType(mod);
- // TODO: Make this return a null ptr or something
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
-
+ const src_ptr_ty = self.typeOf(bin_op.lhs);
+ const elem_ty = src_ptr_ty.childType(mod);
const ptr_id = try self.resolve(bin_op.lhs);
+
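+        // A pointer to a zero-bit type has no storage to index into, so just
+        // reinterpret the pointer as the result pointer type.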
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const dst_ptr_ty = self.typeOfIndex(inst);
+ return try self.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id);
+ }
+
const index_id = try self.resolve(bin_op.rhs);
- return try self.ptrElemPtr(ptr_ty, ptr_id, index_id);
+ return try self.ptrElemPtr(src_ptr_ty, ptr_id, index_id);
}
fn airArrayElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3101,15 +3725,25 @@ pub const DeclGen = struct {
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
- const operand_ty = self.typeOf(operand);
+ const ret_ty = self.typeOf(operand);
const mod = self.module;
- if (operand_ty.hasRuntimeBits(mod)) {
- // TODO: If we return an empty struct, this branch is also hit incorrectly.
- const operand_id = try self.resolve(operand);
- try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
- } else {
- try self.func.body.emit(self.spv.gpa, .OpReturn, {});
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const decl = mod.declPtr(self.decl_index);
+ const fn_info = mod.typeToFunc(decl.ty).?;
+ if (fn_info.return_type.toType().isError(mod)) {
+                // Functions with an empty error set are emitted with an error code
+                // return type and return zero, so that pointers to them can be
+                // coerced to function types that return anyerror.
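+                // For example (sketch): `fn f() error{}!void {}` is lowered with
+                // the anyerror integer return type and returns a zero error code,
+                // so `&f` may coerce to `*const fn () anyerror!void`.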
+ const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+ const no_err_id = try self.constInt(err_ty_ref, 0);
+ return try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = no_err_id });
+ } else {
+ return try self.func.body.emit(self.spv.gpa, .OpReturn, {});
+ }
}
+
+ const operand_id = try self.resolve(operand);
+ try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
}
fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -3119,8 +3753,18 @@ pub const DeclGen = struct {
const ret_ty = ptr_ty.childType(mod);
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- try self.func.body.emit(self.spv.gpa, .OpReturn, {});
- return;
+ const decl = mod.declPtr(self.decl_index);
+ const fn_info = mod.typeToFunc(decl.ty).?;
+ if (fn_info.return_type.toType().isError(mod)) {
+                // Functions with an empty error set are emitted with an error code
+                // return type and return zero, so that pointers to them can be
+                // coerced to function types that return anyerror.
+ const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+ const no_err_id = try self.constInt(err_ty_ref, 0);
+ return try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = no_err_id });
+ } else {
+ return try self.func.body.emit(self.spv.gpa, .OpReturn, {});
+ }
}
const ptr = try self.resolve(un_op);
@@ -3297,31 +3941,26 @@ pub const DeclGen = struct {
payload_ty;
const ptr_id = if (payload_ty.isSlice(mod))
- try self.extractField(Type.bool, operand_id, 0)
+ try self.extractField(ptr_ty, operand_id, 0)
else
operand_id;
const payload_ty_ref = try self.resolveType(ptr_ty, .direct);
const null_id = try self.spv.constNull(payload_ty_ref);
- const result_id = self.spv.allocId();
- const operands = .{
- .id_result_type = self.typeId(bool_ty_ref),
- .id_result = result_id,
- .operand_1 = ptr_id,
- .operand_2 = null_id,
+ const op: std.math.CompareOperator = switch (pred) {
+ .is_null => .eq,
+ .is_non_null => .neq,
};
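+            // cmp() handles the pointer comparison (formerly an inline
+            // OpPtrEqual / OpPtrNotEqual emitted right here).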
- switch (pred) {
- .is_null => try self.func.body.emit(self.spv.gpa, .OpPtrEqual, operands),
- .is_non_null => try self.func.body.emit(self.spv.gpa, .OpPtrNotEqual, operands),
- }
- return result_id;
+ return try self.cmp(op, ptr_ty, ptr_id, null_id);
}
- const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod))
+ const is_non_null_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.extractField(Type.bool, operand_id, 1)
else
            // Optional representation is a bool indicating whether the optional is set.
- operand_id;
+ // Optionals with no payload are represented as an (indirect) bool, so convert
+ // it back to the direct bool here.
+ try self.convertToDirect(Type.bool, operand_id);
return switch (pred) {
.is_null => blk: {
@@ -3400,17 +4039,19 @@ pub const DeclGen = struct {
const payload_ty = self.typeOf(ty_op.operand);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return try self.constBool(true, .direct);
+ return try self.constBool(true, .indirect);
}
const operand_id = try self.resolve(ty_op.operand);
+
const optional_ty = self.typeOfIndex(inst);
if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
const optional_ty_ref = try self.resolveType(optional_ty, .direct);
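+        // Store the payload in its indirect (in-memory) representation inside
+        // the optional struct.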
- const members = [_]IdRef{ operand_id, try self.constBool(true, .indirect) };
+ const payload_id = try self.convertToIndirect(payload_ty, operand_id);
+ const members = [_]IdRef{ payload_id, try self.constBool(true, .indirect) };
return try self.constructStruct(optional_ty_ref, &members);
}
@@ -3437,6 +4078,7 @@ pub const DeclGen = struct {
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
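+            // Error values are small integers, so a single 32-bit literal word suffices.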
+ .ErrorSet => 1,
            else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them, since we can only switch on integers.
};
@@ -3490,6 +4132,7 @@ pub const DeclGen = struct {
                // TODO: figure out if cond_ty is correct (something with enum literals)
break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
},
+ .ErrorSet => value.getErrorInt(mod),
else => unreachable,
};
const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -3533,10 +4176,10 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- const src_fname_id = try self.spv.resolveSourceFileName(
- self.module,
- self.module.declPtr(self.decl_index),
- );
+ const mod = self.module;
+ const decl = mod.declPtr(self.decl_index);
+ const path = decl.getFileScope(mod).sub_file_path;
+ const src_fname_id = try self.spv.resolveSourceFileName(path);
const base_line = self.base_line_stack.getLast();
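+        // OpLine tags the instructions that follow with a source location.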
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
@@ -3710,7 +4353,7 @@ pub const DeclGen = struct {
const fn_info = mod.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
- const result_type_id = try self.resolveTypeId(return_type.toType());
+ const result_type_ref = try self.resolveFnReturnType(return_type.toType());
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
@@ -3722,16 +4365,16 @@ pub const DeclGen = struct {
// Note: resolve() might emit instructions, so we need to call it
// before starting to emit OpFunctionCall instructions. Hence the
// temporary params buffer.
- const arg_id = try self.resolve(arg);
const arg_ty = self.typeOf(arg);
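+            // Zero-bit arguments are not part of the lowered function type, so
+            // skip them without resolving.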
if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ const arg_id = try self.resolve(arg);
params[n_params] = arg_id;
n_params += 1;
}
try self.func.body.emit(self.spv.gpa, .OpFunctionCall, .{
- .id_result_type = result_type_id,
+ .id_result_type = self.typeId(result_type_ref),
.id_result = result_id,
.function = callee_id,
.id_ref_3 = params[0..n_params],