author     Ali Cheraghi <alichraghi@proton.me>  2025-02-15 00:08:05 +0330
committer  Ali Cheraghi <alichraghi@proton.me>  2025-02-18 18:08:47 +0330
commit     d5e1cb3ea2cdd85bc2a9ca002d69d121a94f721c (patch)
tree       3dbfebe63a8723c3f761fdb297434d60d05e4354 /src/codegen
parent     85169bbba24d7e7592a24de5af6743b34bfe5961 (diff)
spirv: ziggify and remove unknown spirv features
`OpCapability` and `OpExtension` can now also be emitted from inline assembly.
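A minimal sketch of what this enables (hypothetical kernel; the target, extension name, and exact inline-assembly form are illustrative assumptions, not taken from this commit):

```zig
// Hypothetical SPIR-V kernel; assumes a spirv64 target so the self-hosted
// assembler handles the inline assembly. Capability/extension names are
// illustrative only.
export fn demo() callconv(.Kernel) void {
    // Each instruction is routed to Module.addCapability/addExtension,
    // which deduplicate before emitting.
    asm volatile (
        \\OpCapability Int64
        \\OpExtension "SPV_KHR_expect_assume"
    );
}
```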
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/spirv.zig            36
-rw-r--r--  src/codegen/spirv/Assembler.zig  10
-rw-r--r--  src/codegen/spirv/Module.zig     83
3 files changed, 102 insertions, 27 deletions
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 58182ead0b..d6997c7e6c 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -552,7 +552,7 @@ const NavGen = struct {
}
fn castToGeneric(self: *NavGen, type_id: IdRef, ptr_id: IdRef) !IdRef {
- if (self.spv.hasFeature(.Kernel)) {
+ if (self.spv.hasFeature(.kernel)) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
.id_result_type = type_id,
@@ -591,10 +591,10 @@ const NavGen = struct {
// 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
// 32-bit integers are always supported (see spec, 2.16.1, Data rules).
const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
- .{ .bits = 8, .feature = .Int8 },
- .{ .bits = 16, .feature = .Int16 },
+ .{ .bits = 8, .feature = .int8 },
+ .{ .bits = 16, .feature = .int16 },
.{ .bits = 32, .feature = null },
- .{ .bits = 64, .feature = .Int64 },
+ .{ .bits = 64, .feature = .int64 },
};
for (ints) |int| {
@@ -612,7 +612,7 @@ const NavGen = struct {
/// is no way of knowing whether those are actually supported.
/// TODO: Maybe this should be cached?
fn largestSupportedIntBits(self: *NavGen) u16 {
- return if (self.spv.hasFeature(.Int64)) 64 else 32;
+ return if (self.spv.hasFeature(.int64)) 64 else 32;
}
/// Checks whether the type is "composite int", an integer consisting of multiple native integers. These are represented by
@@ -644,7 +644,7 @@ const NavGen = struct {
if (elem_ty.isNumeric(zcu) or elem_ty.toIntern() == .bool_type) {
if (len > 1 and len <= 4) return true;
- if (self.spv.hasFeature(.Vector16)) return (len == 8 or len == 16);
+ if (self.spv.hasFeature(.vector16)) return (len == 8 or len == 16);
}
return false;
@@ -1241,7 +1241,7 @@ const NavGen = struct {
};
// Kernel only supports unsigned ints.
- if (self.spv.hasFeature(.Kernel)) {
+ if (self.spv.hasFeature(.kernel)) {
return self.spv.intType(.unsigned, backing_bits);
}
@@ -1465,10 +1465,10 @@ const NavGen = struct {
// so if the float is not supported, just return an error.
const bits = ty.floatBits(target);
const supported = switch (bits) {
- 16 => Target.spirv.featureSetHas(target.cpu.features, .Float16),
+ 16 => self.spv.hasFeature(.float16),
// 32-bit floats are always supported (see spec, 2.16.1, Data rules).
32 => true,
- 64 => Target.spirv.featureSetHas(target.cpu.features, .Float64),
+ 64 => self.spv.hasFeature(.float64),
else => false,
};
@@ -1511,7 +1511,7 @@ const NavGen = struct {
return try self.arrayType(1, elem_ty_id);
} else {
const result_id = try self.arrayType(total_len, elem_ty_id);
- if (self.spv.hasFeature(.Shader)) {
+ if (self.spv.hasFeature(.shader)) {
try self.spv.decorate(result_id, .{ .ArrayStride = .{
.array_stride = @intCast(elem_ty.abiSize(zcu)),
} });
@@ -1645,7 +1645,7 @@ const NavGen = struct {
continue;
}
- if (self.spv.hasFeature(.Shader)) {
+ if (self.spv.hasFeature(.shader)) {
try self.spv.decorateMember(result_id, index, .{ .Offset = .{
.byte_offset = @intCast(ty.structFieldOffset(field_index, zcu)),
} });
@@ -1748,10 +1748,10 @@ const NavGen = struct {
fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass {
return switch (as) {
- .generic => if (self.spv.hasFeature(.GenericPointer)) .Generic else .Function,
+ .generic => if (self.spv.hasFeature(.generic_pointer)) .Generic else .Function,
.shared => .Workgroup,
.local => .Function,
- .global => if (self.spv.hasFeature(.Shader)) .PhysicalStorageBuffer else .CrossWorkgroup,
+ .global => if (self.spv.hasFeature(.shader)) .PhysicalStorageBuffer else .CrossWorkgroup,
.constant => .UniformConstant,
.push_constant => .PushConstant,
.input => .Input,
@@ -2461,7 +2461,7 @@ const NavGen = struct {
// TODO: These instructions don't seem to be working
// properly for LLVM-based backends on OpenCL for 8- and
// 16-component vectors.
- .i_abs => if (self.spv.hasFeature(.Vector16) and v.components() >= 8) v.unroll() else v,
+ .i_abs => if (self.spv.hasFeature(.vector16) and v.components() >= 8) v.unroll() else v,
else => v,
};
};
@@ -3650,7 +3650,7 @@ const NavGen = struct {
// depending on the result type. Do that when
// bitCast is implemented for vectors.
// This is only relevant for Vulkan
- assert(self.spv.hasFeature(.Kernel)); // TODO
+ assert(self.spv.hasFeature(.kernel)); // TODO
return try self.normalize(abs_value, self.arithmeticTypeInfo(result_ty));
},
@@ -3968,7 +3968,7 @@ const NavGen = struct {
.float, .bool => unreachable,
}
- assert(self.spv.hasFeature(.Kernel)); // TODO
+ assert(self.spv.hasFeature(.kernel)); // TODO
const count = try self.buildUnary(op, operand);
@@ -4204,7 +4204,7 @@ const NavGen = struct {
defer self.gpa.free(ids);
const result_id = self.spv.allocId();
- if (self.spv.hasFeature(.Kernel)) {
+ if (self.spv.hasFeature(.kernel)) {
try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
.id_result_type = result_ty_id,
.id_result = result_id,
@@ -5290,7 +5290,7 @@ const NavGen = struct {
.initializer = options.initializer,
});
- if (self.spv.hasFeature(.Shader)) return var_id;
+ if (self.spv.hasFeature(.shader)) return var_id;
switch (options.storage_class) {
.Generic => {
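The spirv.zig changes are mechanical: feature tags move from capability-style CamelCase (.Kernel, .Int64) to Zig's usual snake_case (.kernel, .int64), and the remaining direct featureSetHas calls are funneled through self.spv.hasFeature. A standalone sketch of the underlying std.Target query, assuming the post-commit feature names:

```zig
const std = @import("std");

// Sketch of the renamed snake_case SPIR-V feature tags; featureSetHas is
// the generated std.Target helper that Module.hasFeature presumably wraps.
pub fn main() void {
    const spirv = std.Target.spirv;
    // Hypothetical feature set; real code reads target.cpu.features.
    const features = spirv.featureSet(&.{ .kernel, .int64 });
    std.debug.print("int64:   {}\n", .{spirv.featureSetHas(features, .int64)});
    std.debug.print("float64: {}\n", .{spirv.featureSetHas(features, .float64)});
}
```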
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index 2cfb590273..6bb79a2ebe 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -274,6 +274,16 @@ fn processInstruction(self: *Assembler) !void {
.OpEntryPoint => {
return self.fail(0, "cannot export entry points via OpEntryPoint, export the kernel using callconv(.Kernel)", .{});
},
+ .OpCapability => {
+ try self.spv.addCapability(@enumFromInt(self.inst.operands.items[0].value));
+ return;
+ },
+ .OpExtension => {
+ const ext_name_offset = self.inst.operands.items[0].string;
+ const ext_name = std.mem.sliceTo(self.inst.string_bytes.items[ext_name_offset..], 0);
+ try self.spv.addExtension(ext_name);
+ return;
+ },
.OpExtInstImport => blk: {
const set_name_offset = self.inst.operands.items[1].string;
const set_name = std.mem.sliceTo(self.inst.string_bytes.items[set_name_offset..], 0);
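The two new cases reuse the string-operand convention of the OpExtInstImport case above: names are stored NUL-terminated in a shared string_bytes buffer, and operands carry byte offsets into it. A self-contained sketch of that lookup (buffer contents are made up for illustration):

```zig
const std = @import("std");

// Minimal sketch of the string-operand lookup: names live NUL-terminated
// in one backing buffer; each operand stores a byte offset into it.
pub fn main() void {
    const string_bytes = "SPV_KHR_physical_storage_buffer\x00OpenCL.std\x00";
    const ext_name_offset: usize = 0; // would come from operands.items[0].string
    const ext_name = std.mem.sliceTo(string_bytes[ext_name_offset..], 0);
    std.debug.print("extension: {s}\n", .{ext_name});
}
```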
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 6d5b0afccc..317e32c878 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -183,8 +183,11 @@ cache: struct {
array_types: std.AutoHashMapUnmanaged(struct { IdRef, IdRef }, IdRef) = .empty,
function_types: DeepHashMap(struct { IdRef, []const IdRef }, IdRef) = .empty,
- builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
+ capabilities: std.AutoHashMapUnmanaged(spec.Capability, void) = .empty,
+ extensions: std.StringHashMapUnmanaged(void) = .empty,
+ extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
decorations: std.AutoHashMapUnmanaged(struct { IdRef, spec.Decoration }, void) = .empty,
+ builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
bool_const: [2]?IdRef = .{ null, null },
} = .{},
@@ -199,9 +202,6 @@ decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
/// The list of entry points that should be exported from this module.
entry_points: std.ArrayListUnmanaged(EntryPoint) = .empty,
-/// The list of extended instruction sets that should be imported.
-extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
-
pub fn init(gpa: Allocator, target: std.Target) Module {
const version_minor: u8 = blk: {
// Prefer higher versions
@@ -242,15 +242,16 @@ pub fn deinit(self: *Module) void {
self.cache.vector_types.deinit(self.gpa);
self.cache.array_types.deinit(self.gpa);
self.cache.function_types.deinit(self.gpa);
- self.cache.builtins.deinit(self.gpa);
+ self.cache.capabilities.deinit(self.gpa);
+ self.cache.extensions.deinit(self.gpa);
+ self.cache.extended_instruction_set.deinit(self.gpa);
self.cache.decorations.deinit(self.gpa);
+ self.cache.builtins.deinit(self.gpa);
self.decls.deinit(self.gpa);
self.decl_deps.deinit(self.gpa);
-
self.entry_points.deinit(self.gpa);
- self.extended_instruction_set.deinit(self.gpa);
self.arena.deinit();
self.* = undefined;
@@ -339,9 +340,61 @@ fn entryPoints(self: *Module) !Section {
}
pub fn finalize(self: *Module, a: Allocator) ![]Word {
+ // Emit capabilities and extensions
+ for (std.Target.spirv.all_features) |feature| {
+ if (self.target.cpu.features.isEnabled(feature.index)) {
+ const feature_tag: std.Target.spirv.Feature = @enumFromInt(feature.index);
+ switch (feature_tag) {
+ .v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
+ .int8 => try self.addCapability(.Int8),
+ .int16 => try self.addCapability(.Int16),
+ .int64 => try self.addCapability(.Int64),
+ .float16 => try self.addCapability(.Float16),
+ .float64 => try self.addCapability(.Float64),
+ .addresses => if (self.hasFeature(.shader)) {
+ try self.addCapability(.PhysicalStorageBufferAddresses);
+ try self.addExtension("SPV_KHR_physical_storage_buffer");
+ } else {
+ try self.addCapability(.Addresses);
+ },
+ .matrix => try self.addCapability(.Matrix),
+ .kernel => try self.addCapability(.Kernel),
+ .generic_pointer => try self.addCapability(.GenericPointer),
+ .vector16 => try self.addCapability(.Vector16),
+ .shader => try self.addCapability(.Shader),
+ }
+ }
+ }
+
+ // Emit memory model
+ const addressing_model: spec.AddressingModel = blk: {
+ if (self.hasFeature(.shader)) {
+ break :blk switch (self.target.cpu.arch) {
+ .spirv32 => .Logical, // TODO: I don't think this will ever be implemented.
+ .spirv64 => .PhysicalStorageBuffer64,
+ else => unreachable,
+ };
+ } else if (self.hasFeature(.kernel)) {
+ break :blk switch (self.target.cpu.arch) {
+ .spirv32 => .Physical32,
+ .spirv64 => .Physical64,
+ else => unreachable,
+ };
+ }
+
+ unreachable;
+ };
+ try self.sections.memory_model.emit(self.gpa, .OpMemoryModel, .{
+ .addressing_model = addressing_model,
+ .memory_model = switch (self.target.os.tag) {
+ .opencl => .OpenCL,
+ .vulkan, .opengl => .GLSL450,
+ else => unreachable,
+ },
+ });
+
// See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
// TODO: Audit calls to allocId() in this function to make it idempotent.
-
var entry_points = try self.entryPoints();
defer entry_points.deinit(self.gpa);
@@ -405,11 +458,23 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
try self.declareDeclDeps(decl_index, func.decl_deps.keys());
}
+pub fn addCapability(self: *Module, cap: spec.Capability) !void {
+ const entry = try self.cache.capabilities.getOrPut(self.gpa, cap);
+ if (entry.found_existing) return;
+ try self.sections.capabilities.emit(self.gpa, .OpCapability, .{ .capability = cap });
+}
+
+pub fn addExtension(self: *Module, ext: []const u8) !void {
+ const entry = try self.cache.extensions.getOrPut(self.gpa, ext);
+ if (entry.found_existing) return;
+ try self.sections.extensions.emit(self.gpa, .OpExtension, .{ .name = ext });
+}
+
/// Imports or returns the existing id of an extended instruction set
pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
assert(set != .core);
- const gop = try self.extended_instruction_set.getOrPut(self.gpa, set);
+ const gop = try self.cache.extended_instruction_set.getOrPut(self.gpa, set);
if (gop.found_existing) return gop.value_ptr.*;
const result_id = self.allocId();
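addCapability and addExtension rely on getOrPut, so repeated requests, whether from the feature loop in finalize or from inline assembly, emit each instruction only once. A standalone sketch of that idiom with a stand-in Capability enum:

```zig
const std = @import("std");

// Standalone sketch of the getOrPut dedup idiom behind addCapability and
// addExtension: getOrPut inserts and reports in one step, so each
// instruction is emitted at most once however often it is requested.
const Capability = enum { Kernel, Addresses, Int64 }; // stand-in enum

pub fn main() !void {
    const gpa = std.heap.page_allocator;
    var seen: std.AutoHashMapUnmanaged(Capability, void) = .empty;
    defer seen.deinit(gpa);

    const requests = [_]Capability{ .Kernel, .Int64, .Kernel, .Addresses };
    for (requests) |cap| {
        const entry = try seen.getOrPut(gpa, cap);
        if (entry.found_existing) continue; // already emitted
        std.debug.print("OpCapability {s}\n", .{@tagName(cap)});
    }
}
```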