author    Veikka Tuominen <git@vexu.eu>  2022-01-29 15:59:42 +0200
committer GitHub <noreply@github.com>    2022-01-29 15:59:42 +0200
commit    9f16d9ed07275209946b9e733c30be1bb0a1ae33 (patch)
tree      b4a226f51cb417231dd8e8835173210abc4e7b94 /src/codegen
parent    e288148f60770a2cfa4c64f832b599172c383d36 (diff)
parent    98ee39d1b0ed516428c611d8dc1e52d21c786f97 (diff)
Merge pull request #10665 from Snektron/spirv-improvements
spir-v improvements
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/spirv.zig          741
-rw-r--r--  src/codegen/spirv/Module.zig   428
-rw-r--r--  src/codegen/spirv/Section.zig  423
-rw-r--r--  src/codegen/spirv/spec.zig    1053
-rw-r--r--  src/codegen/spirv/type.zig     433
5 files changed, 2615 insertions, 463 deletions
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index b4f02a14a7..0d9d1ae223 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -4,9 +4,6 @@ const Target = std.Target;
const log = std.log.scoped(.codegen);
const assert = std.debug.assert;
-const spec = @import("spirv/spec.zig");
-const Opcode = spec.Opcode;
-
const Module = @import("../Module.zig");
const Decl = Module.Decl;
const Type = @import("../type.zig").Type;
@@ -15,187 +12,78 @@ const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-pub const Word = u32;
-pub const ResultId = u32;
+const spec = @import("spirv/spec.zig");
+const Opcode = spec.Opcode;
+const Word = spec.Word;
+const IdRef = spec.IdRef;
+const IdResult = spec.IdResult;
+const IdResultType = spec.IdResultType;
+
+const SpvModule = @import("spirv/Module.zig");
+const SpvSection = @import("spirv/Section.zig");
+const SpvType = @import("spirv/type.zig").Type;
-pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage);
-pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId);
+const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
const IncomingBlock = struct {
- src_label_id: ResultId,
- break_value_id: ResultId,
+ src_label_id: IdRef,
+ break_value_id: IdRef,
};
-pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct {
- label_id: ResultId,
+pub const BlockMap = std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
+ label_id: IdRef,
incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock),
});
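
Both maps move from managed to unmanaged variants here: an unmanaged container stores no allocator, so each allocating call and the final deinit take one explicitly (see DeclGen.deinit further down, which now threads self.spv.gpa through). A minimal standalone sketch of the difference, using only std:

    const std = @import("std");

    fn demo(gpa: std.mem.Allocator) !void {
        // Managed: the allocator is captured at init; deinit takes no argument.
        var managed = std.AutoHashMap(u32, u32).init(gpa);
        defer managed.deinit();
        try managed.put(1, 2);

        // Unmanaged: one field smaller, the allocator is passed on every call.
        var unmanaged = std.AutoHashMapUnmanaged(u32, u32){};
        defer unmanaged.deinit(gpa);
        try unmanaged.put(gpa, 1, 2);
    }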
-pub fn writeOpcode(code: *std.ArrayList(Word), opcode: Opcode, arg_count: u16) !void {
- const word_count: Word = arg_count + 1;
- try code.append((word_count << 16) | @enumToInt(opcode));
-}
-
-pub fn writeInstruction(code: *std.ArrayList(Word), opcode: Opcode, args: []const Word) !void {
- try writeOpcode(code, opcode, @intCast(u16, args.len));
- try code.appendSlice(args);
-}
-
-pub fn writeInstructionWithString(code: *std.ArrayList(Word), opcode: Opcode, args: []const Word, str: []const u8) !void {
- // Str needs to be written zero-terminated, so we need to add one to the length.
- const zero_terminated_len = str.len + 1;
- const str_words = (zero_terminated_len + @sizeOf(Word) - 1) / @sizeOf(Word);
-
- try writeOpcode(code, opcode, @intCast(u16, args.len + str_words));
- try code.ensureUnusedCapacity(args.len + str_words);
- code.appendSliceAssumeCapacity(args);
-
- // TODO: Not actually sure whether this is correct for big-endian.
- // See https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html#Literal
- var i: usize = 0;
- while (i < zero_terminated_len) : (i += @sizeOf(Word)) {
- var word: Word = 0;
-
- var j: usize = 0;
- while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
- word |= @as(Word, str[i + j]) << @intCast(std.math.Log2Int(Word), j * std.meta.bitCount(u8));
- }
-
- code.appendAssumeCapacity(word);
- }
-}
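
The removed helpers implemented SPIR-V's physical instruction encoding by hand: every instruction is a sequence of 32-bit words, and the first word packs the total word count (operands plus the opcode word itself) into the high 16 bits and the opcode into the low 16 bits. The new Section abstraction takes this over; for reference, a standalone sketch of that first word, mirroring the removed writeOpcode:

    const std = @import("std");

    const Word = u32;

    fn opcodeWord(opcode: u16, arg_count: u16) Word {
        const word_count: Word = @as(Word, arg_count) + 1; // +1 for the opcode word
        return (word_count << 16) | opcode;
    }

    test "opcode word layout" {
        // OpTypeVoid (opcode 19) with one result-id operand encodes as 0x0002_0013.
        try std.testing.expectEqual(@as(Word, 0x0002_0013), opcodeWord(19, 1));
    }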
-
-/// This structure represents a SPIR-V (binary) module being compiled, and keeps track of all relevant information.
-/// That includes the actual instructions, the current result-id bound, and data structures for querying result-id's
-/// of data which needs to be persistent over different calls to Decl code generation.
-pub const SPIRVModule = struct {
- /// A general-purpose allocator which may be used to allocate temporary resources required for compilation.
- gpa: Allocator,
-
- /// The parent module.
+/// This structure is used to compile a declaration, and contains all the meta-information needed to do so.
+pub const DeclGen = struct {
+ /// The Zig module that we are generating decls for.
module: *Module,
- /// SPIR-V instructions return result-ids. This variable holds the module-wide counter for these.
- next_result_id: ResultId,
-
- /// Code of the actual SPIR-V binary, divided into the relevant logical sections.
- /// Note: To save some bytes, these could also be unmanaged, but since there is only one instance of SPIRVModule
- /// and this removes some clutter in the rest of the backend, it's fine like this.
- binary: struct {
- /// OpCapability and OpExtension instructions (in that order).
- capabilities_and_extensions: std.ArrayList(Word),
-
- /// OpString, OpSourceExtension, OpSource, OpSourceContinued.
- debug_strings: std.ArrayList(Word),
-
- /// Type declaration instructions, constant instructions, global variable declarations, OpUndef instructions.
- types_globals_constants: std.ArrayList(Word),
-
- /// Regular functions.
- fn_decls: std.ArrayList(Word),
- },
-
- /// Global type cache to reduce the amount of generated types.
- types: TypeMap,
-
- /// Cache for results of OpString instructions for module file names fed to OpSource.
- /// Since OpString is pretty much only used for those, we don't need to keep track of all strings,
- /// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
- file_names: std.StringHashMap(ResultId),
-
- pub fn init(gpa: Allocator, module: *Module) SPIRVModule {
- return .{
- .gpa = gpa,
- .module = module,
- .next_result_id = 1, // 0 is an invalid SPIR-V result ID.
- .binary = .{
- .capabilities_and_extensions = std.ArrayList(Word).init(gpa),
- .debug_strings = std.ArrayList(Word).init(gpa),
- .types_globals_constants = std.ArrayList(Word).init(gpa),
- .fn_decls = std.ArrayList(Word).init(gpa),
- },
- .types = TypeMap.init(gpa),
- .file_names = std.StringHashMap(ResultId).init(gpa),
- };
- }
-
- pub fn deinit(self: *SPIRVModule) void {
- self.file_names.deinit();
- self.types.deinit();
-
- self.binary.fn_decls.deinit();
- self.binary.types_globals_constants.deinit();
- self.binary.debug_strings.deinit();
- self.binary.capabilities_and_extensions.deinit();
- }
-
- pub fn allocResultId(self: *SPIRVModule) Word {
- defer self.next_result_id += 1;
- return self.next_result_id;
- }
-
- pub fn resultIdBound(self: *SPIRVModule) Word {
- return self.next_result_id;
- }
-
- fn resolveSourceFileName(self: *SPIRVModule, decl: *Decl) !ResultId {
- const path = decl.getFileScope().sub_file_path;
- const result = try self.file_names.getOrPut(path);
- if (!result.found_existing) {
- result.value_ptr.* = self.allocResultId();
- try writeInstructionWithString(&self.binary.debug_strings, .OpString, &[_]Word{result.value_ptr.*}, path);
- try writeInstruction(&self.binary.debug_strings, .OpSource, &[_]Word{
- @enumToInt(spec.SourceLanguage.Unknown), // TODO: Register Zig source language.
- 0, // TODO: Zig version as u32?
- result.value_ptr.*,
- });
- }
+ /// The SPIR-V module that code should be put in.
+ spv: *SpvModule,
- return result.value_ptr.*;
- }
-};
-
-/// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that.
-pub const DeclGen = struct {
- /// The SPIR-V module code should be put in.
- spv: *SPIRVModule,
+ /// The decl we are currently generating code for.
+ decl: *Decl,
+ /// The intermediate code of the declaration we are currently generating. Note: If
+ /// the declaration is not a function, this value will be undefined!
air: Air,
+
+ /// The liveness analysis of the intermediate code for the declaration we are currently generating.
+ /// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
- args: std.ArrayList(ResultId),
+ args: std.ArrayListUnmanaged(IdRef) = .{},
/// A counter to keep track of how many `arg` instructions we've seen so far.
next_arg_index: u32,
/// A map keeping track of which instruction generated which result-id.
- inst_results: InstMap,
+ inst_results: InstMap = .{},
/// We need to keep track of result ids for block labels, as well as the 'incoming'
/// blocks for a block.
- blocks: BlockMap,
+ blocks: BlockMap = .{},
/// The label of the SPIR-V block we are currently generating.
- current_block_label_id: ResultId,
+ current_block_label_id: IdRef,
/// The actual instructions for this function. We need to declare all locals in
/// the first block, and because we don't know which locals there are going to be,
/// we're just going to generate everything after the locals-section in this array.
/// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the
- /// initial OpLabel. These will be generated into spv.binary.fn_decls directly.
- code: std.ArrayList(Word),
+ /// initial OpLabel. These will be generated into spv.sections.functions directly.
+ code: SpvSection = .{},
- /// The decl we are currently generating code for.
- decl: *Decl,
-
- /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message.
+ /// If `gen` returned `Error.CodegenFail`, this contains an explanatory message.
/// Memory is owned by `module.gpa`.
error_msg: ?*Module.ErrorMsg,
/// Possible errors the `gen` function may return.
- const Error = error{ AnalysisFail, OutOfMemory };
+ const Error = error{ CodegenFail, OutOfMemory };
/// This structure is used to return information about a type typically used for
/// arithmetic operations. These types may either be integers, floats, or a vector
@@ -244,18 +132,15 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized
/// and are only set when `gen` is called.
- pub fn init(spv: *SPIRVModule) DeclGen {
+ pub fn init(module: *Module, spv: *SpvModule) DeclGen {
return .{
+ .module = module,
.spv = spv,
+ .decl = undefined,
.air = undefined,
.liveness = undefined,
- .args = std.ArrayList(ResultId).init(spv.gpa),
.next_arg_index = undefined,
- .inst_results = InstMap.init(spv.gpa),
- .blocks = BlockMap.init(spv.gpa),
.current_block_label_id = undefined,
- .code = std.ArrayList(Word).init(spv.gpa),
- .decl = undefined,
.error_msg = undefined,
};
}
@@ -265,6 +150,7 @@ pub const DeclGen = struct {
/// returns such a reportable error, it is valid to be called again for a different decl.
pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
+ self.decl = decl;
self.air = air;
self.liveness = liveness;
self.args.items.len = 0;
@@ -272,35 +158,50 @@ pub const DeclGen = struct {
self.inst_results.clearRetainingCapacity();
self.blocks.clearRetainingCapacity();
self.current_block_label_id = undefined;
- self.code.items.len = 0;
- self.decl = decl;
+ self.code.reset();
self.error_msg = null;
- try self.genDecl();
- return self.error_msg;
+ self.genDecl() catch |err| switch (err) {
+ error.CodegenFail => return self.error_msg,
+ else => |others| return others,
+ };
+
+ return null;
}
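
The rewritten gen uses the common pattern of reporting codegen failures in-band: fail stores a Module.ErrorMsg and returns error.CodegenFail, which the top level turns into a returned diagnostic, while anything else (here only error.OutOfMemory) keeps propagating. A reduced standalone model of the pattern (names hypothetical, not this backend's actual types):

    const std = @import("std");

    const Gen = struct {
        error_msg: ?[]const u8 = null,

        const Error = error{ CodegenFail, OutOfMemory };

        fn fail(self: *Gen, msg: []const u8) Error {
            self.error_msg = msg; // the real backend allocates a Module.ErrorMsg here
            return error.CodegenFail;
        }

        fn genDecl(self: *Gen) Error!void {
            return self.fail("unsupported construct");
        }

        /// Reportable failures come back as a diagnostic, not as an error.
        fn gen(self: *Gen) !?[]const u8 {
            self.genDecl() catch |err| switch (err) {
                error.CodegenFail => return self.error_msg,
                else => |other| return other,
            };
            return null;
        }
    };

    test "codegen failure becomes a diagnostic" {
        var g = Gen{};
        try std.testing.expectEqualStrings("unsupported construct", (try g.gen()).?);
    }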
/// Free resources owned by the DeclGen.
pub fn deinit(self: *DeclGen) void {
- self.args.deinit();
- self.inst_results.deinit();
- self.blocks.deinit();
- self.code.deinit();
+ self.args.deinit(self.spv.gpa);
+ self.inst_results.deinit(self.spv.gpa);
+ self.blocks.deinit(self.spv.gpa);
+ self.code.deinit(self.spv.gpa);
}
+ /// Return the target which we are currently compiling for.
fn getTarget(self: *DeclGen) std.Target {
- return self.spv.module.getTarget();
+ return self.module.getTarget();
}
fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src: LazySrcLoc = .{ .node_offset = 0 };
const src_loc = src.toSrcLoc(self.decl);
- self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args);
- return error.AnalysisFail;
+ assert(self.error_msg == null);
+ self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
+ return error.CodegenFail;
}
- fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId {
+ fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
+ @setCold(true);
+ const src: LazySrcLoc = .{ .node_offset = 0 };
+ const src_loc = src.toSrcLoc(self.decl);
+ assert(self.error_msg == null);
+ self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "TODO (SPIR-V): " ++ format, args);
+ return error.CodegenFail;
+ }
+
+ /// Fetch the result-id for a previously generated instruction or constant.
+ fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
if (self.air.value(inst)) |val| {
return self.genConstant(self.air.typeOf(inst), val);
}
@@ -308,9 +209,13 @@ pub const DeclGen = struct {
return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
}
- fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void {
- try writeInstruction(&self.code, .OpLabel, &[_]Word{label_id});
- self.current_block_label_id = label_id;
+ /// Start a new SPIR-V block. Emits the label of the new block, and records which
+ /// block we are currently generating.
+ /// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
+ /// keep track of the previous block.
+ fn beginSpvBlock(self: *DeclGen, label_id: IdResult) !void {
+ try self.code.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id });
+ self.current_block_label_id = label_id.toRef();
}
/// SPIR-V requires enabling specific integer sizes through capabilities, and so if they are not enabled, we need
@@ -392,32 +297,37 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
// TODO: Maybe it's useful to also return this value.
const maybe_backing_bits = self.backingIntBits(int_info.bits);
- break :blk ArithmeticTypeInfo{ .bits = int_info.bits, .is_vector = false, .signedness = int_info.signedness, .class = if (maybe_backing_bits) |backing_bits|
- if (backing_bits == int_info.bits)
- ArithmeticTypeInfo.Class.integer
+ break :blk ArithmeticTypeInfo{
+ .bits = int_info.bits,
+ .is_vector = false,
+ .signedness = int_info.signedness,
+ .class = if (maybe_backing_bits) |backing_bits|
+ if (backing_bits == int_info.bits)
+ ArithmeticTypeInfo.Class.integer
+ else
+ ArithmeticTypeInfo.Class.strange_integer
else
- ArithmeticTypeInfo.Class.strange_integer
- else
- .composite_integer };
+ .composite_integer,
+ };
},
// As of yet, there is no vector support in the self-hosted compiler.
- .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
+ .Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
- else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
+ else => self.todo("implement arithmeticTypeInfo for {}", .{ty}),
};
}
/// Generate a constant representing `val`.
/// TODO: Deduplication?
- fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId {
+ fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!IdRef {
const target = self.getTarget();
- const code = &self.spv.binary.types_globals_constants;
- const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(ty);
+ const section = &self.spv.sections.types_globals_constants;
+ const result_id = self.spv.allocId();
+ const result_type_id = try self.resolveTypeId(ty);
if (val.isUndef()) {
- try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id });
- return result_id;
+ try section.emit(self.spv.gpa, .OpUndef, .{ .id_result_type = result_type_id, .id_result = result_id });
+ return result_id.toRef();
}
switch (ty.zigTypeTag()) {
@@ -425,101 +335,96 @@ pub const DeclGen = struct {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
// Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
- return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
+ return self.todo("implement composite int constants for {}", .{ty});
};
// We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
// SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
// might need to be updated.
assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
+
+ // Note, value is required to be sign-extended, so we don't need to mask off the upper bits.
+ // See https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#Literal
var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
- // Mask the low bits which make up the actual integer. This is to make sure that negative values
- // only use the actual bits of the type.
- // TODO: Should this be the backing type bits or the actual type bits?
- int_bits &= (@as(u64, 1) << @intCast(u6, backing_bits)) - 1;
-
- switch (backing_bits) {
- 0 => unreachable,
- 1...32 => try writeInstruction(code, .OpConstant, &[_]Word{
- result_type_id,
- result_id,
- @truncate(u32, int_bits),
- }),
- 33...64 => try writeInstruction(code, .OpConstant, &[_]Word{
- result_type_id,
- result_id,
- @truncate(u32, int_bits),
- @truncate(u32, int_bits >> @bitSizeOf(u32)),
- }),
- else => unreachable, // backing_bits is bounded by largestSupportedIntBits.
- }
+ const value: spec.LiteralContextDependentNumber = switch (backing_bits) {
+ 1...32 => .{ .uint32 = @truncate(u32, int_bits) },
+ 33...64 => .{ .uint64 = int_bits },
+ else => unreachable,
+ };
+
+ try section.emit(self.spv.gpa, .OpConstant, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .value = value,
+ });
},
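
The rewritten integer path leans on val.toSignedInt() returning a value already sign-extended to 64 bits, so negative values of narrow types need no masking; for 32 or fewer backing bits only the low word survives the @truncate. A standalone check of that bit-level behaviour:

    const std = @import("std");

    test "sign-extended literal words" {
        // An i8 holding -2, as the backend sees it after sign extension to 64 bits:
        const int_bits = @bitCast(u64, @as(i64, -2));
        // 1...32 backing bits: a single word is emitted; @truncate keeps the low 32 bits.
        try std.testing.expectEqual(@as(u32, 0xFFFF_FFFE), @truncate(u32, int_bits));
    }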
.Bool => {
- const opcode: Opcode = if (val.toBool()) .OpConstantTrue else .OpConstantFalse;
- try writeInstruction(code, opcode, &[_]Word{ result_type_id, result_id });
+ const operands = .{ .id_result_type = result_type_id, .id_result = result_id };
+ if (val.toBool()) {
+ try section.emit(self.spv.gpa, .OpConstantTrue, operands);
+ } else {
+ try section.emit(self.spv.gpa, .OpConstantFalse, operands);
+ }
},
.Float => {
// At this point we are guaranteed that the target floating point type is supported, otherwise the function
- // would have exited at genType(ty).
-
- // f16 and f32 require one word of storage. f64 requires 2, low-order first.
-
- switch (ty.floatBits(target)) {
- 16 => try writeInstruction(code, .OpConstant, &[_]Word{ result_type_id, result_id, @bitCast(u16, val.toFloat(f16)) }),
- 32 => try writeInstruction(code, .OpConstant, &[_]Word{ result_type_id, result_id, @bitCast(u32, val.toFloat(f32)) }),
- 64 => {
- const float_bits = @bitCast(u64, val.toFloat(f64));
- try writeInstruction(code, .OpConstant, &[_]Word{
- result_type_id,
- result_id,
- @truncate(u32, float_bits),
- @truncate(u32, float_bits >> @bitSizeOf(u32)),
- });
- },
- 128 => unreachable, // Filtered out in the call to genType.
- // TODO: Insert case for long double when the layout for that is determined.
+ // would have exited at resolveTypeId(ty).
+
+ const value: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
+ // Prevent upcasting to f32 by bitcasting and writing as a uint32.
+ 16 => .{ .uint32 = @bitCast(u16, val.toFloat(f16)) },
+ 32 => .{ .float32 = val.toFloat(f32) },
+ 64 => .{ .float64 = val.toFloat(f64) },
+ 128 => unreachable, // Filtered out in the call to resolveTypeId.
+ // TODO: Insert case for long double when the layout for that is determined?
else => unreachable,
- }
+ };
+
+ try section.emit(self.spv.gpa, .OpConstant, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .value = value,
+ });
},
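
The uint32 case for f16 is deliberate: a float32 literal field would coerce the f16 up to f32 and change the stored bit pattern, so the raw 16-bit pattern is bitcast and zero-extended as an integer instead. A standalone check (1.0 is 0x3C00 as f16 but 0x3F800000 as f32):

    const std = @import("std");

    test "f16 literal must not be upcast" {
        const h: f16 = 1.0;
        // What the backend writes: the raw half-precision bits, zero-extended.
        try std.testing.expectEqual(@as(u32, 0x3C00), @as(u32, @bitCast(u16, h)));
        // What an implicit upcast to f32 would have produced instead:
        try std.testing.expectEqual(@as(u32, 0x3F80_0000), @bitCast(u32, @as(f32, h)));
    }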
.Void => unreachable,
- else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}),
+ else => return self.todo("constant generation of type {}", .{ty}),
}
- return result_id;
+ return result_id.toRef();
}
- fn genType(self: *DeclGen, ty: Type) Error!ResultId {
- // We can't use getOrPut here so we can recursively generate types.
- if (self.spv.types.get(ty)) |already_generated| {
- return already_generated;
- }
+ /// Turn a Zig type into a SPIR-V Type, and return its type result-id.
+ fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
+ return self.spv.typeResultId(try self.resolveType(ty));
+ }
+ /// Turn a Zig type into a SPIR-V Type, and return a reference to it.
+ fn resolveType(self: *DeclGen, ty: Type) Error!SpvType.Ref {
const target = self.getTarget();
- const code = &self.spv.binary.types_globals_constants;
- const result_id = self.spv.allocResultId();
-
- switch (ty.zigTypeTag()) {
- .Void => try writeInstruction(code, .OpTypeVoid, &[_]Word{result_id}),
- .Bool => try writeInstruction(code, .OpTypeBool, &[_]Word{result_id}),
- .Int => {
+ return switch (ty.zigTypeTag()) {
+ .Void => try self.spv.resolveType(SpvType.initTag(.void)),
+ .Bool => blk: {
+ // TODO: SPIR-V booleans are opaque. For local variables this is fine, but for structs
+ // members we want to use integer types instead.
+ break :blk try self.spv.resolveType(SpvType.initTag(.bool));
+ },
+ .Int => blk: {
const int_info = ty.intInfo(target);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
- // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
- return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty});
+ // TODO: Integers too big for any native type are represented as "composite integers":
+ // An array of largestSupportedIntBits.
+ return self.todo("Implement composite int type {}", .{ty});
};
- // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
- try writeInstruction(code, .OpTypeInt, &[_]Word{
- result_id,
- backing_bits,
- switch (int_info.signedness) {
- .unsigned => 0,
- .signed => 1,
- },
- });
+ const payload = try self.spv.arena.create(SpvType.Payload.Int);
+ payload.* = .{
+ .width = backing_bits,
+ .signedness = int_info.signedness,
+ };
+ break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
},
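
SpvType follows the same tag-plus-payload encoding as the compiler's own Type: simple types are bare tags, while parameterized types allocate a payload in the module's arena and are referenced through its base field, so payloads live exactly as long as the module and need no individual cleanup. A reduced standalone model of the idea (not the actual SpvType layout):

    const std = @import("std");

    const TypeRepr = union(enum) {
        // Tag-only types carry no extra data...
        void,
        bool,
        // ...while parameterized types carry an arena-allocated payload.
        int: *const Int,

        const Int = struct {
            width: u16,
            signedness: std.builtin.Signedness,
        };
    };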
- .Float => {
+ .Float => blk: {
// We cannot (and do not want to) emulate floating-point types with other floating-point types the way we do with integers,
// so if the float is not supported, just return an error.
const bits = ty.floatBits(target);
@@ -535,37 +440,34 @@ pub const DeclGen = struct {
return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
- try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits });
+ const payload = try self.spv.arena.create(SpvType.Payload.Float);
+ payload.* = .{
+ .width = bits,
+ };
+ break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
},
- .Fn => {
+ .Fn => blk: {
// We only support zig-calling-convention functions, no varargs.
if (ty.fnCallingConvention() != .Unspecified)
return self.fail("Unsupported calling convention for SPIR-V", .{});
if (ty.fnIsVarArgs())
- return self.fail("VarArgs unsupported for SPIR-V", .{});
-
- // In order to avoid a temporary here, first generate all the required types and then simply look them up
- // when generating the function type.
- const params = ty.fnParamLen();
- var i: usize = 0;
- while (i < params) : (i += 1) {
- _ = try self.genType(ty.fnParamType(i));
- }
+ return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
- const return_type_id = try self.genType(ty.fnReturnType());
+ const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
+ for (param_types) |*param, i| {
+ param.* = try self.resolveType(ty.fnParamType(i));
+ }
- // result id + result type id + parameter type ids.
- try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen()));
- try code.appendSlice(&.{ result_id, return_type_id });
+ const return_type = try self.resolveType(ty.fnReturnType());
- i = 0;
- while (i < params) : (i += 1) {
- const param_type_id = self.spv.types.get(ty.fnParamType(i)).?;
- try code.append(param_type_id);
- }
+ const payload = try self.spv.arena.create(SpvType.Payload.Function);
+ payload.* = .{ .return_type = return_type, .parameters = param_types };
+ break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base));
+ },
+ .Pointer => {
+ // This type can now be properly implemented, but we still need to implement the storage classes as proper address spaces.
+ return self.todo("Implement type Pointer properly", .{});
},
- // When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
- .Pointer => return self.fail("Cannot create pointer with unknown storage class", .{}),
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
// which work on them), so simply use those.
@@ -575,41 +477,42 @@ pub const DeclGen = struct {
// is adequate at all for this.
// TODO: Vectors are not yet supported by the self-hosted compiler itself it seems.
- return self.fail("TODO: SPIR-V backend: implement type Vector", .{});
+ return self.todo("Implement type Vector", .{});
},
+
.Null,
.Undefined,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Type,
- => unreachable, // Must be const or comptime.
+ => unreachable, // Must be comptime.
.BoundFn => unreachable, // this type will be deleted from the language.
- else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}),
- }
-
- try self.spv.types.putNoClobber(ty, result_id);
- return result_id;
+ else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
+ };
}
/// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
/// TODO: The result of this needs to be cached.
- fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId {
+ fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !IdResultType {
assert(ty.zigTypeTag() == .Pointer);
- const code = &self.spv.binary.types_globals_constants;
- const result_id = self.spv.allocResultId();
+ const result_id = self.spv.allocId();
// TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
// if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
// These also relates to the pointer's address space.
- const child_id = try self.genType(ty.elemType());
+ const child_id = try self.resolveTypeId(ty.elemType());
- try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id });
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
+ .id_result = result_id,
+ .storage_class = storage_class,
+ .type = child_id.toRef(),
+ });
- return result_id;
+ return result_id.toResultType();
}
fn genDecl(self: *DeclGen) !void {
@@ -618,41 +521,47 @@ pub const DeclGen = struct {
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
- const prototype_id = try self.genType(decl.ty);
- try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{
- self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype.
- result_id,
- @bitCast(Word, spec.FunctionControl{}), // TODO: We can set inline here if the type requires it.
- prototype_id,
+ const prototype_id = try self.resolveTypeId(decl.ty);
+ try self.spv.sections.functions.emit(self.spv.gpa, .OpFunction, .{
+ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
+ .id_result = result_id,
+ .function_control = .{}, // TODO: We can set inline here if the type requires it.
+ .function_type = prototype_id.toRef(),
});
const params = decl.ty.fnParamLen();
var i: usize = 0;
- try self.args.ensureUnusedCapacity(params);
+ try self.args.ensureUnusedCapacity(self.spv.gpa, params);
while (i < params) : (i += 1) {
- const param_type_id = self.spv.types.get(decl.ty.fnParamType(i)).?;
- const arg_result_id = self.spv.allocResultId();
- try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionParameter, &[_]Word{ param_type_id, arg_result_id });
- self.args.appendAssumeCapacity(arg_result_id);
+ const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
+ const arg_result_id = self.spv.allocId();
+ try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionParameter, .{
+ .id_result_type = param_type_id,
+ .id_result = arg_result_id,
+ });
+ self.args.appendAssumeCapacity(arg_result_id.toRef());
}
// TODO: This could probably be done in a better way...
- const root_block_id = self.spv.allocResultId();
+ const root_block_id = self.spv.allocId();
- // We need to generate the label directly in the fn_decls here because we're going to write the local variables after
- // here. Since we're not generating in self.code, we're just going to bypass self.beginSPIRVBlock here.
- try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id});
- self.current_block_label_id = root_block_id;
+ // We need to generate the label directly in the functions section here because we're going to write the local variables after
+ // here. Since we're not generating in self.code, we're just going to bypass self.beginSpvBlock here.
+ try self.spv.sections.functions.emit(self.spv.gpa, .OpLabel, .{
+ .id_result = root_block_id,
+ });
+ self.current_block_label_id = root_block_id.toRef();
const main_body = self.air.getMainBody();
try self.genBody(main_body);
- // Append the actual code into the fn_decls section.
- try self.spv.binary.fn_decls.appendSlice(self.code.items);
- try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{});
+ // Append the actual code into the functions section.
+ try self.spv.sections.functions.append(self.spv.gpa, self.code);
+ try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionEnd, {});
} else {
- return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
+ // TODO
+ // return self.todo("generate decl type {}", .{decl.ty.zigTypeTag()});
}
}
@@ -666,9 +575,9 @@ pub const DeclGen = struct {
const air_tags = self.air.instructions.items(.tag);
const result_id = switch (air_tags[inst]) {
// zig fmt: off
- .add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
- .sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
- .mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
+ .add, .addwrap => try self.airArithOp(inst, .OpFAdd, .OpIAdd, .OpIAdd),
+ .sub, .subwrap => try self.airArithOp(inst, .OpFSub, .OpISub, .OpISub),
+ .mul, .mulwrap => try self.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul),
.bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
.bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
@@ -678,12 +587,12 @@ pub const DeclGen = struct {
.not => try self.airNot(inst),
- .cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}),
- .cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}),
- .cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}),
- .cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
- .cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}),
- .cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}),
+ .cmp_eq => try self.airCmp(inst, .OpFOrdEqual, .OpLogicalEqual, .OpIEqual),
+ .cmp_neq => try self.airCmp(inst, .OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual),
+ .cmp_gt => try self.airCmp(inst, .OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan),
+ .cmp_gte => try self.airCmp(inst, .OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual),
+ .cmp_lt => try self.airCmp(inst, .OpFOrdLessThan, .OpSLessThan, .OpULessThan),
+ .cmp_lte => try self.airCmp(inst, .OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual),
.arg => self.airArg(),
.alloc => try self.airAlloc(inst),
@@ -701,27 +610,30 @@ pub const DeclGen = struct {
.unreach => return self.airUnreach(),
// zig fmt: on
- else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{
+ else => |tag| return self.todo("implement AIR tag {s}", .{
@tagName(tag),
}),
};
- try self.inst_results.putNoClobber(inst, result_id);
+ try self.inst_results.putNoClobber(self.spv.gpa, inst, result_id);
}
- fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
+ fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, comptime opcode: Opcode) !IdRef {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
- const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(self.air.typeOfIndex(inst));
- try writeInstruction(&self.code, opcode, &[_]Word{
- result_type_id, result_id, lhs_id, rhs_id,
+ const result_id = self.spv.allocId();
+ const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
+ try self.code.emit(self.spv.gpa, opcode, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .operand_1 = lhs_id,
+ .operand_2 = rhs_id,
});
- return result_id;
+ return result_id.toRef();
}
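
The opcode parameters are now comptime because, with the regenerated spec.zig, the operand struct that Section.emit accepts is derived from the opcode at compile time; a runtime-indexed array like the old ops: [3]Opcode can no longer work, which is why airArithOp and airCmp below take three separate comptime opcodes and dispatch with a switch. The emit signature is presumably along these lines (a sketch, which may not match Section.zig exactly):

    pub fn emit(
        section: *Section,
        allocator: Allocator,
        comptime opcode: spec.Opcode,
        operands: opcode.Operands(), // operand struct type chosen by the opcode
    ) !void { ... }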
- fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+ fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !IdRef {
// LHS and RHS are guaranteed to have the same type, and AIR guarantees
// the result to be the same as the LHS and RHS, which matches SPIR-V.
const ty = self.air.typeOfIndex(inst);
@@ -729,8 +641,8 @@ pub const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
- const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(ty);
+ const result_id = self.spv.allocId();
+ const result_type_id = try self.resolveTypeId(ty);
assert(self.air.typeOf(bin_op.lhs).eql(ty));
assert(self.air.typeOf(bin_op.rhs).eql(ty));
@@ -741,10 +653,10 @@ pub const DeclGen = struct {
const opcode_index: usize = switch (info.class) {
.composite_integer => {
- return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+ return self.todo("binary operations for composite integers", .{});
},
.strange_integer => {
- return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
+ return self.todo("binary operations for strange integers", .{});
},
.integer => switch (info.signedness) {
.signed => @as(usize, 1),
@@ -753,21 +665,32 @@ pub const DeclGen = struct {
.float => 0,
else => unreachable,
};
- const opcode = ops[opcode_index];
- try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
+ const operands = .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .operand_1 = lhs_id,
+ .operand_2 = rhs_id,
+ };
+
+ switch (opcode_index) {
+ 0 => try self.code.emit(self.spv.gpa, fop, operands),
+ 1 => try self.code.emit(self.spv.gpa, sop, operands),
+ 2 => try self.code.emit(self.spv.gpa, uop, operands),
+ else => unreachable,
+ }
// TODO: Trap on overflow? Probably going to be annoying.
// TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.
- return result_id;
+ return result_id.toRef();
}
- fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+ fn airCmp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !IdRef {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
- const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(Type.initTag(.bool));
+ const result_id = self.spv.allocId();
+ const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
const op_ty = self.air.typeOf(bin_op.lhs);
assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));
@@ -777,10 +700,10 @@ pub const DeclGen = struct {
const opcode_index: usize = switch (info.class) {
.composite_integer => {
- return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+ return self.todo("binary operations for composite integers", .{});
},
.strange_integer => {
- return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{});
+ return self.todo("comparison for strange integers", .{});
},
.float => 0,
.bool => 1,
@@ -789,53 +712,71 @@ pub const DeclGen = struct {
.unsigned => @as(usize, 2),
},
};
- const opcode = ops[opcode_index];
- try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
- return result_id;
+ const operands = .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .operand_1 = lhs_id,
+ .operand_2 = rhs_id,
+ };
+
+ switch (opcode_index) {
+ 0 => try self.code.emit(self.spv.gpa, fop, operands),
+ 1 => try self.code.emit(self.spv.gpa, sop, operands),
+ 2 => try self.code.emit(self.spv.gpa, uop, operands),
+ else => unreachable,
+ }
+
+ return result_id.toRef();
}
- fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ fn airNot(self: *DeclGen, inst: Air.Inst.Index) !IdRef {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
- const result_id = self.spv.allocResultId();
- const result_type_id = try self.genType(Type.initTag(.bool));
- const opcode: Opcode = .OpLogicalNot;
- try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id });
- return result_id;
+ const result_id = self.spv.allocId();
+ const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
+ try self.code.emit(self.spv.gpa, .OpLogicalNot, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .operand = operand_id,
+ });
+ return result_id.toRef();
}
- fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !IdRef {
const ty = self.air.typeOfIndex(inst);
const storage_class = spec.StorageClass.Function;
const result_type_id = try self.genPointerType(ty, storage_class);
- const result_id = self.spv.allocResultId();
+ const result_id = self.spv.allocId();
- // Rather than generating into code here, we're just going to generate directly into the fn_decls section so that
+ // Rather than generating into code here, we're just going to generate directly into the functions section so that
// variable declarations appear in the first block of the function.
- try writeInstruction(&self.spv.binary.fn_decls, .OpVariable, &[_]Word{ result_type_id, result_id, @enumToInt(storage_class) });
-
- return result_id;
+ try self.spv.sections.functions.emit(self.spv.gpa, .OpVariable, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .storage_class = storage_class,
+ });
+ return result_id.toRef();
}
- fn airArg(self: *DeclGen) ResultId {
+ fn airArg(self: *DeclGen) IdRef {
defer self.next_arg_index += 1;
return self.args.items[self.next_arg_index];
}
- fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
- // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and
+ fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ // In AIR, a block doesn't so much define an entry point as a scope that breaks can jump out of and
// "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up
// the current block by first generating the code of the block, then a label, and then generate the rest of the current
// ir.Block in a different SPIR-V block.
- const label_id = self.spv.allocResultId();
+ const label_id = self.spv.allocId();
// 4 chosen as arbitrary initial capacity.
var incoming_blocks = try std.ArrayListUnmanaged(IncomingBlock).initCapacity(self.spv.gpa, 4);
- try self.blocks.putNoClobber(inst, .{
- .label_id = label_id,
+ try self.blocks.putNoClobber(self.spv.gpa, inst, .{
+ .label_id = label_id.toRef(),
.incoming_blocks = &incoming_blocks,
});
defer {
@@ -849,7 +790,7 @@ pub const DeclGen = struct {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
- try self.beginSPIRVBlock(label_id);
+ try self.beginSpvBlock(label_id);
// If this block didn't produce a value, simply return here.
if (!ty.hasRuntimeBits())
@@ -857,21 +798,21 @@ pub const DeclGen = struct {
// Combine the result from the blocks using the Phi instruction.
- const result_id = self.spv.allocResultId();
+ const result_id = self.spv.allocId();
// TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
- // are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
+ // are not allowed to be created from a phi node, and throw an error for those. For now, resolveTypeId already throws
// an error for pointers.
- const result_type_id = try self.genType(ty);
+ const result_type_id = try self.resolveTypeId(ty);
_ = result_type_id;
- try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
+ try self.code.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
for (incoming_blocks.items) |incoming| {
- try self.code.appendSlice(&[_]Word{ incoming.break_value_id, incoming.src_label_id });
+ self.code.writeOperand(spec.PairIdRefIdRef, .{ incoming.break_value_id, incoming.src_label_id });
}
- return result_id;
+ return result_id.toRef();
}
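
OpPhi takes a variable number of operands, so it cannot go through the typed emit; emitRaw instead reserves the instruction with an explicit operand word count, after which the operands are written one by one. The count follows the same arithmetic the removed writeOpcode call used:

    // Operand words for OpPhi: 1 (result type) + 1 (result id)
    // + 2 per incoming edge, since a PairIdRefIdRef occupies two words.
    // For example, 3 incoming blocks give 2 + 3 * 2 = 8 operand words,
    // plus the leading word that packs the count and the opcode.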
fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -885,7 +826,7 @@ pub const DeclGen = struct {
try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
}
- try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id});
+ try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id });
}
fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -896,63 +837,70 @@ pub const DeclGen = struct {
const condition_id = try self.resolve(pl_op.operand);
// These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
- const then_label_id = self.spv.allocResultId();
- const else_label_id = self.spv.allocResultId();
+ const then_label_id = self.spv.allocId();
+ const else_label_id = self.spv.allocId();
// TODO: We can generate OpSelectionMerge here if we know the target block that both of these will resolve to,
// but i don't know if those will always resolve to the same block.
- try writeInstruction(&self.code, .OpBranchConditional, &[_]Word{
- condition_id,
- then_label_id,
- else_label_id,
+ try self.code.emit(self.spv.gpa, .OpBranchConditional, .{
+ .condition = condition_id,
+ .true_label = then_label_id.toRef(),
+ .false_label = else_label_id.toRef(),
});
- try self.beginSPIRVBlock(then_label_id);
+ try self.beginSpvBlock(then_label_id);
try self.genBody(then_body);
- try self.beginSPIRVBlock(else_label_id);
+ try self.beginSpvBlock(else_label_id);
try self.genBody(else_body);
}
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
- try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column });
+ try self.code.emit(self.spv.gpa, .OpLine, .{
+ .file = src_fname_id,
+ .line = dbg_stmt.line,
+ .column = dbg_stmt.column,
+ });
}
- fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+ fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !IdRef {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const ty = self.air.typeOfIndex(inst);
- const result_type_id = try self.genType(ty);
- const result_id = self.spv.allocResultId();
+ const result_type_id = try self.resolveTypeId(ty);
+ const result_id = self.spv.allocId();
- const operands = if (ty.isVolatilePtr())
- &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
- else
- &[_]Word{ result_type_id, result_id, operand_id };
+ const access = spec.MemoryAccess.Extended{
+ .Volatile = ty.isVolatilePtr(),
+ };
- try writeInstruction(&self.code, .OpLoad, operands);
+ try self.code.emit(self.spv.gpa, .OpLoad, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .pointer = operand_id,
+ .memory_access = access,
+ });
- return result_id;
+ return result_id.toRef();
}
fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const loop_label_id = self.spv.allocResultId();
+ const loop_label_id = self.spv.allocId();
// Jump to the loop entry point
- try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
+ try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() });
// TODO: Look into OpLoopMerge.
-
- try self.beginSPIRVBlock(loop_label_id);
+ try self.beginSpvBlock(loop_label_id);
try self.genBody(body);
- try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
+ try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() });
}
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -960,9 +908,9 @@ pub const DeclGen = struct {
const operand_ty = self.air.typeOf(operand);
if (operand_ty.hasRuntimeBits()) {
const operand_id = try self.resolve(operand);
- try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
+ try self.code.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
} else {
- try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+ try self.code.emit(self.spv.gpa, .OpReturn, {});
}
}
@@ -972,15 +920,18 @@ pub const DeclGen = struct {
const src_val_id = try self.resolve(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
- const operands = if (lhs_ty.isVolatilePtr())
- &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
- else
- &[_]Word{ dst_ptr_id, src_val_id };
+ const access = spec.MemoryAccess.Extended{
+ .Volatile = lhs_ty.isVolatilePtr(),
+ };
- try writeInstruction(&self.code, .OpStore, operands);
+ try self.code.emit(self.spv.gpa, .OpStore, .{
+ .pointer = dst_ptr_id,
+ .object = src_val_id,
+ .memory_access = access,
+ });
}
fn airUnreach(self: *DeclGen) !void {
- try writeInstruction(&self.code, .OpUnreachable, &[_]Word{});
+ try self.code.emit(self.spv.gpa, .OpUnreachable, {});
}
};
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
new file mode 100644
index 0000000000..59ed1b9b78
--- /dev/null
+++ b/src/codegen/spirv/Module.zig
@@ -0,0 +1,428 @@
+//! This structure represents a SPIR-V (binary) module being compiled, and keeps track of all relevant information.
+//! That includes the actual instructions, the current result-id bound, and data structures for querying result-id's
+//! of data which needs to be persistent over different calls to Decl code generation.
+//!
+//! A SPIR-V binary module supports both little- and big-endian layout. The layout is detected by the magic word in the
+//! header. Therefore, we can ignore any byte order throughout the implementation, and just use the host byte order,
+//! and make this a problem for the consumer.
+const Module = @This();
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+const ZigDecl = @import("../../Module.zig").Decl;
+
+const spec = @import("spec.zig");
+const Word = spec.Word;
+const IdRef = spec.IdRef;
+const IdResult = spec.IdResult;
+const IdResultType = spec.IdResultType;
+
+const Section = @import("Section.zig");
+const Type = @import("type.zig").Type;
+
+const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
+
+/// A general-purpose allocator which may be used to allocate resources for this module.
+gpa: Allocator,
+
+/// An arena allocator used to store things that have the same lifetime as this module.
+arena: Allocator,
+
+/// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
+sections: struct {
+ /// Capability instructions
+ capabilities: Section = .{},
+ /// OpExtension instructions
+ extensions: Section = .{},
+ // OpExtInstImport instructions - skip for now.
+ // memory model defined by target, not required here.
+ /// OpEntryPoint instructions.
+ entry_points: Section = .{},
+ // OpExecutionMode and OpExecutionModeId instructions - skip for now.
+ /// OpString, OpSourceExtension, OpSource, OpSourceContinued.
+ debug_strings: Section = .{},
+ // OpName, OpMemberName - skip for now.
+ // OpModuleProcessed - skip for now.
+ /// Annotation instructions (OpDecorate etc).
+ annotations: Section = .{},
+ /// Type declarations, constants, global variables
+ /// Below this section, OpLine and OpNoLine are allowed.
+ types_globals_constants: Section = .{},
+ // Functions without a body - skip for now.
+ /// Regular function definitions.
+ functions: Section = .{},
+} = .{},
+
+/// SPIR-V instructions return result-ids. This variable holds the module-wide counter for these.
+next_result_id: Word,
+
+/// Cache for results of OpString instructions for module file names fed to OpSource.
+/// Since OpString is pretty much only used for those, we don't need to keep track of all strings,
+/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
+source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},
+
+/// SPIR-V type cache. Note that according to SPIR-V spec section 2.8, Types and Variables, non-pointer
+/// non-aggregate types (which include matrices and vectors) must have a _unique_ representation in
+/// the final binary.
+/// Note: Uses ArrayHashMap which is insertion ordered, so that we may refer to other types by index (Type.Ref).
+type_cache: TypeCache = .{},
+
+pub fn init(gpa: Allocator, arena: Allocator) Module {
+ return .{
+ .gpa = gpa,
+ .arena = arena,
+ .next_result_id = 1, // 0 is an invalid SPIR-V result id, so start counting at 1.
+ };
+}
+
+pub fn deinit(self: *Module) void {
+ self.sections.capabilities.deinit(self.gpa);
+ self.sections.extensions.deinit(self.gpa);
+ self.sections.entry_points.deinit(self.gpa);
+ self.sections.debug_strings.deinit(self.gpa);
+ self.sections.annotations.deinit(self.gpa);
+ self.sections.types_globals_constants.deinit(self.gpa);
+ self.sections.functions.deinit(self.gpa);
+
+ self.source_file_names.deinit(self.gpa);
+ self.type_cache.deinit(self.gpa);
+
+ self.* = undefined;
+}
+
+pub fn allocId(self: *Module) spec.IdResult {
+ defer self.next_result_id += 1;
+ return .{ .id = self.next_result_id };
+}
+
+pub fn idBound(self: Module) Word {
+ return self.next_result_id;
+}
+
+/// Emit this module as a spir-v binary.
+pub fn flush(self: Module, file: std.fs.File) !void {
+ // See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
+
+ const header = [_]Word{
+ spec.magic_number,
+ (spec.version.major << 16) | (spec.version.minor << 8),
+ 0, // TODO: Register Zig compiler magic number.
+ self.idBound(),
+ 0, // Schema (currently reserved for future use)
+ };
+
+ // Note: needs to be kept in order according to section 2.3!
+ const buffers = &[_][]const Word{
+ &header,
+ self.sections.capabilities.toWords(),
+ self.sections.extensions.toWords(),
+ self.sections.entry_points.toWords(),
+ self.sections.debug_strings.toWords(),
+ self.sections.annotations.toWords(),
+ self.sections.types_globals_constants.toWords(),
+ self.sections.functions.toWords(),
+ };
+
+ var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
+ var file_size: u64 = 0;
+ for (iovc_buffers) |*iovc, i| {
+ // Note, since spir-v supports both little and big endian we can ignore byte order here and
+ // just treat the words as a sequence of bytes.
+ const bytes = std.mem.sliceAsBytes(buffers[i]);
+ iovc.* = .{ .iov_base = bytes.ptr, .iov_len = bytes.len };
+ file_size += bytes.len;
+ }
+
+ try file.seekTo(0);
+ try file.setEndPos(file_size);
+ try file.pwritevAll(&iovc_buffers, 0);
+}
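+
The flush routine gathers the header and every section into a fixed set of iovecs and writes them with one pwritevAll, so the module is never copied into a single contiguous buffer. A sketch of how a caller might drive the module (output_file and gpa are assumed to exist; this driver is not part of the commit):

    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();

    var spv = Module.init(gpa, arena_state.allocator());
    defer spv.deinit();

    // ... emit types, constants and functions into spv.sections ...

    try spv.flush(output_file);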
+
+/// Fetch the result-id of an OpString instruction that encodes the path of the source
+/// file of the decl. This function may also emit an OpSource with source-level information regarding
+/// the decl.
+pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
+ const path = decl.getFileScope().sub_file_path;
+ const result = try self.source_file_names.getOrPut(self.gpa, path);
+ if (!result.found_existing) {
+ const file_result_id = self.allocId();
+ result.value_ptr.* = file_result_id.toRef();
+ try self.sections.debug_strings.emit(self.gpa, .OpString, .{
+ .id_result = file_result_id,
+ .string = path,
+ });
+
+ try self.sections.debug_strings.emit(self.gpa, .OpSource, .{
+ .source_language = .Unknown, // TODO: Register Zig source language.
+ .version = 0, // TODO: Zig version as u32?
+ .file = file_result_id.toRef(),
+ .source = null, // TODO: Store actual source also?
+ });
+ }
+
+ return result.value_ptr.*;
+}
+
+/// Fetch a result-id for a spir-v type. This function deduplicates the type as appropriate,
+/// and returns a cached version if that exists.
+/// Note: This function does not attempt to perform any validation on the type.
+/// The type is emitted in a shallow fashion; any child types should already
+/// be emitted at this point.
+pub fn resolveType(self: *Module, ty: Type) !Type.Ref {
+ const result = try self.type_cache.getOrPut(self.gpa, ty);
+ if (!result.found_existing) {
+ result.value_ptr.* = try self.emitType(ty);
+ }
+ return result.index;
+}
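+
Type.Ref works because type_cache is an ArrayHashMap: entries are stored in insertion order, the reference is simply the entry index, and the associated result-id sits at the same index in values(). The same index-as-handle idiom in a standalone form:

    const std = @import("std");

    fn handleDemo(gpa: std.mem.Allocator) !void {
        var cache = std.AutoArrayHashMapUnmanaged(u32, []const u8){};
        defer cache.deinit(gpa);

        const entry = try cache.getOrPut(gpa, 42);
        if (!entry.found_existing) entry.value_ptr.* = "fresh";

        const ref = entry.index; // stable handle, analogous to Type.Ref
        std.debug.assert(std.mem.eql(u8, cache.values()[ref], "fresh"));
    }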
+
+pub fn resolveTypeId(self: *Module, ty: Type) !IdRef {
+ return self.typeResultId(try self.resolveType(ty));
+}
+
+/// Get the result-id of a particular type, by reference. Asserts type_ref is valid.
+pub fn typeResultId(self: Module, type_ref: Type.Ref) IdResultType {
+ return self.type_cache.values()[type_ref];
+}
+
+/// Get the result-id of a particular type as IdRef, by Type.Ref. Asserts type_ref is valid.
+pub fn typeRefId(self: Module, type_ref: Type.Ref) IdRef {
+ return self.type_cache.values()[type_ref].toRef();
+}
+
+/// Unconditionally emit a spir-v type into the appropriate section.
+/// Note: If this function is called with a type that is already generated, it may yield an invalid module
+/// as non-pointer non-aggregate types must be unique!
+/// Note: This function does not attempt to perform any validation on the type.
+/// The type is emitted in a shallow fashion; any child types should already
+/// be emitted at this point.
+pub fn emitType(self: *Module, ty: Type) !IdResultType {
+ const result_id = self.allocId();
+ const ref_id = result_id.toRef();
+ const types = &self.sections.types_globals_constants;
+ const annotations = &self.sections.annotations;
+ const result_id_operand = .{ .id_result = result_id };
+
+ switch (ty.tag()) {
+ .void => try types.emit(self.gpa, .OpTypeVoid, result_id_operand),
+ .bool => try types.emit(self.gpa, .OpTypeBool, result_id_operand),
+ .int => try types.emit(self.gpa, .OpTypeInt, .{
+ .id_result = result_id,
+ .width = ty.payload(.int).width,
+ .signedness = switch (ty.payload(.int).signedness) {
+ .unsigned => @as(spec.LiteralInteger, 0),
+ .signed => 1,
+ },
+ }),
+ .float => try types.emit(self.gpa, .OpTypeFloat, .{
+ .id_result = result_id,
+ .width = ty.payload(.float).width,
+ }),
+ .vector => try types.emit(self.gpa, .OpTypeVector, .{
+ .id_result = result_id,
+ .component_type = self.typeResultId(ty.childType()).toRef(),
+ .component_count = ty.payload(.vector).component_count,
+ }),
+ .matrix => try types.emit(self.gpa, .OpTypeMatrix, .{
+ .id_result = result_id,
+ .column_type = self.typeResultId(ty.childType()).toRef(),
+ .column_count = ty.payload(.matrix).column_count,
+ }),
+ .image => {
+ const info = ty.payload(.image);
+ try types.emit(self.gpa, .OpTypeImage, .{
+ .id_result = result_id,
+ .sampled_type = self.typeResultId(ty.childType()).toRef(),
+ .dim = info.dim,
+ .depth = @enumToInt(info.depth),
+ .arrayed = @boolToInt(info.arrayed),
+ .ms = @boolToInt(info.multisampled),
+ .sampled = @enumToInt(info.sampled),
+ .image_format = info.format,
+ .access_qualifier = info.access_qualifier,
+ });
+ },
+ .sampler => try types.emit(self.gpa, .OpTypeSampler, result_id_operand),
+ .sampled_image => try types.emit(self.gpa, .OpTypeSampledImage, .{
+ .id_result = result_id,
+ .image_type = self.typeResultId(ty.childType()).toRef(),
+ }),
+ .array => {
+ const info = ty.payload(.array);
+ assert(info.length != 0);
+ try types.emit(self.gpa, .OpTypeArray, .{
+ .id_result = result_id,
+ .element_type = self.typeResultId(ty.childType()).toRef(),
+ .length = .{ .id = 0 }, // TODO: info.length must be emitted as a constant!
+ });
+ if (info.array_stride != 0) {
+ try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
+ }
+ },
+ .runtime_array => {
+ const info = ty.payload(.runtime_array);
+ try types.emit(self.gpa, .OpTypeRuntimeArray, .{
+ .id_result = result_id,
+ .element_type = self.typeResultId(ty.childType()).toRef(),
+ });
+ if (info.array_stride != 0) {
+ try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
+ }
+ },
+ .@"struct" => {
+ const info = ty.payload(.@"struct");
+ try types.emitRaw(self.gpa, .OpTypeStruct, 1 + info.members.len);
+ types.writeOperand(IdResult, result_id);
+ for (info.members) |member| {
+ types.writeOperand(IdRef, self.typeResultId(member.ty).toRef());
+ }
+ try self.decorateStruct(ref_id, info);
+ },
+ .@"opaque" => try types.emit(self.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = ty.payload(.@"opaque").name,
+ }),
+ .pointer => {
+ const info = ty.payload(.pointer);
+ try types.emit(self.gpa, .OpTypePointer, .{
+ .id_result = result_id,
+ .storage_class = info.storage_class,
+ .type = self.typeResultId(ty.childType()).toRef(),
+ });
+ if (info.array_stride != 0) {
+ try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
+ }
+ if (info.alignment) |alignment| {
+ try annotations.decorate(self.gpa, ref_id, .{ .Alignment = .{ .alignment = alignment } });
+ }
+ if (info.max_byte_offset) |max_byte_offset| {
+ try annotations.decorate(self.gpa, ref_id, .{ .MaxByteOffset = .{ .max_byte_offset = max_byte_offset } });
+ }
+ },
+ .function => {
+ const info = ty.payload(.function);
+ try types.emitRaw(self.gpa, .OpTypeFunction, 2 + info.parameters.len);
+ types.writeOperand(IdResult, result_id);
+ types.writeOperand(IdRef, self.typeResultId(info.return_type).toRef());
+ for (info.parameters) |parameter_type| {
+ types.writeOperand(IdRef, self.typeResultId(parameter_type).toRef());
+ }
+ },
+ .event => try types.emit(self.gpa, .OpTypeEvent, result_id_operand),
+ .device_event => try types.emit(self.gpa, .OpTypeDeviceEvent, result_id_operand),
+ .reserve_id => try types.emit(self.gpa, .OpTypeReserveId, result_id_operand),
+ .queue => try types.emit(self.gpa, .OpTypeQueue, result_id_operand),
+ .pipe => try types.emit(self.gpa, .OpTypePipe, .{
+ .id_result = result_id,
+ .qualifier = ty.payload(.pipe).qualifier,
+ }),
+ .pipe_storage => try types.emit(self.gpa, .OpTypePipeStorage, result_id_operand),
+ .named_barrier => try types.emit(self.gpa, .OpTypeNamedBarrier, result_id_operand),
+ }
+
+ return result_id.toResultType();
+}
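+// Worked example (informal): for a 32-bit unsigned integer type, the switch above emits
+//   %result_id = OpTypeInt 32 0
+// into the types/globals/constants section, with the signedness operand encoded as
+// 0 for .unsigned and 1 for .signed.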
+
+fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct) !void {
+ const annotations = &self.sections.annotations;
+
+ // Decorations for the struct type itself.
+ if (info.decorations.block)
+ try annotations.decorate(self.gpa, target, .Block);
+ if (info.decorations.buffer_block)
+ try annotations.decorate(self.gpa, target, .BufferBlock);
+ if (info.decorations.glsl_shared)
+ try annotations.decorate(self.gpa, target, .GLSLShared);
+ if (info.decorations.glsl_packed)
+ try annotations.decorate(self.gpa, target, .GLSLPacked);
+ if (info.decorations.c_packed)
+ try annotations.decorate(self.gpa, target, .CPacked);
+
+ // Decorations for the struct members.
+ const extra = info.member_decoration_extra;
+ var extra_i: u32 = 0;
+ for (info.members) |member, i| {
+ const d = member.decorations;
+ const index = @intCast(Word, i);
+ switch (d.matrix_layout) {
+ .row_major => try annotations.decorateMember(self.gpa, target, index, .RowMajor),
+ .col_major => try annotations.decorateMember(self.gpa, target, index, .ColMajor),
+ .none => {},
+ }
+ if (d.matrix_layout != .none) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .MatrixStride = .{ .matrix_stride = extra[extra_i] },
+ });
+ extra_i += 1;
+ }
+
+ if (d.no_perspective)
+ try annotations.decorateMember(self.gpa, target, index, .NoPerspective);
+ if (d.flat)
+ try annotations.decorateMember(self.gpa, target, index, .Flat);
+ if (d.patch)
+ try annotations.decorateMember(self.gpa, target, index, .Patch);
+ if (d.centroid)
+ try annotations.decorateMember(self.gpa, target, index, .Centroid);
+ if (d.sample)
+ try annotations.decorateMember(self.gpa, target, index, .Sample);
+ if (d.invariant)
+ try annotations.decorateMember(self.gpa, target, index, .Invariant);
+ if (d.@"volatile")
+ try annotations.decorateMember(self.gpa, target, index, .Volatile);
+ if (d.coherent)
+ try annotations.decorateMember(self.gpa, target, index, .Coherent);
+ if (d.non_writable)
+ try annotations.decorateMember(self.gpa, target, index, .NonWritable);
+ if (d.non_readable)
+ try annotations.decorateMember(self.gpa, target, index, .NonReadable);
+
+ if (d.builtin) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .BuiltIn = .{ .built_in = @intToEnum(spec.BuiltIn, extra[extra_i]) },
+ });
+ extra_i += 1;
+ }
+ if (d.stream) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .Stream = .{ .stream_number = extra[extra_i] },
+ });
+ extra_i += 1;
+ }
+ if (d.location) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .Location = .{ .location = extra[extra_i] },
+ });
+ extra_i += 1;
+ }
+ if (d.component) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .Component = .{ .component = extra[extra_i] },
+ });
+ extra_i += 1;
+ }
+ if (d.xfb_buffer) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .XfbBuffer = .{ .xfb_buffer_number = extra[extra_i] },
+ });
+ extra_i += 1;
+ }
+ if (d.xfb_stride) {
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .XfbStride = .{ .xfb_stride = extra[extra_i] },
+ });
+ extra_i += 1;
+ }
+ if (d.user_semantic) {
+ const len = extra[extra_i];
+ extra_i += 1;
+ const semantic = @ptrCast([*]const u8, &extra[extra_i])[0..len];
+ try annotations.decorateMember(self.gpa, target, index, .{
+ .UserSemantic = .{ .semantic = semantic },
+ });
+ // Advance past the string bytes, which are packed into whole words.
+ extra_i += std.math.divCeil(u32, len, @sizeOf(u32)) catch unreachable;
+ }
+ }
+}
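+// Note on the extra array layout (inferred from the loop above): decoration payloads are
+// stored consecutively per member, in field order. For example, a member with
+// matrix_layout = .row_major and a location decoration contributes [matrix_stride, location];
+// a user_semantic decoration stores its byte length followed by the string bytes packed
+// into words.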
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
new file mode 100644
index 0000000000..83383e442a
--- /dev/null
+++ b/src/codegen/spirv/Section.zig
@@ -0,0 +1,423 @@
+//! Represents a section or subsection of instructions in a SPIR-V binary. Instructions can be appended
+//! to separate sections, which can later be merged into the final binary.
+const Section = @This();
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const testing = std.testing;
+
+const spec = @import("spec.zig");
+const Word = spec.Word;
+const DoubleWord = std.meta.Int(.unsigned, @bitSizeOf(Word) * 2);
+const Log2Word = std.math.Log2Int(Word);
+
+const Opcode = spec.Opcode;
+
+/// The instructions in this section. Memory is owned externally, by the
+/// Module associated with this Section.
+instructions: std.ArrayListUnmanaged(Word) = .{},
+
+pub fn deinit(section: *Section, allocator: Allocator) void {
+ section.instructions.deinit(allocator);
+ section.* = undefined;
+}
+
+/// Clear the instructions in this section
+pub fn reset(section: *Section) void {
+ section.instructions.items.len = 0;
+}
+
+pub fn toWords(section: Section) []Word {
+ return section.instructions.items;
+}
+
+/// Append the instructions from another section into this section.
+pub fn append(section: *Section, allocator: Allocator, other_section: Section) !void {
+ try section.instructions.appendSlice(allocator, other_section.instructions.items);
+}
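+// Illustrative merge flow (hypothetical caller; the section names follow Module.zig):
+//   var final = Section{};
+//   defer final.deinit(allocator);
+//   try final.append(allocator, module.sections.debug_strings);
+//   try final.append(allocator, module.sections.annotations);
+//   // ... remaining sections in SPIR-V logical-layout order, then emit final.toWords().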
+
+/// Write an instruction opcode and word count; operands are to be inserted manually.
+pub fn emitRaw(
+ section: *Section,
+ allocator: Allocator,
+ opcode: Opcode,
+ operands: usize, // opcode itself not included
+) !void {
+ const word_count = 1 + operands;
+ try section.instructions.ensureUnusedCapacity(allocator, word_count);
+ section.writeWord(@intCast(Word, word_count << 16) | @enumToInt(opcode));
+}
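+// emitRaw() reserves capacity and writes only the opcode/word-count word; the caller must
+// then write exactly `operands` words. Sketch, mirroring the variable-length OpTypeStruct
+// emission in Module.zig (member_ids is a hypothetical slice of IdRef):
+//   try section.emitRaw(allocator, .OpTypeStruct, 1 + member_ids.len);
+//   section.writeOperand(spec.IdResult, result_id);
+//   for (member_ids) |id| section.writeOperand(spec.IdRef, id);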
+
+pub fn emit(
+ section: *Section,
+ allocator: Allocator,
+ comptime opcode: spec.Opcode,
+ operands: opcode.Operands(),
+) !void {
+ const word_count = instructionSize(opcode, operands);
+ try section.instructions.ensureUnusedCapacity(allocator, word_count);
+ section.writeWord(@intCast(Word, word_count << 16) | @enumToInt(opcode));
+ section.writeOperands(opcode.Operands(), operands);
+}
+
+/// Decorate a result-id.
+pub fn decorate(
+ section: *Section,
+ allocator: Allocator,
+ target: spec.IdRef,
+ decoration: spec.Decoration.Extended,
+) !void {
+ try section.emit(allocator, .OpDecorate, .{
+ .target = target,
+ .decoration = decoration,
+ });
+}
+
+/// Decorate a result-id which is a member of some struct.
+pub fn decorateMember(
+ section: *Section,
+ allocator: Allocator,
+ structure_type: spec.IdRef,
+ member: u32,
+ decoration: spec.Decoration.Extended,
+) !void {
+ try section.emit(allocator, .OpMemberDecorate, .{
+ .structure_type = structure_type,
+ .member = member,
+ .decoration = decoration,
+ });
+}
+
+pub fn writeWord(section: *Section, word: Word) void {
+ section.instructions.appendAssumeCapacity(word);
+}
+
+pub fn writeWords(section: *Section, words: []const Word) void {
+ section.instructions.appendSliceAssumeCapacity(words);
+}
+
+fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
+ section.writeWords(&.{
+ @truncate(Word, dword),
+ @truncate(Word, dword >> @bitSizeOf(Word)),
+ });
+}
+
+fn writeOperands(section: *Section, comptime Operands: type, operands: Operands) void {
+ const fields = switch (@typeInfo(Operands)) {
+ .Struct => |info| info.fields,
+ .Void => return,
+ else => unreachable,
+ };
+
+ inline for (fields) |field| {
+ section.writeOperand(field.field_type, @field(operands, field.name));
+ }
+}
+
+pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) void {
+ switch (Operand) {
+ spec.IdResultType, spec.IdResult, spec.IdRef => section.writeWord(operand.id),
+
+ spec.LiteralInteger => section.writeWord(operand),
+
+ spec.LiteralString => section.writeString(operand),
+
+ spec.LiteralContextDependentNumber => section.writeContextDependentNumber(operand),
+
+ spec.LiteralExtInstInteger => section.writeWord(operand.inst),
+
+ // TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec json,
+ // so it most likely needs to be altered into something that can actually describe the entire
+ // instruction in which it is used.
+ spec.LiteralSpecConstantOpInteger => section.writeWord(@enumToInt(operand.opcode)),
+
+ spec.PairLiteralIntegerIdRef => section.writeWords(&.{ operand.value, operand.label.id }),
+ spec.PairIdRefLiteralInteger => section.writeWords(&.{ operand.target.id, operand.member }),
+ spec.PairIdRefIdRef => section.writeWords(&.{ operand[0].id, operand[1].id }),
+
+ else => switch (@typeInfo(Operand)) {
+ .Enum => section.writeWord(@enumToInt(operand)),
+ .Optional => |info| if (operand) |child| {
+ section.writeOperand(info.child, child);
+ },
+ .Pointer => |info| {
+ std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
+ for (operand) |item| {
+ section.writeOperand(info.child, item);
+ }
+ },
+ .Struct => |info| {
+ if (info.layout == .Packed) {
+ section.writeWord(@bitCast(Word, operand));
+ } else {
+ section.writeExtendedMask(Operand, operand);
+ }
+ },
+ .Union => section.writeExtendedUnion(Operand, operand),
+ else => unreachable,
+ },
+ }
+}
+
+fn writeString(section: *Section, str: []const u8) void {
+ // TODO: Not actually sure whether this is correct for big-endian.
+ // See https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html#Literal
+ const zero_terminated_len = str.len + 1;
+ var i: usize = 0;
+ while (i < zero_terminated_len) : (i += @sizeOf(Word)) {
+ var word: Word = 0;
+
+ var j: usize = 0;
+ while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
+ word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * std.meta.bitCount(u8));
+ }
+
+ section.instructions.appendAssumeCapacity(word);
+ }
+}
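+// Worked example: writeString("abc") packs into a single word, little-endian within the
+// word and including the zero terminator:
+//   word 0 = 'a' | 'b' << 8 | 'c' << 16 | 0 << 24 = 0x00636261
+// A 4-byte string would occupy a second word containing only the terminator.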
+
+fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
+ switch (operand) {
+ .int32 => |int| section.writeWord(@bitCast(Word, int)),
+ .uint32 => |int| section.writeWord(@bitCast(Word, int)),
+ .int64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
+ .uint64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
+ .float32 => |float| section.writeWord(@bitCast(Word, float)),
+ .float64 => |float| section.writeDoubleWord(@bitCast(DoubleWord, float)),
+ }
+}
+
+fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
+ var mask: Word = 0;
+ inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
+ switch (@typeInfo(field.field_type)) {
+ .Optional => if (@field(operand, field.name) != null) {
+ mask |= 1 << @intCast(u5, bit);
+ },
+ .Bool => if (@field(operand, field.name)) {
+ mask |= 1 << @intCast(u5, bit);
+ },
+ else => unreachable,
+ }
+ }
+
+ if (mask == 0) {
+ return;
+ }
+
+ section.writeWord(mask);
+
+ inline for (@typeInfo(Operand).Struct.fields) |field| {
+ switch (@typeInfo(field.field_type)) {
+ .Optional => |info| if (@field(operand, field.name)) |child| {
+ section.writeOperands(info.child, child);
+ },
+ .Bool => {},
+ else => unreachable,
+ }
+ }
+}
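+// Example encoding (assuming the Extended struct's field order matches the spec's bit
+// positions): LoopControl with .Unroll (bit 0) and .DependencyLength (bit 3) set first
+// writes the mask word 0b1001, then the DependencyLength literal, as exercised by the
+// "extended mask" test at the bottom of this file.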
+
+fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operand) void {
+ const tag = std.meta.activeTag(operand);
+ section.writeWord(@enumToInt(tag));
+
+ inline for (@typeInfo(Operand).Union.fields) |field| {
+ if (@field(Operand, field.name) == tag) {
+ section.writeOperands(field.field_type, @field(operand, field.name));
+ return;
+ }
+ }
+ unreachable;
+}
+
+fn instructionSize(comptime opcode: spec.Opcode, operands: opcode.Operands()) usize {
+ return 1 + operandsSize(opcode.Operands(), operands);
+}
+
+fn operandsSize(comptime Operands: type, operands: Operands) usize {
+ const fields = switch (@typeInfo(Operands)) {
+ .Struct => |info| info.fields,
+ .Void => return 0,
+ else => unreachable,
+ };
+
+ var total: usize = 0;
+ inline for (fields) |field| {
+ total += operandSize(field.field_type, @field(operands, field.name));
+ }
+
+ return total;
+}
+
+fn operandSize(comptime Operand: type, operand: Operand) usize {
+ return switch (Operand) {
+ spec.IdResultType,
+ spec.IdResult,
+ spec.IdRef,
+ spec.LiteralInteger,
+ spec.LiteralExtInstInteger,
+ => 1,
+
+ spec.LiteralString => std.math.divCeil(usize, operand.len + 1, @sizeOf(Word)) catch unreachable, // Add one for zero-terminator
+
+ spec.LiteralContextDependentNumber => switch (operand) {
+ .int32, .uint32, .float32 => @as(usize, 1),
+ .int64, .uint64, .float64 => @as(usize, 2),
+ },
+
+ // TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec
+ // json, so it most likely needs to be altered into something that can actually
+ // describe the entire instruction in which it is used.
+ spec.LiteralSpecConstantOpInteger => 1,
+
+ spec.PairLiteralIntegerIdRef,
+ spec.PairIdRefLiteralInteger,
+ spec.PairIdRefIdRef,
+ => 2,
+
+ else => switch (@typeInfo(Operand)) {
+ .Enum => 1,
+ .Optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
+ .Pointer => |info| blk: {
+ std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
+ var total: usize = 0;
+ for (operand) |item| {
+ total += operandSize(info.child, item);
+ }
+ break :blk total;
+ },
+ .Struct => |info| if (info.layout == .Packed) 1 else extendedMaskSize(Operand, operand),
+ .Union => extendedUnionSize(Operand, operand),
+ else => unreachable,
+ },
+ };
+}
+
+fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
+ var total: usize = 0;
+ var any_set = false;
+ inline for (@typeInfo(Operand).Struct.fields) |field| {
+ switch (@typeInfo(field.field_type)) {
+ .Optional => |info| if (@field(operand, field.name)) |child| {
+ total += operandsSize(info.child, child);
+ any_set = true;
+ },
+ .Bool => if (@field(operand, field.name)) {
+ any_set = true;
+ },
+ else => unreachable,
+ }
+ }
+ if (!any_set) {
+ return 0;
+ }
+ return total + 1; // Add one for the mask itself.
+}
+
+fn extendedUnionSize(comptime Operand: type, operand: Operand) usize {
+ const tag = std.meta.activeTag(operand);
+ inline for (@typeInfo(Operand).Union.fields) |field| {
+ if (@field(Operand, field.name) == tag) {
+ // Add one for the tag itself.
+ return 1 + operandsSize(field.field_type, @field(operand, field.name));
+ }
+ }
+ unreachable;
+}
+
+test "SPIR-V Section emit() - no operands" {
+ var section = Section{};
+ defer section.deinit(std.testing.allocator);
+
+ try section.emit(std.testing.allocator, .OpNop, {});
+
+ try testing.expect(section.instructions.items[0] == (@as(Word, 1) << 16) | @enumToInt(Opcode.OpNop));
+}
+
+test "SPIR-V Section emit() - simple" {
+ var section = Section{};
+ defer section.deinit(std.testing.allocator);
+
+ try section.emit(std.testing.allocator, .OpUndef, .{
+ .id_result_type = .{ .id = 0 },
+ .id_result = .{ .id = 1 },
+ });
+
+ try testing.expectEqualSlices(Word, &.{
+ (@as(Word, 3) << 16) | @enumToInt(Opcode.OpUndef),
+ 0,
+ 1,
+ }, section.instructions.items);
+}
+
+test "SPIR-V Section emit() - string" {
+ var section = Section{};
+ defer section.deinit(std.testing.allocator);
+
+ try section.emit(std.testing.allocator, .OpSource, .{
+ .source_language = .Unknown,
+ .version = 123,
+ .file = .{ .id = 456 },
+ .source = "pub fn main() void {}",
+ });
+
+ try testing.expectEqualSlices(Word, &.{
+ (@as(Word, 10) << 16) | @enumToInt(Opcode.OpSource),
+ @enumToInt(spec.SourceLanguage.Unknown),
+ 123,
+ 456,
+ std.mem.bytesToValue(Word, "pub "),
+ std.mem.bytesToValue(Word, "fn m"),
+ std.mem.bytesToValue(Word, "ain("),
+ std.mem.bytesToValue(Word, ") vo"),
+ std.mem.bytesToValue(Word, "id {"),
+ std.mem.bytesToValue(Word, "}\x00\x00\x00"),
+ }, section.instructions.items);
+}
+
+test "SPIR-V Section emit()- extended mask" {
+ var section = Section{};
+ defer section.deinit(std.testing.allocator);
+
+ try section.emit(std.testing.allocator, .OpLoopMerge, .{
+ .merge_block = .{ .id = 10 },
+ .continue_target = .{ .id = 20 },
+ .loop_control = .{
+ .Unroll = true,
+ .DependencyLength = .{
+ .literal_integer = 2,
+ },
+ },
+ });
+
+ try testing.expectEqualSlices(Word, &.{
+ (@as(Word, 5) << 16) | @enumToInt(Opcode.OpLoopMerge),
+ 10,
+ 20,
+ @bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }),
+ 2,
+ }, section.instructions.items);
+}
+
+test "SPIR-V Section emit() - extended union" {
+ var section = Section{};
+ defer section.deinit(std.testing.allocator);
+
+ try section.emit(std.testing.allocator, .OpExecutionMode, .{
+ .entry_point = .{ .id = 888 },
+ .mode = .{
+ .LocalSize = .{ .x_size = 4, .y_size = 8, .z_size = 16 },
+ },
+ });
+
+ try testing.expectEqualSlices(Word, &.{
+ (@as(Word, 6) << 16) | @enumToInt(Opcode.OpExecutionMode),
+ 888,
+ @enumToInt(spec.ExecutionMode.LocalSize),
+ 4,
+ 8,
+ 16,
+ }, section.instructions.items);
+}
diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig
index 26d1925646..bb7453c006 100644
--- a/src/codegen/spirv/spec.zig
+++ b/src/codegen/spirv/spec.zig
@@ -1,8 +1,46 @@
//! This file is auto-generated by tools/gen_spirv_spec.zig.
const Version = @import("std").builtin.Version;
+
+pub const Word = u32;
+pub const IdResultType = struct {
+ id: Word,
+ pub fn toRef(self: IdResultType) IdRef {
+ return .{ .id = self.id };
+ }
+};
+pub const IdResult = struct {
+ id: Word,
+ pub fn toRef(self: IdResult) IdRef {
+ return .{ .id = self.id };
+ }
+ pub fn toResultType(self: IdResult) IdResultType {
+ return .{ .id = self.id };
+ }
+};
+pub const IdRef = struct { id: Word };
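+// These are distinct wrapper structs rather than plain Word aliases so that result-ids,
+// result-type ids and plain references cannot be mixed up accidentally; conversions are
+// explicit via toRef() and toResultType().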
+
+pub const IdMemorySemantics = IdRef;
+pub const IdScope = IdRef;
+
+pub const LiteralInteger = Word;
+pub const LiteralString = []const u8;
+pub const LiteralContextDependentNumber = union(enum) {
+ int32: i32,
+ uint32: u32,
+ int64: i64,
+ uint64: u64,
+ float32: f32,
+ float64: f64,
+};
+pub const LiteralExtInstInteger = struct { inst: Word };
+pub const LiteralSpecConstantOpInteger = struct { opcode: Opcode };
+pub const PairLiteralIntegerIdRef = struct { value: LiteralInteger, label: IdRef };
+pub const PairIdRefLiteralInteger = struct { target: IdRef, member: LiteralInteger };
+pub const PairIdRefIdRef = [2]IdRef;
+
pub const version = Version{ .major = 1, .minor = 5, .patch = 4 };
-pub const magic_number: u32 = 0x07230203;
+pub const magic_number: Word = 0x07230203;
pub const Opcode = enum(u16) {
OpNop = 0,
OpUndef = 1,
@@ -381,11 +419,11 @@ pub const Opcode = enum(u16) {
OpImageSampleFootprintNV = 5283,
OpGroupNonUniformPartitionNV = 5296,
OpWritePackedPrimitiveIndices4x8NV = 5299,
- OpReportIntersectionNV = 5334,
+ OpReportIntersectionKHR = 5334,
OpIgnoreIntersectionNV = 5335,
OpTerminateRayNV = 5336,
OpTraceNV = 5337,
- OpTypeAccelerationStructureNV = 5341,
+ OpTypeAccelerationStructureKHR = 5341,
OpExecuteCallableNV = 5344,
OpTypeCooperativeMatrixNV = 5358,
OpCooperativeMatrixLoadNV = 5359,
@@ -580,10 +618,592 @@ pub const Opcode = enum(u16) {
OpTypeStructContinuedINTEL = 6090,
OpConstantCompositeContinuedINTEL = 6091,
OpSpecConstantCompositeContinuedINTEL = 6092,
- _,
- const OpReportIntersectionKHR: Opcode = .OpReportIntersectionNV;
- const OpTypeAccelerationStructureKHR: Opcode = .OpTypeAccelerationStructureNV;
+ pub const OpReportIntersectionNV = Opcode.OpReportIntersectionKHR;
+ pub const OpTypeAccelerationStructureNV = Opcode.OpTypeAccelerationStructureKHR;
+ pub const OpDecorateStringGOOGLE = Opcode.OpDecorateString;
+ pub const OpMemberDecorateStringGOOGLE = Opcode.OpMemberDecorateString;
+
+ pub fn Operands(comptime self: Opcode) type {
+ return switch (self) {
+ .OpNop => void,
+ .OpUndef => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSourceContinued => struct { continued_source: LiteralString },
+ .OpSource => struct { source_language: SourceLanguage, version: LiteralInteger, file: ?IdRef = null, source: ?LiteralString = null },
+ .OpSourceExtension => struct { extension: LiteralString },
+ .OpName => struct { target: IdRef, name: LiteralString },
+ .OpMemberName => struct { type: IdRef, member: LiteralInteger, name: LiteralString },
+ .OpString => struct { id_result: IdResult, string: LiteralString },
+ .OpLine => struct { file: IdRef, line: LiteralInteger, column: LiteralInteger },
+ .OpExtension => struct { name: LiteralString },
+ .OpExtInstImport => struct { id_result: IdResult, name: LiteralString },
+ .OpExtInst => struct { id_result_type: IdResultType, id_result: IdResult, set: IdRef, instruction: LiteralExtInstInteger, id_ref_4: []const IdRef = &.{} },
+ .OpMemoryModel => struct { addressing_model: AddressingModel, memory_model: MemoryModel },
+ .OpEntryPoint => struct { execution_model: ExecutionModel, entry_point: IdRef, name: LiteralString, interface: []const IdRef = &.{} },
+ .OpExecutionMode => struct { entry_point: IdRef, mode: ExecutionMode.Extended },
+ .OpCapability => struct { capability: Capability },
+ .OpTypeVoid => struct { id_result: IdResult },
+ .OpTypeBool => struct { id_result: IdResult },
+ .OpTypeInt => struct { id_result: IdResult, width: LiteralInteger, signedness: LiteralInteger },
+ .OpTypeFloat => struct { id_result: IdResult, width: LiteralInteger },
+ .OpTypeVector => struct { id_result: IdResult, component_type: IdRef, component_count: LiteralInteger },
+ .OpTypeMatrix => struct { id_result: IdResult, column_type: IdRef, column_count: LiteralInteger },
+ .OpTypeImage => struct { id_result: IdResult, sampled_type: IdRef, dim: Dim, depth: LiteralInteger, arrayed: LiteralInteger, ms: LiteralInteger, sampled: LiteralInteger, image_format: ImageFormat, access_qualifier: ?AccessQualifier = null },
+ .OpTypeSampler => struct { id_result: IdResult },
+ .OpTypeSampledImage => struct { id_result: IdResult, image_type: IdRef },
+ .OpTypeArray => struct { id_result: IdResult, element_type: IdRef, length: IdRef },
+ .OpTypeRuntimeArray => struct { id_result: IdResult, element_type: IdRef },
+ .OpTypeStruct => struct { id_result: IdResult, id_ref: []const IdRef = &.{} },
+ .OpTypeOpaque => struct { id_result: IdResult, literal_string: LiteralString },
+ .OpTypePointer => struct { id_result: IdResult, storage_class: StorageClass, type: IdRef },
+ .OpTypeFunction => struct { id_result: IdResult, return_type: IdRef, id_ref_2: []const IdRef = &.{} },
+ .OpTypeEvent => struct { id_result: IdResult },
+ .OpTypeDeviceEvent => struct { id_result: IdResult },
+ .OpTypeReserveId => struct { id_result: IdResult },
+ .OpTypeQueue => struct { id_result: IdResult },
+ .OpTypePipe => struct { id_result: IdResult, qualifier: AccessQualifier },
+ .OpTypeForwardPointer => struct { pointer_type: IdRef, storage_class: StorageClass },
+ .OpConstantTrue => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpConstantFalse => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpConstant => struct { id_result_type: IdResultType, id_result: IdResult, value: LiteralContextDependentNumber },
+ .OpConstantComposite => struct { id_result_type: IdResultType, id_result: IdResult, constituents: []const IdRef = &.{} },
+ .OpConstantSampler => struct { id_result_type: IdResultType, id_result: IdResult, sampler_addressing_mode: SamplerAddressingMode, param: LiteralInteger, sampler_filter_mode: SamplerFilterMode },
+ .OpConstantNull => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSpecConstantTrue => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSpecConstantFalse => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSpecConstant => struct { id_result_type: IdResultType, id_result: IdResult, value: LiteralContextDependentNumber },
+ .OpSpecConstantComposite => struct { id_result_type: IdResultType, id_result: IdResult, constituents: []const IdRef = &.{} },
+ .OpSpecConstantOp => struct { id_result_type: IdResultType, id_result: IdResult, opcode: LiteralSpecConstantOpInteger },
+ .OpFunction => struct { id_result_type: IdResultType, id_result: IdResult, function_control: FunctionControl, function_type: IdRef },
+ .OpFunctionParameter => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpFunctionEnd => void,
+ .OpFunctionCall => struct { id_result_type: IdResultType, id_result: IdResult, function: IdRef, id_ref_3: []const IdRef = &.{} },
+ .OpVariable => struct { id_result_type: IdResultType, id_result: IdResult, storage_class: StorageClass, initializer: ?IdRef = null },
+ .OpImageTexelPointer => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, sample: IdRef },
+ .OpLoad => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory_access: ?MemoryAccess.Extended = null },
+ .OpStore => struct { pointer: IdRef, object: IdRef, memory_access: ?MemoryAccess.Extended = null },
+ .OpCopyMemory => struct { target: IdRef, source: IdRef, memory_access_2: ?MemoryAccess.Extended = null, memory_access_3: ?MemoryAccess.Extended = null },
+ .OpCopyMemorySized => struct { target: IdRef, source: IdRef, size: IdRef, memory_access_3: ?MemoryAccess.Extended = null, memory_access_4: ?MemoryAccess.Extended = null },
+ .OpAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, indexes: []const IdRef = &.{} },
+ .OpInBoundsAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, indexes: []const IdRef = &.{} },
+ .OpPtrAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, element: IdRef, indexes: []const IdRef = &.{} },
+ .OpArrayLength => struct { id_result_type: IdResultType, id_result: IdResult, structure: IdRef, array_member: LiteralInteger },
+ .OpGenericPtrMemSemantics => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpInBoundsPtrAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, element: IdRef, indexes: []const IdRef = &.{} },
+ .OpDecorate => struct { target: IdRef, decoration: Decoration.Extended },
+ .OpMemberDecorate => struct { structure_type: IdRef, member: LiteralInteger, decoration: Decoration.Extended },
+ .OpDecorationGroup => struct { id_result: IdResult },
+ .OpGroupDecorate => struct { decoration_group: IdRef, targets: []const IdRef = &.{} },
+ .OpGroupMemberDecorate => struct { decoration_group: IdRef, targets: []const PairIdRefLiteralInteger = &.{} },
+ .OpVectorExtractDynamic => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, index: IdRef },
+ .OpVectorInsertDynamic => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, component: IdRef, index: IdRef },
+ .OpVectorShuffle => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, components: []const LiteralInteger = &.{} },
+ .OpCompositeConstruct => struct { id_result_type: IdResultType, id_result: IdResult, constituents: []const IdRef = &.{} },
+ .OpCompositeExtract => struct { id_result_type: IdResultType, id_result: IdResult, composite: IdRef, indexes: []const LiteralInteger = &.{} },
+ .OpCompositeInsert => struct { id_result_type: IdResultType, id_result: IdResult, object: IdRef, composite: IdRef, indexes: []const LiteralInteger = &.{} },
+ .OpCopyObject => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpTranspose => struct { id_result_type: IdResultType, id_result: IdResult, matrix: IdRef },
+ .OpSampledImage => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, sampler: IdRef },
+ .OpImageSampleImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSampleDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSampleProjImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleProjExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSampleProjDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleProjDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageFetch => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, component: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageDrefGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageRead => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageWrite => struct { image: IdRef, coordinate: IdRef, texel: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImage => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef },
+ .OpImageQueryFormat => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef },
+ .OpImageQueryOrder => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef },
+ .OpImageQuerySizeLod => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, level_of_detail: IdRef },
+ .OpImageQuerySize => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef },
+ .OpImageQueryLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef },
+ .OpImageQueryLevels => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef },
+ .OpImageQuerySamples => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef },
+ .OpConvertFToU => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef },
+ .OpConvertFToS => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef },
+ .OpConvertSToF => struct { id_result_type: IdResultType, id_result: IdResult, signed_value: IdRef },
+ .OpConvertUToF => struct { id_result_type: IdResultType, id_result: IdResult, unsigned_value: IdRef },
+ .OpUConvert => struct { id_result_type: IdResultType, id_result: IdResult, unsigned_value: IdRef },
+ .OpSConvert => struct { id_result_type: IdResultType, id_result: IdResult, signed_value: IdRef },
+ .OpFConvert => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef },
+ .OpQuantizeToF16 => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef },
+ .OpConvertPtrToU => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpSatConvertSToU => struct { id_result_type: IdResultType, id_result: IdResult, signed_value: IdRef },
+ .OpSatConvertUToS => struct { id_result_type: IdResultType, id_result: IdResult, unsigned_value: IdRef },
+ .OpConvertUToPtr => struct { id_result_type: IdResultType, id_result: IdResult, integer_value: IdRef },
+ .OpPtrCastToGeneric => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpGenericCastToPtr => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpGenericCastToPtrExplicit => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, storage: StorageClass },
+ .OpBitcast => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpSNegate => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpFNegate => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpIAdd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFAdd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpISub => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFSub => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpIMul => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFMul => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUDiv => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSDiv => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFDiv => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUMod => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSRem => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSMod => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFRem => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFMod => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpVectorTimesScalar => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, scalar: IdRef },
+ .OpMatrixTimesScalar => struct { id_result_type: IdResultType, id_result: IdResult, matrix: IdRef, scalar: IdRef },
+ .OpVectorTimesMatrix => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, matrix: IdRef },
+ .OpMatrixTimesVector => struct { id_result_type: IdResultType, id_result: IdResult, matrix: IdRef, vector: IdRef },
+ .OpMatrixTimesMatrix => struct { id_result_type: IdResultType, id_result: IdResult, leftmatrix: IdRef, rightmatrix: IdRef },
+ .OpOuterProduct => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef },
+ .OpDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef },
+ .OpIAddCarry => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpISubBorrow => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUMulExtended => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSMulExtended => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpAny => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef },
+ .OpAll => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef },
+ .OpIsNan => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef },
+ .OpIsInf => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef },
+ .OpIsFinite => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef },
+ .OpIsNormal => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef },
+ .OpSignBitSet => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef },
+ .OpLessOrGreater => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef, y: IdRef },
+ .OpOrdered => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef, y: IdRef },
+ .OpUnordered => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef, y: IdRef },
+ .OpLogicalEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpLogicalNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpLogicalOr => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpLogicalAnd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpLogicalNot => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpSelect => struct { id_result_type: IdResultType, id_result: IdResult, condition: IdRef, object_1: IdRef, object_2: IdRef },
+ .OpIEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpINotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpULessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSLessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpULessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpSLessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFOrdEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFUnordEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFOrdNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFUnordNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFOrdLessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFUnordLessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFOrdGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFUnordGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFOrdLessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFUnordLessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFOrdGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpFUnordGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpShiftRightLogical => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, shift: IdRef },
+ .OpShiftRightArithmetic => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, shift: IdRef },
+ .OpShiftLeftLogical => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, shift: IdRef },
+ .OpBitwiseOr => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpBitwiseXor => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpBitwiseAnd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpNot => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpBitFieldInsert => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, insert: IdRef, offset: IdRef, count: IdRef },
+ .OpBitFieldSExtract => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, offset: IdRef, count: IdRef },
+ .OpBitFieldUExtract => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, offset: IdRef, count: IdRef },
+ .OpBitReverse => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef },
+ .OpBitCount => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef },
+ .OpDPdx => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpDPdy => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpFwidth => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpDPdxFine => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpDPdyFine => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpFwidthFine => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpDPdxCoarse => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpDPdyCoarse => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpFwidthCoarse => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef },
+ .OpEmitVertex => void,
+ .OpEndPrimitive => void,
+ .OpEmitStreamVertex => struct { stream: IdRef },
+ .OpEndStreamPrimitive => struct { stream: IdRef },
+ .OpControlBarrier => struct { execution: IdScope, memory: IdScope, semantics: IdMemorySemantics },
+ .OpMemoryBarrier => struct { memory: IdScope, semantics: IdMemorySemantics },
+ .OpAtomicLoad => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics },
+ .OpAtomicStore => struct { pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicExchange => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicCompareExchange => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, equal: IdMemorySemantics, unequal: IdMemorySemantics, value: IdRef, comparator: IdRef },
+ .OpAtomicCompareExchangeWeak => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, equal: IdMemorySemantics, unequal: IdMemorySemantics, value: IdRef, comparator: IdRef },
+ .OpAtomicIIncrement => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics },
+ .OpAtomicIDecrement => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics },
+ .OpAtomicIAdd => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicISub => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicSMin => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicUMin => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicSMax => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicUMax => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicAnd => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicOr => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicXor => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpPhi => struct { id_result_type: IdResultType, id_result: IdResult, pair_id_ref_id_ref: []const PairIdRefIdRef = &.{} },
+ .OpLoopMerge => struct { merge_block: IdRef, continue_target: IdRef, loop_control: LoopControl.Extended },
+ .OpSelectionMerge => struct { merge_block: IdRef, selection_control: SelectionControl },
+ .OpLabel => struct { id_result: IdResult },
+ .OpBranch => struct { target_label: IdRef },
+ .OpBranchConditional => struct { condition: IdRef, true_label: IdRef, false_label: IdRef, branch_weights: []const LiteralInteger = &.{} },
+ .OpSwitch => struct { selector: IdRef, default: IdRef, target: []const PairLiteralIntegerIdRef = &.{} },
+ .OpKill => void,
+ .OpReturn => void,
+ .OpReturnValue => struct { value: IdRef },
+ .OpUnreachable => void,
+ .OpLifetimeStart => struct { pointer: IdRef, size: LiteralInteger },
+ .OpLifetimeStop => struct { pointer: IdRef, size: LiteralInteger },
+ .OpGroupAsyncCopy => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, destination: IdRef, source: IdRef, num_elements: IdRef, stride: IdRef, event: IdRef },
+ .OpGroupWaitEvents => struct { execution: IdScope, num_events: IdRef, events_list: IdRef },
+ .OpGroupAll => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef },
+ .OpGroupAny => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef },
+ .OpGroupBroadcast => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, localid: IdRef },
+ .OpGroupIAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupFAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupFMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupUMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupSMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupFMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupUMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupSMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpReadPipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpWritePipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpReservedReadPipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, reserve_id: IdRef, index: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpReservedWritePipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, reserve_id: IdRef, index: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpReserveReadPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpReserveWritePipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpCommitReadPipe => struct { pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpCommitWritePipe => struct { pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpIsValidReserveId => struct { id_result_type: IdResultType, id_result: IdResult, reserve_id: IdRef },
+ .OpGetNumPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpGetMaxPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpGroupReserveReadPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpGroupReserveWritePipePackets => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpGroupCommitReadPipe => struct { execution: IdScope, pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpGroupCommitWritePipe => struct { execution: IdScope, pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef },
+ .OpEnqueueMarker => struct { id_result_type: IdResultType, id_result: IdResult, queue: IdRef, num_events: IdRef, wait_events: IdRef, ret_event: IdRef },
+ .OpEnqueueKernel => struct { id_result_type: IdResultType, id_result: IdResult, queue: IdRef, flags: IdRef, nd_range: IdRef, num_events: IdRef, wait_events: IdRef, ret_event: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef, local_size: []const IdRef = &.{} },
+ .OpGetKernelNDrangeSubGroupCount => struct { id_result_type: IdResultType, id_result: IdResult, nd_range: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef },
+ .OpGetKernelNDrangeMaxSubGroupSize => struct { id_result_type: IdResultType, id_result: IdResult, nd_range: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef },
+ .OpGetKernelWorkGroupSize => struct { id_result_type: IdResultType, id_result: IdResult, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef },
+ .OpGetKernelPreferredWorkGroupSizeMultiple => struct { id_result_type: IdResultType, id_result: IdResult, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef },
+ .OpRetainEvent => struct { event: IdRef },
+ .OpReleaseEvent => struct { event: IdRef },
+ .OpCreateUserEvent => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpIsValidEvent => struct { id_result_type: IdResultType, id_result: IdResult, event: IdRef },
+ .OpSetUserEventStatus => struct { event: IdRef, status: IdRef },
+ .OpCaptureEventProfilingInfo => struct { event: IdRef, profiling_info: IdRef, value: IdRef },
+ .OpGetDefaultQueue => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpBuildNDRange => struct { id_result_type: IdResultType, id_result: IdResult, globalworksize: IdRef, localworksize: IdRef, globalworkoffset: IdRef },
+ .OpImageSparseSampleImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSparseSampleDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSparseSampleProjImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleProjExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSparseSampleProjDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleProjDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended },
+ .OpImageSparseFetch => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, component: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseDrefGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseTexelsResident => struct { id_result_type: IdResultType, id_result: IdResult, resident_code: IdRef },
+ .OpNoLine => void,
+ .OpAtomicFlagTestAndSet => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics },
+ .OpAtomicFlagClear => struct { pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics },
+ .OpImageSparseRead => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpSizeOf => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpTypePipeStorage => struct { id_result: IdResult },
+ .OpConstantPipeStorage => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: LiteralInteger, packet_alignment: LiteralInteger, capacity: LiteralInteger },
+ .OpCreatePipeFromPipeStorage => struct { id_result_type: IdResultType, id_result: IdResult, pipe_storage: IdRef },
+ .OpGetKernelLocalSizeForSubgroupCount => struct { id_result_type: IdResultType, id_result: IdResult, subgroup_count: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef },
+ .OpGetKernelMaxNumSubgroups => struct { id_result_type: IdResultType, id_result: IdResult, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef },
+ .OpTypeNamedBarrier => struct { id_result: IdResult },
+ .OpNamedBarrierInitialize => struct { id_result_type: IdResultType, id_result: IdResult, subgroup_count: IdRef },
+ .OpMemoryNamedBarrier => struct { named_barrier: IdRef, memory: IdScope, semantics: IdMemorySemantics },
+ .OpModuleProcessed => struct { process: LiteralString },
+ .OpExecutionModeId => struct { entry_point: IdRef, mode: ExecutionMode.Extended },
+ .OpDecorateId => struct { target: IdRef, decoration: Decoration.Extended },
+ .OpGroupNonUniformElect => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope },
+ .OpGroupNonUniformAll => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef },
+ .OpGroupNonUniformAny => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef },
+ .OpGroupNonUniformAllEqual => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef },
+ .OpGroupNonUniformBroadcast => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, id: IdRef },
+ .OpGroupNonUniformBroadcastFirst => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef },
+ .OpGroupNonUniformBallot => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef },
+ .OpGroupNonUniformInverseBallot => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef },
+ .OpGroupNonUniformBallotBitExtract => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, index: IdRef },
+ .OpGroupNonUniformBallotBitCount => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef },
+ .OpGroupNonUniformBallotFindLSB => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef },
+ .OpGroupNonUniformBallotFindMSB => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef },
+ .OpGroupNonUniformShuffle => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, id: IdRef },
+ .OpGroupNonUniformShuffleXor => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, mask: IdRef },
+ .OpGroupNonUniformShuffleUp => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, delta: IdRef },
+ .OpGroupNonUniformShuffleDown => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, delta: IdRef },
+ .OpGroupNonUniformIAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformFAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformIMul => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformFMul => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformSMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformUMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformFMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformSMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformUMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformFMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformBitwiseAnd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformBitwiseOr => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformBitwiseXor => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformLogicalAnd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformLogicalOr => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformLogicalXor => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null },
+ .OpGroupNonUniformQuadBroadcast => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, index: IdRef },
+ .OpGroupNonUniformQuadSwap => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, direction: IdRef },
+ .OpCopyLogical => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpPtrEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpPtrNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpPtrDiff => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpTerminateInvocation => void,
+ .OpSubgroupBallotKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef },
+ .OpSubgroupFirstInvocationKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef },
+ .OpSubgroupAllKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef },
+ .OpSubgroupAnyKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef },
+ .OpSubgroupAllEqualKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef },
+ .OpSubgroupReadInvocationKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef, index: IdRef },
+ .OpTraceRayKHR => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, payload: IdRef },
+ .OpExecuteCallableKHR => struct { sbt_index: IdRef, callable_data: IdRef },
+ .OpConvertUToAccelerationStructureKHR => struct { id_result_type: IdResultType, id_result: IdResult, accel: IdRef },
+ .OpIgnoreIntersectionKHR => void,
+ .OpTerminateRayKHR => void,
+ .OpTypeRayQueryKHR => struct { id_result: IdResult },
+ .OpRayQueryInitializeKHR => struct { rayquery: IdRef, accel: IdRef, rayflags: IdRef, cullmask: IdRef, rayorigin: IdRef, raytmin: IdRef, raydirection: IdRef, raytmax: IdRef },
+ .OpRayQueryTerminateKHR => struct { rayquery: IdRef },
+ .OpRayQueryGenerateIntersectionKHR => struct { rayquery: IdRef, hitt: IdRef },
+ .OpRayQueryConfirmIntersectionKHR => struct { rayquery: IdRef },
+ .OpRayQueryProceedKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
+ .OpRayQueryGetIntersectionTypeKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpGroupIAddNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupFAddNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupFMinNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupUMinNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupSMinNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupFMaxNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupUMaxNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpGroupSMaxNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
+ .OpFragmentMaskFetchAMD => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef },
+ .OpFragmentFetchAMD => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, fragment_index: IdRef },
+ .OpReadClockKHR => struct { id_result_type: IdResultType, id_result: IdResult, scope: IdScope },
+ .OpImageSampleFootprintNV => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, granularity: IdRef, coarse: IdRef, image_operands: ?ImageOperands.Extended = null },
+ .OpGroupNonUniformPartitionNV => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef },
+ .OpWritePackedPrimitiveIndices4x8NV => struct { index_offset: IdRef, packed_indices: IdRef },
+ .OpReportIntersectionKHR => struct { id_result_type: IdResultType, id_result: IdResult, hit: IdRef, hitkind: IdRef },
+ .OpIgnoreIntersectionNV => void,
+ .OpTerminateRayNV => void,
+ .OpTraceNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, payloadid: IdRef },
+ .OpTypeAccelerationStructureKHR => struct { id_result: IdResult },
+ .OpExecuteCallableNV => struct { sbt_index: IdRef, callable_dataid: IdRef },
+ .OpTypeCooperativeMatrixNV => struct { id_result: IdResult, component_type: IdRef, execution: IdScope, rows: IdRef, columns: IdRef },
+ .OpCooperativeMatrixLoadNV => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, stride: IdRef, column_major: IdRef, memory_access: ?MemoryAccess.Extended = null },
+ .OpCooperativeMatrixStoreNV => struct { pointer: IdRef, object: IdRef, stride: IdRef, column_major: IdRef, memory_access: ?MemoryAccess.Extended = null },
+ .OpCooperativeMatrixMulAddNV => struct { id_result_type: IdResultType, id_result: IdResult, a: IdRef, b: IdRef, c: IdRef },
+ .OpCooperativeMatrixLengthNV => struct { id_result_type: IdResultType, id_result: IdResult, type: IdRef },
+ .OpBeginInvocationInterlockEXT => void,
+ .OpEndInvocationInterlockEXT => void,
+ .OpDemoteToHelperInvocationEXT => void,
+ .OpIsHelperInvocationEXT => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSubgroupShuffleINTEL => struct { id_result_type: IdResultType, id_result: IdResult, data: IdRef, invocationid: IdRef },
+ .OpSubgroupShuffleDownINTEL => struct { id_result_type: IdResultType, id_result: IdResult, current: IdRef, next: IdRef, delta: IdRef },
+ .OpSubgroupShuffleUpINTEL => struct { id_result_type: IdResultType, id_result: IdResult, previous: IdRef, current: IdRef, delta: IdRef },
+ .OpSubgroupShuffleXorINTEL => struct { id_result_type: IdResultType, id_result: IdResult, data: IdRef, value: IdRef },
+ .OpSubgroupBlockReadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, ptr: IdRef },
+ .OpSubgroupBlockWriteINTEL => struct { ptr: IdRef, data: IdRef },
+ .OpSubgroupImageBlockReadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef },
+ .OpSubgroupImageBlockWriteINTEL => struct { image: IdRef, coordinate: IdRef, data: IdRef },
+ .OpSubgroupImageMediaBlockReadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, width: IdRef, height: IdRef },
+ .OpSubgroupImageMediaBlockWriteINTEL => struct { image: IdRef, coordinate: IdRef, width: IdRef, height: IdRef, data: IdRef },
+ .OpUCountLeadingZerosINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpUCountTrailingZerosINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+ .OpAbsISubINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpAbsUSubINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpIAddSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUAddSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpIAverageINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUAverageINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpIAverageRoundedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUAverageRoundedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpISubSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUSubSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpIMul32x16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpUMul32x16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
+ .OpConstFunctionPointerINTEL => struct { id_result_type: IdResultType, id_result: IdResult, function: IdRef },
+ .OpFunctionPointerCallINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: []const IdRef = &.{} },
+ .OpAsmTargetINTEL => struct { id_result_type: IdResultType, id_result: IdResult, asm_target: LiteralString },
+ .OpAsmINTEL => struct { id_result_type: IdResultType, id_result: IdResult, asm_type: IdRef, target: IdRef, asm_instructions: LiteralString, constraints: LiteralString },
+ .OpAsmCallINTEL => struct { id_result_type: IdResultType, id_result: IdResult, @"asm": IdRef, argument_0: []const IdRef = &.{} },
+ .OpAtomicFMinEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAtomicFMaxEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpAssumeTrueKHR => struct { condition: IdRef },
+ .OpExpectKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef, expectedvalue: IdRef },
+ .OpDecorateString => struct { target: IdRef, decoration: Decoration.Extended },
+ .OpMemberDecorateString => struct { struct_type: IdRef, member: LiteralInteger, decoration: Decoration.Extended },
+ .OpVmeImageINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image_type: IdRef, sampler: IdRef },
+ .OpTypeVmeImageINTEL => struct { id_result: IdResult, image_type: IdRef },
+ .OpTypeAvcImePayloadINTEL => struct { id_result: IdResult },
+ .OpTypeAvcRefPayloadINTEL => struct { id_result: IdResult },
+ .OpTypeAvcSicPayloadINTEL => struct { id_result: IdResult },
+ .OpTypeAvcMcePayloadINTEL => struct { id_result: IdResult },
+ .OpTypeAvcMceResultINTEL => struct { id_result: IdResult },
+ .OpTypeAvcImeResultINTEL => struct { id_result: IdResult },
+ .OpTypeAvcImeResultSingleReferenceStreamoutINTEL => struct { id_result: IdResult },
+ .OpTypeAvcImeResultDualReferenceStreamoutINTEL => struct { id_result: IdResult },
+ .OpTypeAvcImeSingleReferenceStreaminINTEL => struct { id_result: IdResult },
+ .OpTypeAvcImeDualReferenceStreaminINTEL => struct { id_result: IdResult },
+ .OpTypeAvcRefResultINTEL => struct { id_result: IdResult },
+ .OpTypeAvcSicResultINTEL => struct { id_result: IdResult },
+ .OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
+ .OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, reference_base_penalty: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
+ .OpSubgroupAvcMceSetInterShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_shape_penalty: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
+ .OpSubgroupAvcMceSetInterDirectionPenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, direction_cost: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
+ .OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
+ .OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_cost_center_delta: IdRef, packed_cost_table: IdRef, cost_precision: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
+ .OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpSubgroupAvcMceSetAcOnlyHaarINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL => struct { id_result_type: IdResultType, id_result: IdResult, source_field_polarity: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL => struct { id_result_type: IdResultType, id_result: IdResult, reference_field_polarity: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL => struct { id_result_type: IdResultType, id_result: IdResult, forward_reference_field_polarity: IdRef, backward_reference_field_polarity: IdRef, payload: IdRef },
+ .OpSubgroupAvcMceConvertToImePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceConvertToImeResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceConvertToRefPayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceConvertToRefResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceConvertToSicPayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceConvertToSicResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetMotionVectorsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetBestInterDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterMajorShapeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterMinorShapeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterDirectionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterMotionVectorCountINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterReferenceIdsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_reference_ids: IdRef, packed_reference_parameter_field_polarities: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef, partition_mask: IdRef, sad_adjustment: IdRef },
+ .OpSubgroupAvcImeSetSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, ref_offset: IdRef, search_window_config: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeSetDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, fwd_ref_offset: IdRef, bwd_ref_offset: IdRef, id_ref_4: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeRefWindowSizeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, search_window_config: IdRef, dual_ref: IdRef },
+ .OpSubgroupAvcImeAdjustRefOffsetINTEL => struct { id_result_type: IdResultType, id_result: IdResult, ref_offset: IdRef, src_coord: IdRef, ref_window_size: IdRef, image_size: IdRef },
+ .OpSubgroupAvcImeConvertToMcePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeSetMaxMotionVectorCountINTEL => struct { id_result_type: IdResultType, id_result: IdResult, max_motion_vector_count: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL => struct { id_result_type: IdResultType, id_result: IdResult, threshold: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeSetWeightedSadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_sad_weights: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeEvaluateWithDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
+ .OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
+ .OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
+ .OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
+ .OpSubgroupAvcImeConvertToMceResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeGetSingleReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeGetDualReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeStripDualReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef },
+ .OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef },
+ .OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef },
+ .OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef, direction: IdRef },
+ .OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef, direction: IdRef },
+ .OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef, direction: IdRef },
+ .OpSubgroupAvcImeGetBorderReachedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image_select: IdRef, payload: IdRef },
+ .OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcFmeInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef, motion_vectors: IdRef, major_shapes: IdRef, minor_shapes: IdRef, direction: IdRef, pixel_resolution: IdRef, sad_adjustment: IdRef },
+ .OpSubgroupAvcBmeInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef, motion_vectors: IdRef, major_shapes: IdRef, minor_shapes: IdRef, direction: IdRef, pixel_resolution: IdRef, bidirectional_weight: IdRef, sad_adjustment: IdRef },
+ .OpSubgroupAvcRefConvertToMcePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcRefSetBidirectionalMixDisableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcRefSetBilinearFilterEnableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcRefEvaluateWithDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, payload: IdRef },
+ .OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, packed_reference_field_polarities: IdRef, payload: IdRef },
+ .OpSubgroupAvcRefConvertToMceResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef },
+ .OpSubgroupAvcSicConfigureSkcINTEL => struct { id_result_type: IdResultType, id_result: IdResult, skip_block_partition_type: IdRef, skip_motion_vector_mask: IdRef, motion_vectors: IdRef, bidirectional_weight: IdRef, sad_adjustment: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicConfigureIpeLumaINTEL => struct { id_result_type: IdResultType, id_result: IdResult, luma_intra_partition_mask: IdRef, intra_neighbour_availability: IdRef, left_edge_luma_pixels: IdRef, upper_left_corner_luma_pixel: IdRef, upper_edge_luma_pixels: IdRef, upper_right_edge_luma_pixels: IdRef, sad_adjustment: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicConfigureIpeLumaChromaINTEL => struct { id_result_type: IdResultType, id_result: IdResult, luma_intra_partition_mask: IdRef, intra_neighbour_availability: IdRef, left_edge_luma_pixels: IdRef, upper_left_corner_luma_pixel: IdRef, upper_edge_luma_pixels: IdRef, upper_right_edge_luma_pixels: IdRef, left_edge_chroma_pixels: IdRef, upper_left_corner_chroma_pixel: IdRef, upper_edge_chroma_pixels: IdRef, sad_adjustment: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicGetMotionVectorMaskINTEL => struct { id_result_type: IdResultType, id_result: IdResult, skip_block_partition_type: IdRef, direction: IdRef },
+ .OpSubgroupAvcSicConvertToMcePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_shape_penalty: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, luma_mode_penalty: IdRef, luma_packed_neighbor_modes: IdRef, luma_packed_non_dc_penalty: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, chroma_mode_base_penalty: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicSetBilinearFilterEnableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_sad_coefficients: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, block_based_skip_type: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicEvaluateIpeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicEvaluateWithDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, packed_reference_field_polarities: IdRef, payload: IdRef },
+ .OpSubgroupAvcSicConvertToMceResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetIpeLumaShapeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetPackedIpeLumaModesINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetIpeChromaModeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpSubgroupAvcSicGetInterRawSadsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
+ .OpVariableLengthArrayINTEL => struct { id_result_type: IdResultType, id_result: IdResult, length: IdRef },
+ .OpSaveMemoryINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
+ .OpRestoreMemoryINTEL => struct { ptr: IdRef },
+ .OpLoopControlINTEL => struct { loop_control_parameters: []const LiteralInteger = &.{} },
+ .OpPtrCastToCrossWorkgroupINTEL => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpCrossWorkgroupCastToPtrINTEL => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
+ .OpReadPipeBlockingINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: IdRef, packet_alignment: IdRef },
+ .OpWritePipeBlockingINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: IdRef, packet_alignment: IdRef },
+ .OpFPGARegINTEL => struct { id_result_type: IdResultType, id_result: IdResult, result: IdRef, input: IdRef },
+ .OpRayQueryGetRayTMinKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
+ .OpRayQueryGetRayFlagsKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
+ .OpRayQueryGetIntersectionTKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionInstanceCustomIndexKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionInstanceIdKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionGeometryIndexKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionPrimitiveIndexKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionBarycentricsKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionFrontFaceKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
+ .OpRayQueryGetIntersectionObjectRayDirectionKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionObjectRayOriginKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetWorldRayDirectionKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
+ .OpRayQueryGetWorldRayOriginKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
+ .OpRayQueryGetIntersectionObjectToWorldKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpRayQueryGetIntersectionWorldToObjectKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
+ .OpAtomicFAddEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
+ .OpTypeBufferSurfaceINTEL => struct { id_result: IdResult },
+ .OpTypeStructContinuedINTEL => struct { id_ref: []const IdRef = &.{} },
+ .OpConstantCompositeContinuedINTEL => struct { constituents: []const IdRef = &.{} },
+ .OpSpecConstantCompositeContinuedINTEL => struct { constituents: []const IdRef = &.{} },
+ };
+ }
};
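
The generated switch above ends here. Its shape is worth spelling out: each opcode tag maps, at comptime, to a plain struct describing that instruction's operand layout, so an encoder can accept a strongly typed operand bundle instead of a raw word list. A minimal sketch of the pattern, assuming IdRef reduces to a 32-bit word; the enclosing helper's real name lives outside this hunk, so the names below are illustrative only:

const std = @import("std");

const IdRef = u32; // assumption: stands in for spec.IdRef

const Opcode = enum(u16) {
    OpRetainEvent,
    OpSetUserEventStatus,

    // Comptime map from opcode tag to its operand struct, mirroring the
    // generated switch above (two tags shown instead of hundreds).
    pub fn Operands(comptime self: Opcode) type {
        return switch (self) {
            .OpRetainEvent => struct { event: IdRef },
            .OpSetUserEventStatus => struct { event: IdRef, status: IdRef },
        };
    }
};

test "operand structs are ordinary structs" {
    const ops: Opcode.Operands(.OpRetainEvent) = .{ .event = 1 };
    try std.testing.expectEqual(@as(IdRef, 1), ops.event);
}

Because the mapping is resolved at comptime, misnamed or missing operands for a given opcode become compile errors rather than malformed SPIR-V.
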
pub const ImageOperands = packed struct {
Bias: bool align(@alignOf(u32)) = false,
@@ -618,6 +1238,46 @@ pub const ImageOperands = packed struct {
_reserved_bit_29: bool = false,
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
+
+ pub const MakeTexelAvailableKHR: ImageOperands = .{ .MakeTexelAvailable = true };
+ pub const MakeTexelVisibleKHR: ImageOperands = .{ .MakeTexelVisible = true };
+ pub const NonPrivateTexelKHR: ImageOperands = .{ .NonPrivateTexel = true };
+ pub const VolatileTexelKHR: ImageOperands = .{ .VolatileTexel = true };
+
+ pub const Extended = struct {
+ Bias: ?struct { id_ref: IdRef } = null,
+ Lod: ?struct { id_ref: IdRef } = null,
+ Grad: ?struct { id_ref_0: IdRef, id_ref_1: IdRef } = null,
+ ConstOffset: ?struct { id_ref: IdRef } = null,
+ Offset: ?struct { id_ref: IdRef } = null,
+ ConstOffsets: ?struct { id_ref: IdRef } = null,
+ Sample: ?struct { id_ref: IdRef } = null,
+ MinLod: ?struct { id_ref: IdRef } = null,
+ MakeTexelAvailable: ?struct { id_scope: IdScope } = null,
+ MakeTexelVisible: ?struct { id_scope: IdScope } = null,
+ NonPrivateTexel: bool = false,
+ VolatileTexel: bool = false,
+ SignExtend: bool = false,
+ ZeroExtend: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
};
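
The packed struct above is the raw ImageOperands bitmask as it appears in the binary; `Extended` pairs each mask bit with the operand payload that bit implies, so a non-null field means both "set my bit" and "append my words". An abbreviated sketch of that contract, with assumed bit positions and an assumed u32 IdRef (not the actual serializer in Section.zig):

const std = @import("std");

const IdRef = u32; // assumption

// Abbreviated analog of ImageOperands.Extended: a non-null field both sets
// its mask bit and contributes operand words.
const Extended = struct {
    Bias: ?struct { id_ref: IdRef } = null,
    Lod: ?struct { id_ref: IdRef } = null,
};

fn maskOf(ext: Extended) u32 {
    var mask: u32 = 0;
    if (ext.Bias != null) mask |= 1 << 0; // Bias is assumed bit 0
    if (ext.Lod != null) mask |= 1 << 1; // Lod is assumed bit 1
    return mask;
}

test "mask follows the populated fields" {
    try std.testing.expectEqual(@as(u32, 0b10), maskOf(.{ .Lod = .{ .id_ref = 5 } }));
}

Deriving the mask and the trailing operand words from a single `Extended` value keeps the two from drifting apart.
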
pub const FPFastMathMode = packed struct {
NotNaN: bool align(@alignOf(u32)) = false,
@@ -720,6 +1380,41 @@ pub const LoopControl = packed struct {
_reserved_bit_29: bool = false,
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
+
+ pub const Extended = struct {
+ Unroll: bool = false,
+ DontUnroll: bool = false,
+ DependencyInfinite: bool = false,
+ DependencyLength: ?struct { literal_integer: LiteralInteger } = null,
+ MinIterations: ?struct { literal_integer: LiteralInteger } = null,
+ MaxIterations: ?struct { literal_integer: LiteralInteger } = null,
+ IterationMultiple: ?struct { literal_integer: LiteralInteger } = null,
+ PeelCount: ?struct { literal_integer: LiteralInteger } = null,
+ PartialCount: ?struct { literal_integer: LiteralInteger } = null,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ InitiationIntervalINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ MaxConcurrencyINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ DependencyArrayINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ PipelineEnableINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ LoopCoalesceINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ MaxInterleavingINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ SpeculatedIterationsINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ NoFusionINTEL: ?struct { literal_integer: LiteralInteger } = null,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
};
pub const FunctionControl = packed struct {
Inline: bool align(@alignOf(u32)) = false,
@@ -788,6 +1483,10 @@ pub const MemorySemantics = packed struct {
_reserved_bit_29: bool = false,
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
+
+ pub const OutputMemoryKHR: MemorySemantics = .{ .OutputMemory = true };
+ pub const MakeAvailableKHR: MemorySemantics = .{ .MakeAvailable = true };
+ pub const MakeVisibleKHR: MemorySemantics = .{ .MakeVisible = true };
};
pub const MemoryAccess = packed struct {
Volatile: bool align(@alignOf(u32)) = false,
@@ -822,6 +1521,45 @@ pub const MemoryAccess = packed struct {
_reserved_bit_29: bool = false,
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
+
+ pub const MakePointerAvailableKHR: MemoryAccess = .{ .MakePointerAvailable = true };
+ pub const MakePointerVisibleKHR: MemoryAccess = .{ .MakePointerVisible = true };
+ pub const NonPrivatePointerKHR: MemoryAccess = .{ .NonPrivatePointer = true };
+
+ pub const Extended = struct {
+ Volatile: bool = false,
+ Aligned: ?struct { literal_integer: LiteralInteger } = null,
+ Nontemporal: bool = false,
+ MakePointerAvailable: ?struct { id_scope: IdScope } = null,
+ MakePointerVisible: ?struct { id_scope: IdScope } = null,
+ NonPrivatePointer: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
};
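
`MemoryAccess.Extended` follows the same scheme but mixes bare flags (Volatile, NonPrivatePointer) with payload-carrying fields (Aligned and the two scope fields). A hedged sketch of how such a value could be flattened to words, mask first and payloads in bit order; this is an assumed layout for illustration, not the spec.zig emitter:

const std = @import("std");

// Abbreviated analog of MemoryAccess.Extended: Volatile is a bare flag,
// Aligned carries a literal word.
const Extended = struct {
    Volatile: bool = false, // bit 0
    Aligned: ?struct { literal_integer: u32 } = null, // bit 1
};

fn emit(words: *std.ArrayList(u32), ext: Extended) !void {
    var mask: u32 = 0;
    if (ext.Volatile) mask |= 1 << 0;
    if (ext.Aligned != null) mask |= 1 << 1;
    try words.append(mask); // the mask word always comes first
    if (ext.Aligned) |a| try words.append(a.literal_integer);
}

test "mask word precedes payload words" {
    var words = std.ArrayList(u32).init(std.testing.allocator);
    defer words.deinit();
    try emit(&words, .{ .Volatile = true, .Aligned = .{ .literal_integer = 4 } });
    try std.testing.expectEqualSlices(u32, &.{ 0b11, 4 }, words.items);
}
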
pub const KernelProfilingInfo = packed struct {
CmdExecTime: bool align(@alignOf(u32)) = false,
@@ -932,7 +1670,6 @@ pub const SourceLanguage = enum(u32) {
OpenCL_C = 3,
OpenCL_CPP = 4,
HLSL = 5,
- _,
};
pub const ExecutionModel = enum(u32) {
Vertex = 0,
@@ -944,33 +1681,35 @@ pub const ExecutionModel = enum(u32) {
Kernel = 6,
TaskNV = 5267,
MeshNV = 5268,
- RayGenerationNV = 5313,
RayGenerationKHR = 5313,
- IntersectionNV = 5314,
IntersectionKHR = 5314,
- AnyHitNV = 5315,
AnyHitKHR = 5315,
- ClosestHitNV = 5316,
ClosestHitKHR = 5316,
- MissNV = 5317,
MissKHR = 5317,
- CallableNV = 5318,
CallableKHR = 5318,
- _,
+
+ pub const RayGenerationNV = ExecutionModel.RayGenerationKHR;
+ pub const IntersectionNV = ExecutionModel.IntersectionKHR;
+ pub const AnyHitNV = ExecutionModel.AnyHitKHR;
+ pub const ClosestHitNV = ExecutionModel.ClosestHitKHR;
+ pub const MissNV = ExecutionModel.MissKHR;
+ pub const CallableNV = ExecutionModel.CallableKHR;
};
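
Note the shift in how vendor aliases are expressed here and in the enums below: rather than a second enum field sharing the same value, which leaves @tagName and switch arms ambiguous about the canonical name, the NV spellings become declarations that resolve to the KHR tag. Dropping the trailing `_` also makes the enum exhaustive, so a switch must name every tag. A self-contained sketch of the pattern:

const std = @import("std");

const ExecutionModel = enum(u32) {
    Kernel = 6,
    RayGenerationKHR = 5313,

    // Alias as a declaration: same value, one canonical field.
    pub const RayGenerationNV = ExecutionModel.RayGenerationKHR;
};

test "alias and canonical name are the same tag" {
    try std.testing.expectEqual(ExecutionModel.RayGenerationKHR, ExecutionModel.RayGenerationNV);
    try std.testing.expectEqualStrings("RayGenerationKHR", @tagName(ExecutionModel.RayGenerationNV));
}

Existing callers that spell the NV name keep compiling, while every tag value now has exactly one field.
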
pub const AddressingModel = enum(u32) {
Logical = 0,
Physical32 = 1,
Physical64 = 2,
PhysicalStorageBuffer64 = 5348,
- _,
+
+ pub const PhysicalStorageBuffer64EXT = AddressingModel.PhysicalStorageBuffer64;
};
pub const MemoryModel = enum(u32) {
Simple = 0,
GLSL450 = 1,
OpenCL = 2,
Vulkan = 3,
- _,
+
+ pub const VulkanKHR = MemoryModel.Vulkan;
};
pub const ExecutionMode = enum(u32) {
Invocations = 0,
@@ -1039,7 +1778,75 @@ pub const ExecutionMode = enum(u32) {
NoGlobalOffsetINTEL = 5895,
NumSIMDWorkitemsINTEL = 5896,
SchedulerTargetFmaxMhzINTEL = 5903,
- _,
+
+ pub const Extended = union(ExecutionMode) {
+ Invocations: struct { literal_integer: LiteralInteger },
+ SpacingEqual,
+ SpacingFractionalEven,
+ SpacingFractionalOdd,
+ VertexOrderCw,
+ VertexOrderCcw,
+ PixelCenterInteger,
+ OriginUpperLeft,
+ OriginLowerLeft,
+ EarlyFragmentTests,
+ PointMode,
+ Xfb,
+ DepthReplacing,
+ DepthGreater,
+ DepthLess,
+ DepthUnchanged,
+ LocalSize: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger },
+ LocalSizeHint: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger },
+ InputPoints,
+ InputLines,
+ InputLinesAdjacency,
+ Triangles,
+ InputTrianglesAdjacency,
+ Quads,
+ Isolines,
+ OutputVertices: struct { vertex_count: LiteralInteger },
+ OutputPoints,
+ OutputLineStrip,
+ OutputTriangleStrip,
+ VecTypeHint: struct { vector_type: LiteralInteger },
+ ContractionOff,
+ Initializer,
+ Finalizer,
+ SubgroupSize: struct { subgroup_size: LiteralInteger },
+ SubgroupsPerWorkgroup: struct { subgroups_per_workgroup: LiteralInteger },
+ SubgroupsPerWorkgroupId: struct { subgroups_per_workgroup: IdRef },
+ LocalSizeId: struct { x_size: IdRef, y_size: IdRef, z_size: IdRef },
+ LocalSizeHintId: struct { local_size_hint: IdRef },
+ PostDepthCoverage,
+ DenormPreserve: struct { target_width: LiteralInteger },
+ DenormFlushToZero: struct { target_width: LiteralInteger },
+ SignedZeroInfNanPreserve: struct { target_width: LiteralInteger },
+ RoundingModeRTE: struct { target_width: LiteralInteger },
+ RoundingModeRTZ: struct { target_width: LiteralInteger },
+ StencilRefReplacingEXT,
+ OutputLinesNV,
+ OutputPrimitivesNV: struct { primitive_count: LiteralInteger },
+ DerivativeGroupQuadsNV,
+ DerivativeGroupLinearNV,
+ OutputTrianglesNV,
+ PixelInterlockOrderedEXT,
+ PixelInterlockUnorderedEXT,
+ SampleInterlockOrderedEXT,
+ SampleInterlockUnorderedEXT,
+ ShadingRateInterlockOrderedEXT,
+ ShadingRateInterlockUnorderedEXT,
+ SharedLocalMemorySizeINTEL: struct { size: LiteralInteger },
+ RoundingModeRTPINTEL: struct { target_width: LiteralInteger },
+ RoundingModeRTNINTEL: struct { target_width: LiteralInteger },
+ FloatingPointModeALTINTEL: struct { target_width: LiteralInteger },
+ FloatingPointModeIEEEINTEL: struct { target_width: LiteralInteger },
+ MaxWorkgroupSizeINTEL: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger, literal_integer_2: LiteralInteger },
+ MaxWorkDimINTEL: struct { literal_integer: LiteralInteger },
+ NoGlobalOffsetINTEL,
+ NumSIMDWorkitemsINTEL: struct { literal_integer: LiteralInteger },
+ SchedulerTargetFmaxMhzINTEL: struct { literal_integer: LiteralInteger },
+ };
};
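
`ExecutionMode.Extended` is a tagged union over the enum itself: the active tag picks the mode and the payload is exactly that mode's literal operands, so selecting and unpacking happen in one switch. An abbreviated two-tag analog, assuming LiteralInteger is u32:

const std = @import("std");

const LiteralInteger = u32; // assumption: stands in for spec.LiteralInteger

const Mode = enum(u32) {
    Invocations = 0,
    LocalSize = 17,
};

// Analog of ExecutionMode.Extended: union(Mode) ties each payload to a tag.
const Extended = union(Mode) {
    Invocations: struct { literal_integer: LiteralInteger },
    LocalSize: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger },
};

test "tag selects the payload shape" {
    const m: Extended = .{ .LocalSize = .{ .x_size = 64, .y_size = 1, .z_size = 1 } };
    switch (m) {
        .LocalSize => |ls| try std.testing.expectEqual(@as(LiteralInteger, 64), ls.x_size),
        .Invocations => unreachable,
    }
}
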
pub const StorageClass = enum(u32) {
UniformConstant = 0,
@@ -1065,7 +1872,14 @@ pub const StorageClass = enum(u32) {
CodeSectionINTEL = 5605,
DeviceOnlyINTEL = 5936,
HostOnlyINTEL = 5937,
- _,
+
+ pub const CallableDataNV = StorageClass.CallableDataKHR;
+ pub const IncomingCallableDataNV = StorageClass.IncomingCallableDataKHR;
+ pub const RayPayloadNV = StorageClass.RayPayloadKHR;
+ pub const HitAttributeNV = StorageClass.HitAttributeKHR;
+ pub const IncomingRayPayloadNV = StorageClass.IncomingRayPayloadKHR;
+ pub const ShaderRecordBufferNV = StorageClass.ShaderRecordBufferKHR;
+ pub const PhysicalStorageBufferEXT = StorageClass.PhysicalStorageBuffer;
};
pub const Dim = enum(u32) {
@"1D" = 0,
@@ -1075,7 +1889,6 @@ pub const Dim = enum(u32) {
Rect = 4,
Buffer = 5,
SubpassData = 6,
- _,
};
pub const SamplerAddressingMode = enum(u32) {
None = 0,
@@ -1083,12 +1896,10 @@ pub const SamplerAddressingMode = enum(u32) {
Clamp = 2,
Repeat = 3,
RepeatMirrored = 4,
- _,
};
pub const SamplerFilterMode = enum(u32) {
Nearest = 0,
Linear = 1,
- _,
};
pub const ImageFormat = enum(u32) {
Unknown = 0,
@@ -1133,7 +1944,6 @@ pub const ImageFormat = enum(u32) {
R8ui = 39,
R64ui = 40,
R64i = 41,
- _,
};
pub const ImageChannelOrder = enum(u32) {
R = 0,
@@ -1156,7 +1966,6 @@ pub const ImageChannelOrder = enum(u32) {
sRGBA = 17,
sBGRA = 18,
ABGR = 19,
- _,
};
pub const ImageChannelDataType = enum(u32) {
SnormInt8 = 0,
@@ -1176,36 +1985,30 @@ pub const ImageChannelDataType = enum(u32) {
Float = 14,
UnormInt24 = 15,
UnormInt101010_2 = 16,
- _,
};
pub const FPRoundingMode = enum(u32) {
RTE = 0,
RTZ = 1,
RTP = 2,
RTN = 3,
- _,
};
pub const FPDenormMode = enum(u32) {
Preserve = 0,
FlushToZero = 1,
- _,
};
pub const FPOperationMode = enum(u32) {
IEEE = 0,
ALT = 1,
- _,
};
pub const LinkageType = enum(u32) {
Export = 0,
Import = 1,
LinkOnceODR = 2,
- _,
};
pub const AccessQualifier = enum(u32) {
ReadOnly = 0,
WriteOnly = 1,
ReadWrite = 2,
- _,
};
pub const FunctionParameterAttribute = enum(u32) {
Zext = 0,
@@ -1216,7 +2019,6 @@ pub const FunctionParameterAttribute = enum(u32) {
NoCapture = 5,
NoWrite = 6,
NoReadWrite = 7,
- _,
};
pub const Decoration = enum(u32) {
RelaxedPrecision = 0,
@@ -1278,11 +2080,8 @@ pub const Decoration = enum(u32) {
PerTaskNV = 5273,
PerVertexNV = 5285,
NonUniform = 5300,
- NonUniformEXT = 5300,
RestrictPointer = 5355,
- RestrictPointerEXT = 5355,
AliasedPointer = 5356,
- AliasedPointerEXT = 5356,
SIMTCallINTEL = 5599,
ReferencedIndirectlyINTEL = 5602,
ClobberINTEL = 5607,
@@ -1293,9 +2092,7 @@ pub const Decoration = enum(u32) {
StackCallINTEL = 5627,
GlobalVariableOffsetINTEL = 5628,
CounterBuffer = 5634,
- HlslCounterBufferGOOGLE = 5634,
UserSemantic = 5635,
- HlslSemanticGOOGLE = 5635,
UserTypeGOOGLE = 5636,
FunctionRoundingModeINTEL = 5822,
FunctionDenormModeINTEL = 5823,
@@ -1322,7 +2119,113 @@ pub const Decoration = enum(u32) {
FunctionFloatingPointModeINTEL = 6080,
SingleElementVectorINTEL = 6085,
VectorComputeCallableFunctionINTEL = 6087,
- _,
+
+ pub const NonUniformEXT = Decoration.NonUniform;
+ pub const RestrictPointerEXT = Decoration.RestrictPointer;
+ pub const AliasedPointerEXT = Decoration.AliasedPointer;
+ pub const HlslCounterBufferGOOGLE = Decoration.CounterBuffer;
+ pub const HlslSemanticGOOGLE = Decoration.UserSemantic;
+
+ pub const Extended = union(Decoration) {
+ RelaxedPrecision,
+ SpecId: struct { specialization_constant_id: LiteralInteger },
+ Block,
+ BufferBlock,
+ RowMajor,
+ ColMajor,
+ ArrayStride: struct { array_stride: LiteralInteger },
+ MatrixStride: struct { matrix_stride: LiteralInteger },
+ GLSLShared,
+ GLSLPacked,
+ CPacked,
+ BuiltIn: struct { built_in: BuiltIn },
+ NoPerspective,
+ Flat,
+ Patch,
+ Centroid,
+ Sample,
+ Invariant,
+ Restrict,
+ Aliased,
+ Volatile,
+ Constant,
+ Coherent,
+ NonWritable,
+ NonReadable,
+ Uniform,
+ UniformId: struct { execution: IdScope },
+ SaturatedConversion,
+ Stream: struct { stream_number: LiteralInteger },
+ Location: struct { location: LiteralInteger },
+ Component: struct { component: LiteralInteger },
+ Index: struct { index: LiteralInteger },
+ Binding: struct { binding_point: LiteralInteger },
+ DescriptorSet: struct { descriptor_set: LiteralInteger },
+ Offset: struct { byte_offset: LiteralInteger },
+ XfbBuffer: struct { xfb_buffer_number: LiteralInteger },
+ XfbStride: struct { xfb_stride: LiteralInteger },
+ FuncParamAttr: struct { function_parameter_attribute: FunctionParameterAttribute },
+ FPRoundingMode: struct { fprounding_mode: FPRoundingMode },
+ FPFastMathMode: struct { fpfast_math_mode: FPFastMathMode },
+ LinkageAttributes: struct { name: LiteralString, linkage_type: LinkageType },
+ NoContraction,
+ InputAttachmentIndex: struct { attachment_index: LiteralInteger },
+ Alignment: struct { alignment: LiteralInteger },
+ MaxByteOffset: struct { max_byte_offset: LiteralInteger },
+ AlignmentId: struct { alignment: IdRef },
+ MaxByteOffsetId: struct { max_byte_offset: IdRef },
+ NoSignedWrap,
+ NoUnsignedWrap,
+ ExplicitInterpAMD,
+ OverrideCoverageNV,
+ PassthroughNV,
+ ViewportRelativeNV,
+ SecondaryViewportRelativeNV: struct { offset: LiteralInteger },
+ PerPrimitiveNV,
+ PerViewNV,
+ PerTaskNV,
+ PerVertexNV,
+ NonUniform,
+ RestrictPointer,
+ AliasedPointer,
+ SIMTCallINTEL: struct { n: LiteralInteger },
+ ReferencedIndirectlyINTEL,
+ ClobberINTEL: struct { register: LiteralString },
+ SideEffectsINTEL,
+ VectorComputeVariableINTEL,
+ FuncParamIOKindINTEL: struct { kind: LiteralInteger },
+ VectorComputeFunctionINTEL,
+ StackCallINTEL,
+ GlobalVariableOffsetINTEL: struct { offset: LiteralInteger },
+ CounterBuffer: struct { counter_buffer: IdRef },
+ UserSemantic: struct { semantic: LiteralString },
+ UserTypeGOOGLE: struct { user_type: LiteralString },
+ FunctionRoundingModeINTEL: struct { target_width: LiteralInteger, fp_rounding_mode: FPRoundingMode },
+ FunctionDenormModeINTEL: struct { target_width: LiteralInteger, fp_denorm_mode: FPDenormMode },
+ RegisterINTEL,
+ MemoryINTEL: struct { memory_type: LiteralString },
+ NumbanksINTEL: struct { banks: LiteralInteger },
+ BankwidthINTEL: struct { bank_width: LiteralInteger },
+ MaxPrivateCopiesINTEL: struct { maximum_copies: LiteralInteger },
+ SinglepumpINTEL,
+ DoublepumpINTEL,
+ MaxReplicatesINTEL: struct { maximum_replicates: LiteralInteger },
+ SimpleDualPortINTEL,
+ MergeINTEL: struct { merge_key: LiteralString, merge_type: LiteralString },
+ BankBitsINTEL: struct { bank_bits: []const LiteralInteger = &.{} },
+ ForcePow2DepthINTEL: struct { force_key: LiteralInteger },
+ BurstCoalesceINTEL,
+ CacheSizeINTEL: struct { cache_size_in_bytes: LiteralInteger },
+ DontStaticallyCoalesceINTEL,
+ PrefetchINTEL: struct { prefetcher_size_in_bytes: LiteralInteger },
+ StallEnableINTEL,
+ FuseLoopsInFunctionINTEL,
+ BufferLocationINTEL: struct { buffer_location_id: LiteralInteger },
+ IOPipeStorageINTEL: struct { io_pipe_id: LiteralInteger },
+ FunctionFloatingPointModeINTEL: struct { target_width: LiteralInteger, fp_operation_mode: FPOperationMode },
+ SingleElementVectorINTEL,
+ VectorComputeCallableFunctionINTEL,
+ };
};
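
Because `Decoration.Extended` is itself a tagged union, operand structs such as the ones for OpDecorateId and OpDecorateString earlier in this hunk embed it directly, and the decoration's extra literals travel inside the instruction's operand bundle. A compact sketch under the same assumptions as before (illustrative names, not the generated types):

const std = @import("std");

const IdRef = u32; // assumption
const LiteralInteger = u32; // assumption

const Decoration = enum(u32) { RelaxedPrecision = 0, Location = 30 };
const DecorationExtended = union(Decoration) {
    RelaxedPrecision,
    Location: struct { location: LiteralInteger },
};

// Analog of the generated operands for OpDecorate: the decoration union is
// a field, so one value carries target, tag, and literals together.
const OpDecorateOperands = struct {
    target: IdRef,
    decoration: DecorationExtended,
};

test "nested union carries the decoration literals" {
    const ops = OpDecorateOperands{ .target = 7, .decoration = .{ .Location = .{ .location = 2 } } };
    switch (ops.decoration) {
        .Location => |loc| try std.testing.expectEqual(@as(LiteralInteger, 2), loc.location),
        .RelaxedPrecision => unreachable,
    }
}
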
pub const BuiltIn = enum(u32) {
Position = 0,
@@ -1367,15 +2270,10 @@ pub const BuiltIn = enum(u32) {
VertexIndex = 42,
InstanceIndex = 43,
SubgroupEqMask = 4416,
- SubgroupEqMaskKHR = 4416,
SubgroupGeMask = 4417,
- SubgroupGeMaskKHR = 4417,
SubgroupGtMask = 4418,
- SubgroupGtMaskKHR = 4418,
SubgroupLeMask = 4419,
- SubgroupLeMaskKHR = 4419,
SubgroupLtMask = 4420,
- SubgroupLtMaskKHR = 4420,
BaseVertex = 4424,
BaseInstance = 4425,
DrawIndex = 4426,
@@ -1408,42 +2306,47 @@ pub const BuiltIn = enum(u32) {
BaryCoordNV = 5286,
BaryCoordNoPerspNV = 5287,
FragSizeEXT = 5292,
- FragmentSizeNV = 5292,
FragInvocationCountEXT = 5293,
- InvocationsPerPixelNV = 5293,
- LaunchIdNV = 5319,
LaunchIdKHR = 5319,
- LaunchSizeNV = 5320,
LaunchSizeKHR = 5320,
- WorldRayOriginNV = 5321,
WorldRayOriginKHR = 5321,
- WorldRayDirectionNV = 5322,
WorldRayDirectionKHR = 5322,
- ObjectRayOriginNV = 5323,
ObjectRayOriginKHR = 5323,
- ObjectRayDirectionNV = 5324,
ObjectRayDirectionKHR = 5324,
- RayTminNV = 5325,
RayTminKHR = 5325,
- RayTmaxNV = 5326,
RayTmaxKHR = 5326,
- InstanceCustomIndexNV = 5327,
InstanceCustomIndexKHR = 5327,
- ObjectToWorldNV = 5330,
ObjectToWorldKHR = 5330,
- WorldToObjectNV = 5331,
WorldToObjectKHR = 5331,
HitTNV = 5332,
- HitKindNV = 5333,
HitKindKHR = 5333,
- IncomingRayFlagsNV = 5351,
IncomingRayFlagsKHR = 5351,
RayGeometryIndexKHR = 5352,
WarpsPerSMNV = 5374,
SMCountNV = 5375,
WarpIDNV = 5376,
SMIDNV = 5377,
- _,
+
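+    // Zig enums cannot declare two fields with the same value, so the aliases
+    // from the SPIR-V spec are exposed as constants referencing the canonical
+    // field instead: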
+ pub const SubgroupEqMaskKHR = BuiltIn.SubgroupEqMask;
+ pub const SubgroupGeMaskKHR = BuiltIn.SubgroupGeMask;
+ pub const SubgroupGtMaskKHR = BuiltIn.SubgroupGtMask;
+ pub const SubgroupLeMaskKHR = BuiltIn.SubgroupLeMask;
+ pub const SubgroupLtMaskKHR = BuiltIn.SubgroupLtMask;
+ pub const FragmentSizeNV = BuiltIn.FragSizeEXT;
+ pub const InvocationsPerPixelNV = BuiltIn.FragInvocationCountEXT;
+ pub const LaunchIdNV = BuiltIn.LaunchIdKHR;
+ pub const LaunchSizeNV = BuiltIn.LaunchSizeKHR;
+ pub const WorldRayOriginNV = BuiltIn.WorldRayOriginKHR;
+ pub const WorldRayDirectionNV = BuiltIn.WorldRayDirectionKHR;
+ pub const ObjectRayOriginNV = BuiltIn.ObjectRayOriginKHR;
+ pub const ObjectRayDirectionNV = BuiltIn.ObjectRayDirectionKHR;
+ pub const RayTminNV = BuiltIn.RayTminKHR;
+ pub const RayTmaxNV = BuiltIn.RayTmaxKHR;
+ pub const InstanceCustomIndexNV = BuiltIn.InstanceCustomIndexKHR;
+ pub const ObjectToWorldNV = BuiltIn.ObjectToWorldKHR;
+ pub const WorldToObjectNV = BuiltIn.WorldToObjectKHR;
+ pub const HitKindNV = BuiltIn.HitKindKHR;
+ pub const IncomingRayFlagsNV = BuiltIn.IncomingRayFlagsKHR;
};
pub const Scope = enum(u32) {
CrossDevice = 0,
@@ -1452,9 +2355,9 @@ pub const Scope = enum(u32) {
Subgroup = 3,
Invocation = 4,
QueueFamily = 5,
- QueueFamilyKHR = 5,
ShaderCallKHR = 6,
- _,
+
+ pub const QueueFamilyKHR = Scope.QueueFamily;
};
pub const GroupOperation = enum(u32) {
Reduce = 0,
@@ -1464,13 +2367,11 @@ pub const GroupOperation = enum(u32) {
PartitionedReduceNV = 6,
PartitionedInclusiveScanNV = 7,
PartitionedExclusiveScanNV = 8,
- _,
};
pub const KernelEnqueueFlags = enum(u32) {
NoWait = 0,
WaitKernel = 1,
WaitWorkGroup = 2,
- _,
};
pub const Capability = enum(u32) {
Matrix = 0,
@@ -1550,7 +2451,7 @@ pub const Capability = enum(u32) {
WorkgroupMemoryExplicitLayout16BitAccessKHR = 4430,
SubgroupVoteKHR = 4431,
StorageBuffer16BitAccess = 4433,
- StorageUniform16 = 4434,
+ UniformAndStorageBuffer16BitAccess = 4434,
StoragePushConstant16 = 4435,
StorageInputOutput16 = 4436,
DeviceGroup = 4437,
@@ -1580,7 +2481,7 @@ pub const Capability = enum(u32) {
ShaderClockKHR = 5055,
SampleMaskOverrideCoverageNV = 5249,
GeometryShaderPassthroughNV = 5251,
- ShaderViewportIndexLayerNV = 5254,
+ ShaderViewportIndexLayerEXT = 5254,
ShaderViewportMaskNV = 5255,
ShaderStereoViewNV = 5259,
PerViewAttributesNV = 5260,
@@ -1589,7 +2490,7 @@ pub const Capability = enum(u32) {
ImageFootprintNV = 5282,
FragmentBarycentricNV = 5284,
ComputeDerivativeGroupQuadsNV = 5288,
- ShadingRateNV = 5291,
+ FragmentDensityEXT = 5291,
GroupNonUniformPartitionedNV = 5297,
ShaderNonUniform = 5301,
RuntimeDescriptorArray = 5302,
@@ -1654,21 +2555,37 @@ pub const Capability = enum(u32) {
AtomicFloat32AddEXT = 6033,
AtomicFloat64AddEXT = 6034,
LongConstantCompositeINTEL = 6089,
- _,
+
+ pub const StorageUniformBufferBlock16 = Capability.StorageBuffer16BitAccess;
+ pub const StorageUniform16 = Capability.UniformAndStorageBuffer16BitAccess;
+ pub const ShaderViewportIndexLayerNV = Capability.ShaderViewportIndexLayerEXT;
+ pub const ShadingRateNV = Capability.FragmentDensityEXT;
+ pub const ShaderNonUniformEXT = Capability.ShaderNonUniform;
+ pub const RuntimeDescriptorArrayEXT = Capability.RuntimeDescriptorArray;
+ pub const InputAttachmentArrayDynamicIndexingEXT = Capability.InputAttachmentArrayDynamicIndexing;
+ pub const UniformTexelBufferArrayDynamicIndexingEXT = Capability.UniformTexelBufferArrayDynamicIndexing;
+ pub const StorageTexelBufferArrayDynamicIndexingEXT = Capability.StorageTexelBufferArrayDynamicIndexing;
+ pub const UniformBufferArrayNonUniformIndexingEXT = Capability.UniformBufferArrayNonUniformIndexing;
+ pub const SampledImageArrayNonUniformIndexingEXT = Capability.SampledImageArrayNonUniformIndexing;
+ pub const StorageBufferArrayNonUniformIndexingEXT = Capability.StorageBufferArrayNonUniformIndexing;
+ pub const StorageImageArrayNonUniformIndexingEXT = Capability.StorageImageArrayNonUniformIndexing;
+ pub const InputAttachmentArrayNonUniformIndexingEXT = Capability.InputAttachmentArrayNonUniformIndexing;
+ pub const UniformTexelBufferArrayNonUniformIndexingEXT = Capability.UniformTexelBufferArrayNonUniformIndexing;
+ pub const StorageTexelBufferArrayNonUniformIndexingEXT = Capability.StorageTexelBufferArrayNonUniformIndexing;
+ pub const VulkanMemoryModelKHR = Capability.VulkanMemoryModel;
+ pub const VulkanMemoryModelDeviceScopeKHR = Capability.VulkanMemoryModelDeviceScope;
+ pub const PhysicalStorageBufferAddressesEXT = Capability.PhysicalStorageBufferAddresses;
};
pub const RayQueryIntersection = enum(u32) {
RayQueryCandidateIntersectionKHR = 0,
RayQueryCommittedIntersectionKHR = 1,
- _,
};
pub const RayQueryCommittedIntersectionType = enum(u32) {
RayQueryCommittedIntersectionNoneKHR = 0,
RayQueryCommittedIntersectionTriangleKHR = 1,
RayQueryCommittedIntersectionGeneratedKHR = 2,
- _,
};
pub const RayQueryCandidateIntersectionType = enum(u32) {
RayQueryCandidateIntersectionTriangleKHR = 0,
RayQueryCandidateIntersectionAABBKHR = 1,
- _,
};
diff --git a/src/codegen/spirv/type.zig b/src/codegen/spirv/type.zig
new file mode 100644
index 0000000000..3eef2d8040
--- /dev/null
+++ b/src/codegen/spirv/type.zig
@@ -0,0 +1,433 @@
+//! This module models a SPIR-V type. SPIR-V types are distinct from Zig types;
+//! some of them (such as samplers or images) cannot be represented directly by a Zig type.
+
+const std = @import("std");
+const assert = std.debug.assert;
+
+const spec = @import("spec.zig");
+
+pub const Type = extern union {
+ tag_if_small_enough: Tag,
+ ptr_otherwise: *Payload,
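+
+    // The two cases are distinguished by value: any valid `*Payload` address is
+    // larger than `Tag.no_payload_count`, so `tag()` can tell them apart by
+    // comparing the raw bits against that bound.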
+
+ /// A reference to another SPIR-V type.
+ pub const Ref = usize;
+
+ pub fn initTag(comptime small_tag: Tag) Type {
+ comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
+ return .{ .tag_if_small_enough = small_tag };
+ }
+
+ pub fn initPayload(pl: *Payload) Type {
+ assert(@enumToInt(pl.tag) >= Tag.no_payload_count);
+ return .{ .ptr_otherwise = pl };
+ }
+
+ pub fn tag(self: Type) Tag {
+ if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
+ return self.tag_if_small_enough;
+ } else {
+ return self.ptr_otherwise.tag;
+ }
+ }
+
+ pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
+ if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
+ return null;
+
+ if (self.ptr_otherwise.tag == t)
+ return self.payload(t);
+
+ return null;
+ }
+
+ /// Access the payload of a type directly.
+ pub fn payload(self: Type, comptime t: Tag) *t.Type() {
+ assert(self.tag() == t);
+ return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+ }
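+
+    // Illustrative usage (a sketch, not part of the original patch): payload-less
+    // types are stored inline in the tag, while payloaded types borrow a
+    // caller-owned `Payload`:
+    //
+    //     const bool_ty = Type.initTag(.bool);
+    //     var int_pl = Payload.Int{ .width = 32, .signedness = .signed };
+    //     const int_ty = Type.initPayload(&int_pl.base);
+    //     assert(int_ty.tag() == .int and int_ty.payload(.int).width == 32);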
+
+    /// Perform a shallow equality test: compare two types structurally, considering any
+    /// child types equal only if their references are equal.
+ pub fn eqlShallow(a: Type, b: Type) bool {
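+        // Fast path: for payload-less types this compares the tags directly; for
+        // payloaded types it compares the raw pointer bits, and identical
+        // pointers always denote the same type.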
+ if (a.tag_if_small_enough == b.tag_if_small_enough)
+ return true;
+
+ const tag_a = a.tag();
+ const tag_b = b.tag();
+ if (tag_a != tag_b)
+ return false;
+
+ inline for (@typeInfo(Tag).Enum.fields) |field| {
+ const t = @field(Tag, field.name);
+ if (t == tag_a) {
+ return eqlPayloads(t, a, b);
+ }
+ }
+
+ unreachable;
+ }
+
+    /// Compare the payloads of two types that are known to have the same tag.
+ fn eqlPayloads(comptime t: Tag, a: Type, b: Type) bool {
+ switch (t) {
+ .void,
+ .bool,
+ .sampler,
+ .event,
+ .device_event,
+ .reserve_id,
+ .queue,
+ .pipe_storage,
+ .named_barrier,
+ => return true,
+ .int,
+ .float,
+ .vector,
+ .matrix,
+ .sampled_image,
+ .array,
+ .runtime_array,
+ .@"opaque",
+ .pointer,
+ .pipe,
+ .image,
+ => return std.meta.eql(a.payload(t).*, b.payload(t).*),
+ .@"struct" => {
+ const struct_a = a.payload(.@"struct");
+ const struct_b = b.payload(.@"struct");
+ if (struct_a.members.len != struct_b.members.len)
+ return false;
+ for (struct_a.members) |mem_a, i| {
+ if (!std.meta.eql(mem_a, struct_b.members[i]))
+ return false;
+ }
+ return true;
+ },
+            .function => {
+ const fn_a = a.payload(.function);
+ const fn_b = b.payload(.function);
+ if (fn_a.return_type != fn_b.return_type)
+ return false;
+ return std.mem.eql(Ref, fn_a.parameters, fn_b.parameters);
+ },
+ }
+ }
+
+ /// Perform a shallow hash, which hashes the reference value of child types instead of recursing.
+ pub fn hashShallow(self: Type) u64 {
+ var hasher = std.hash.Wyhash.init(0);
+ const t = self.tag();
+ std.hash.autoHash(&hasher, t);
+
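+        // Dispatch with a comptime-known tag: the `inline for` unrolls the loop
+        // so that `hashPayload` can be instantiated for each payloaded tag.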
+ inline for (@typeInfo(Tag).Enum.fields) |field| {
+ if (@field(Tag, field.name) == t) {
+ switch (@field(Tag, field.name)) {
+ .void,
+ .bool,
+ .sampler,
+ .event,
+ .device_event,
+ .reserve_id,
+ .queue,
+ .pipe_storage,
+ .named_barrier,
+ => {},
+ else => self.hashPayload(@field(Tag, field.name), &hasher),
+ }
+ }
+ }
+
+ return hasher.final();
+ }
+
+    /// Hash the payload of a type whose tag is known at compile time.
+ fn hashPayload(self: Type, comptime t: Tag, hasher: *std.hash.Wyhash) void {
+ const fields = @typeInfo(t.Type()).Struct.fields;
+ const pl = self.payload(t);
+ comptime assert(std.mem.eql(u8, fields[0].name, "base"));
+ inline for (fields[1..]) |field| { // Skip the 'base' field.
+ std.hash.autoHashStrat(hasher, @field(pl, field.name), .DeepRecursive);
+ }
+ }
+
+ /// Hash context that hashes and compares types in a shallow fashion, useful for type caches.
+ pub const ShallowHashContext32 = struct {
+ pub fn hash(self: @This(), t: Type) u32 {
+ _ = self;
+ return @truncate(u32, t.hashShallow());
+ }
+ pub fn eql(self: @This(), a: Type, b: Type) bool {
+ _ = self;
+ return a.eqlShallow(b);
+ }
+ };
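+
+    // A minimal sketch of the intended use (assumed, not part of this patch):
+    // the 32-bit hash fits `std.ArrayHashMapUnmanaged`, e.g. to deduplicate
+    // types into `Ref`s:
+    //
+    //     var cache = std.ArrayHashMapUnmanaged(Type, Ref, ShallowHashContext32, true){};
+    //     const gop = try cache.getOrPut(allocator, ty);
+    //     if (!gop.found_existing) gop.value_ptr.* = next_ref;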
+
+ /// Return the reference to any child type. Asserts the type is one of:
+ /// - Vectors
+ /// - Matrices
+ /// - Images
+    /// - SampledImages
+ /// - Arrays
+ /// - RuntimeArrays
+ /// - Pointers
+ pub fn childType(self: Type) Ref {
+ return switch (self.tag()) {
+ .vector => self.payload(.vector).component_type,
+ .matrix => self.payload(.matrix).column_type,
+ .image => self.payload(.image).sampled_type,
+ .sampled_image => self.payload(.sampled_image).image_type,
+ .array => self.payload(.array).element_type,
+ .runtime_array => self.payload(.runtime_array).element_type,
+ .pointer => self.payload(.pointer).child_type,
+ else => unreachable,
+ };
+ }
+
+ pub const Tag = enum(usize) {
+ void,
+ bool,
+ sampler,
+ event,
+ device_event,
+ reserve_id,
+ queue,
+ pipe_storage,
+ named_barrier,
+
+ // After this, the tag requires a payload.
+ int,
+ float,
+ vector,
+ matrix,
+ image,
+ sampled_image,
+ array,
+ runtime_array,
+ @"struct",
+ @"opaque",
+ pointer,
+ function,
+ pipe,
+
+ pub const last_no_payload_tag = Tag.named_barrier;
+ pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
+
+ pub fn Type(comptime t: Tag) type {
+ return switch (t) {
+ .void, .bool, .sampler, .event, .device_event, .reserve_id, .queue, .pipe_storage, .named_barrier => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
+ .int => Payload.Int,
+ .float => Payload.Float,
+ .vector => Payload.Vector,
+ .matrix => Payload.Matrix,
+ .image => Payload.Image,
+ .sampled_image => Payload.SampledImage,
+ .array => Payload.Array,
+ .runtime_array => Payload.RuntimeArray,
+ .@"struct" => Payload.Struct,
+ .@"opaque" => Payload.Opaque,
+ .pointer => Payload.Pointer,
+ .function => Payload.Function,
+ .pipe => Payload.Pipe,
+ };
+ }
+ };
+
+ pub const Payload = struct {
+ tag: Tag,
+
+ pub const Int = struct {
+ base: Payload = .{ .tag = .int },
+ width: u32,
+ signedness: std.builtin.Signedness,
+ };
+
+ pub const Float = struct {
+ base: Payload = .{ .tag = .float },
+ width: u32,
+ };
+
+ pub const Vector = struct {
+ base: Payload = .{ .tag = .vector },
+ component_type: Ref,
+ component_count: u32,
+ };
+
+ pub const Matrix = struct {
+ base: Payload = .{ .tag = .matrix },
+ column_type: Ref,
+ column_count: u32,
+ };
+
+ pub const Image = struct {
+ base: Payload = .{ .tag = .image },
+ sampled_type: Ref,
+ dim: spec.Dim,
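+            /// Corresponds to the 'Depth' operand of OpTypeImage:
+            /// 0 = not a depth image, 1 = a depth image, 2 = no indication.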
+ depth: enum(u2) {
+ no = 0,
+ yes = 1,
+ maybe = 2,
+ },
+ arrayed: bool,
+ multisampled: bool,
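+            /// Corresponds to the 'Sampled' operand of OpTypeImage:
+            /// 0 = known only at run time, 1 = used with a sampler,
+            /// 2 = read/write without a sampler (a storage image).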
+ sampled: enum(u2) {
+ known_at_runtime = 0,
+ with_sampler = 1,
+ without_sampler = 2,
+ },
+ format: spec.ImageFormat,
+ access_qualifier: ?spec.AccessQualifier,
+ };
+
+ pub const SampledImage = struct {
+ base: Payload = .{ .tag = .sampled_image },
+ image_type: Ref,
+ };
+
+ pub const Array = struct {
+ base: Payload = .{ .tag = .array },
+ element_type: Ref,
+            /// Note: Must be emitted as the <id> of a constant instruction, not as an immediate literal!
+ length: u32,
+ /// Type has the 'ArrayStride' decoration.
+ /// If zero, no stride is present.
+ array_stride: u32,
+ };
+
+ pub const RuntimeArray = struct {
+ base: Payload = .{ .tag = .runtime_array },
+ element_type: Ref,
+ /// Type has the 'ArrayStride' decoration.
+ /// If zero, no stride is present.
+ array_stride: u32,
+ };
+
+ pub const Struct = struct {
+ base: Payload = .{ .tag = .@"struct" },
+ members: []Member,
+ decorations: StructDecorations,
+
+            /// Extra information for decorations, packed for efficiency. Fields are stored
+            /// sequentially, ordered first by the `members` slice and then by the field
+            /// order of the `MemberDecorations` struct.
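+            /// For example, a member whose `matrix_layout` is not `.none` and whose
+            /// `location` bit is set contributes two words here: the matrix stride
+            /// followed by the location.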
+ member_decoration_extra: []u32,
+
+ pub const Member = struct {
+ ty: Ref,
+ offset: u32,
+ decorations: MemberDecorations,
+ };
+
+ pub const StructDecorations = packed struct {
+ /// Type has the 'Block' decoration.
+ block: bool,
+ /// Type has the 'BufferBlock' decoration.
+ buffer_block: bool,
+ /// Type has the 'GLSLShared' decoration.
+ glsl_shared: bool,
+ /// Type has the 'GLSLPacked' decoration.
+ glsl_packed: bool,
+ /// Type has the 'CPacked' decoration.
+ c_packed: bool,
+ };
+
+ pub const MemberDecorations = packed struct {
+ /// Matrix layout for (arrays of) matrices. If this field is not .none,
+ /// then there is also an extra field containing the matrix stride corresponding
+ /// to the 'MatrixStride' decoration.
+ matrix_layout: enum(u2) {
+ /// Member has the 'RowMajor' decoration. The member type
+ /// must be a matrix or an array of matrices.
+ row_major,
+ /// Member has the 'ColMajor' decoration. The member type
+ /// must be a matrix or an array of matrices.
+ col_major,
+ /// Member is not a matrix or array of matrices.
+ none,
+ },
+
+ // Regular decorations, these do not imply extra fields.
+
+ /// Member has the 'NoPerspective' decoration.
+ no_perspective: bool,
+ /// Member has the 'Flat' decoration.
+ flat: bool,
+ /// Member has the 'Patch' decoration.
+ patch: bool,
+ /// Member has the 'Centroid' decoration.
+ centroid: bool,
+ /// Member has the 'Sample' decoration.
+ sample: bool,
+ /// Member has the 'Invariant' decoration.
+ /// Note: requires parent struct to have 'Block'.
+ invariant: bool,
+ /// Member has the 'Volatile' decoration.
+ @"volatile": bool,
+ /// Member has the 'Coherent' decoration.
+ coherent: bool,
+ /// Member has the 'NonWritable' decoration.
+ non_writable: bool,
+ /// Member has the 'NonReadable' decoration.
+ non_readable: bool,
+
+ // The following decorations all imply extra field(s).
+
+ /// Member has the 'BuiltIn' decoration.
+ /// This decoration has an extra field of type `spec.BuiltIn`.
+ /// Note: If any member of a struct has the BuiltIn decoration, all members must have one.
+ /// Note: Each builtin may only be reachable once for a particular entry point.
+ /// Note: The member type may be constrained by a particular built-in, defined in the client API specification.
+ builtin: bool,
+ /// Member has the 'Stream' decoration.
+ /// This member has an extra field of type `u32`.
+ stream: bool,
+ /// Member has the 'Location' decoration.
+ /// This member has an extra field of type `u32`.
+ location: bool,
+ /// Member has the 'Component' decoration.
+ /// This member has an extra field of type `u32`.
+ component: bool,
+ /// Member has the 'XfbBuffer' decoration.
+ /// This member has an extra field of type `u32`.
+ xfb_buffer: bool,
+ /// Member has the 'XfbStride' decoration.
+ /// This member has an extra field of type `u32`.
+ xfb_stride: bool,
+ /// Member has the 'UserSemantic' decoration.
+                /// This member has an extra field of type `[]u8`, encoded as a `u32`
+                /// holding the exact number of characters, followed by the string itself,
+                /// zero-padded to a multiple of 4 bytes.
+ user_semantic: bool,
+ };
+ };
+
+ pub const Opaque = struct {
+ base: Payload = .{ .tag = .@"opaque" },
+ name: []u8,
+ };
+
+ pub const Pointer = struct {
+ base: Payload = .{ .tag = .pointer },
+ storage_class: spec.StorageClass,
+ child_type: Ref,
+ /// Type has the 'ArrayStride' decoration.
+ /// This is valid for pointers to elements of an array.
+ /// If zero, no stride is present.
+ array_stride: u32,
+ /// Type has the 'Alignment' decoration.
+ alignment: ?u32,
+ /// Type has the 'MaxByteOffset' decoration.
+ max_byte_offset: ?u32,
+ };
+
+ pub const Function = struct {
+ base: Payload = .{ .tag = .function },
+ return_type: Ref,
+ parameters: []Ref,
+ };
+
+ pub const Pipe = struct {
+ base: Payload = .{ .tag = .pipe },
+ qualifier: spec.AccessQualifier,
+ };
+ };
+};