author    Ali Cheraghi <alichraghi@proton.me>  2025-08-03 13:16:35 +0330
committer Ali Cheraghi <alichraghi@proton.me>  2025-08-03 13:16:49 +0330
commit    246e1de55485b0b4e9392529778b8f50275e204a (patch)
tree      3a9095b18eec5012d2728f68bef452010497f419 /src/codegen
parent    58b9200106c0eb721a13aea13e4ce55c4c0e340b (diff)
Watch: do not fail when file is removed
Before this we would get a crash.
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/spirv/Assembler.zig              1087
-rw-r--r--  src/codegen/spirv/CodeGen.zig                6168
-rw-r--r--  src/codegen/spirv/Module.zig                  955
-rw-r--r--  src/codegen/spirv/Section.zig                 282
-rw-r--r--  src/codegen/spirv/extinst.zig.grammar.json     11
-rw-r--r--  src/codegen/spirv/spec.zig                  18428
6 files changed, 26931 insertions, 0 deletions
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
new file mode 100644
index 0000000000..6e4e5ca558
--- /dev/null
+++ b/src/codegen/spirv/Assembler.zig
@@ -0,0 +1,1087 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+const CodeGen = @import("CodeGen.zig");
+const Decl = @import("Module.zig").Decl;
+
+const spec = @import("spec.zig");
+const Opcode = spec.Opcode;
+const Word = spec.Word;
+const Id = spec.Id;
+const StorageClass = spec.StorageClass;
+
+const Assembler = @This();
+
+cg: *CodeGen,
+errors: std.ArrayListUnmanaged(ErrorMsg) = .empty,
+src: []const u8 = undefined,
+/// `self.src` tokenized.
+tokens: std.ArrayListUnmanaged(Token) = .empty,
+current_token: u32 = 0,
+/// The instruction that is currently being parsed or has just been parsed.
+inst: struct {
+ opcode: Opcode = undefined,
+ operands: std.ArrayListUnmanaged(Operand) = .empty,
+ string_bytes: std.ArrayListUnmanaged(u8) = .empty,
+
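+    /// Returns the `Ref` of the operand that this instruction assigns its result to,
+    /// if any. The result-id is always among the first two operands, since an
+    /// `id_result_type` operand may precede it.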
+ fn result(self: @This()) ?AsmValue.Ref {
+ for (self.operands.items[0..@min(self.operands.items.len, 2)]) |op| {
+ switch (op) {
+ .result_id => |index| return index,
+ else => {},
+ }
+ }
+ return null;
+ }
+} = .{},
+value_map: std.StringArrayHashMapUnmanaged(AsmValue) = .{},
+inst_map: std.StringArrayHashMapUnmanaged(void) = .empty,
+
+const Operand = union(enum) {
+    /// Any 'simple' 32-bit value. This could be a mask,
+    /// an enumerant, etc., depending on the operand kind.
+ value: u32,
+ /// An int- or float literal encoded as 1 word.
+ literal32: u32,
+ /// An int- or float literal encoded as 2 words.
+ literal64: u64,
+ /// A result-id which is assigned to in this instruction.
+ /// If present, this is the first operand of the instruction.
+ result_id: AsmValue.Ref,
+    /// A result-id which is referred to (not assigned to) in this instruction.
+ ref_id: AsmValue.Ref,
+ /// Offset into `inst.string_bytes`. The string ends at the next zero-terminator.
+ string: u32,
+};
+
+pub fn deinit(self: *Assembler) void {
+ const gpa = self.cg.module.gpa;
+ for (self.errors.items) |err| gpa.free(err.msg);
+ self.tokens.deinit(gpa);
+ self.errors.deinit(gpa);
+ self.inst.operands.deinit(gpa);
+ self.inst.string_bytes.deinit(gpa);
+ self.value_map.deinit(gpa);
+ self.inst_map.deinit(gpa);
+}
+
+const Error = error{ AssembleFail, OutOfMemory };
+
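+/// Assemble the SPIR-V assembly text in `src`, emitting the resulting
+/// instructions into the module and/or the current function.
+/// The accepted syntax resembles SPIRV-Tools assembly, for example
+/// (an illustrative snippet):
+///   %f32 = OpTypeFloat 32
+///   %one = OpConstant %f32 1.0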
+pub fn assemble(self: *Assembler, src: []const u8) Error!void {
+ const gpa = self.cg.module.gpa;
+
+ self.src = src;
+ self.errors.clearRetainingCapacity();
+
+ // Populate the opcode map if it isn't already
+ if (self.inst_map.count() == 0) {
+ const instructions = spec.InstructionSet.core.instructions();
+ try self.inst_map.ensureUnusedCapacity(gpa, @intCast(instructions.len));
+ for (spec.InstructionSet.core.instructions(), 0..) |inst, i| {
+ const entry = try self.inst_map.getOrPut(gpa, inst.name);
+ assert(entry.index == i);
+ }
+ }
+
+ try self.tokenize();
+ while (!self.testToken(.eof)) {
+ try self.parseInstruction();
+ try self.processInstruction();
+ }
+
+ if (self.errors.items.len > 0) return error.AssembleFail;
+}
+
+const ErrorMsg = struct {
+    /// The offset in bytes from the start of `src` at which this error occurred.
+ byte_offset: u32,
+ msg: []const u8,
+};
+
+fn addError(self: *Assembler, offset: u32, comptime fmt: []const u8, args: anytype) !void {
+ const gpa = self.cg.module.gpa;
+ const msg = try std.fmt.allocPrint(gpa, fmt, args);
+ errdefer gpa.free(msg);
+ try self.errors.append(gpa, .{
+ .byte_offset = offset,
+ .msg = msg,
+ });
+}
+
+fn fail(self: *Assembler, offset: u32, comptime fmt: []const u8, args: anytype) Error {
+ try self.addError(offset, fmt, args);
+ return error.AssembleFail;
+}
+
+fn todo(self: *Assembler, comptime fmt: []const u8, args: anytype) Error {
+ return self.fail(0, "todo: " ++ fmt, args);
+}
+
+const AsmValue = union(enum) {
+ /// The results are stored in an array hash map, and can be referred
+ /// to either by name (without the %), or by values of this index type.
+ pub const Ref = u32;
+
+    /// The result being assigned by the instruction that is currently being processed.
+ just_declared,
+    /// A placeholder for ref-ids for which the result-id is not yet known.
+    /// It will be further resolved at a later stage to a more concrete forward reference.
+ unresolved_forward_reference,
+ /// A normal result produced by a different instruction.
+ value: Id,
+ /// A type registered into the module's type system.
+ ty: Id,
+ /// A pre-supplied constant integer value.
+ constant: u32,
+ string: []const u8,
+
+ /// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
+ /// is of a variant that allows the result to be obtained (not an unresolved
+ /// forward declaration, not in the process of being declared, etc).
+ pub fn resultId(self: AsmValue) Id {
+ return switch (self) {
+ .just_declared,
+ .unresolved_forward_reference,
+ // TODO: Lower this value as constant?
+ .constant,
+ .string,
+ => unreachable,
+ .value => |result| result,
+ .ty => |result| result,
+ };
+ }
+};
+
+/// Attempt to process the instruction currently in `self.inst`.
+/// This, for example, emits the instruction into the module or function, or
+/// records type definitions.
+/// If this function returns `error.AssembleFail`, an explanatory
+/// error message has already been emitted into `self.errors`.
+fn processInstruction(self: *Assembler) !void {
+ const module = self.cg.module;
+ const result: AsmValue = switch (self.inst.opcode) {
+ .OpEntryPoint => {
+ return self.fail(self.currentToken().start, "cannot export entry points in assembly", .{});
+ },
+ .OpExecutionMode, .OpExecutionModeId => {
+ return self.fail(self.currentToken().start, "cannot set execution mode in assembly", .{});
+ },
+ .OpCapability => {
+ try module.addCapability(@enumFromInt(self.inst.operands.items[0].value));
+ return;
+ },
+ .OpExtension => {
+ const ext_name_offset = self.inst.operands.items[0].string;
+ const ext_name = std.mem.sliceTo(self.inst.string_bytes.items[ext_name_offset..], 0);
+ try module.addExtension(ext_name);
+ return;
+ },
+ .OpExtInstImport => blk: {
+ const set_name_offset = self.inst.operands.items[1].string;
+ const set_name = std.mem.sliceTo(self.inst.string_bytes.items[set_name_offset..], 0);
+ const set_tag = std.meta.stringToEnum(spec.InstructionSet, set_name) orelse {
+ return self.fail(set_name_offset, "unknown instruction set: {s}", .{set_name});
+ };
+ break :blk .{ .value = try module.importInstructionSet(set_tag) };
+ },
+ else => switch (self.inst.opcode.class()) {
+ .type_declaration => try self.processTypeInstruction(),
+ else => (try self.processGenericInstruction()) orelse return,
+ },
+ };
+
+ const result_ref = self.inst.result().?;
+ switch (self.value_map.values()[result_ref]) {
+ .just_declared => self.value_map.values()[result_ref] = result,
+ else => {
+ // TODO: Improve source location.
+ const name = self.value_map.keys()[result_ref];
+ return self.fail(0, "duplicate definition of %{s}", .{name});
+ },
+ }
+}
+
+fn processTypeInstruction(self: *Assembler) !AsmValue {
+ const gpa = self.cg.module.gpa;
+ const module = self.cg.module;
+ const operands = self.inst.operands.items;
+ const section = &module.sections.globals;
+ const id = switch (self.inst.opcode) {
+ .OpTypeVoid => try module.voidType(),
+ .OpTypeBool => try module.boolType(),
+ .OpTypeInt => blk: {
+ const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
+ 0 => .unsigned,
+ 1 => .signed,
+ else => {
+ // TODO: Improve source location.
+ return self.fail(0, "{} is not a valid signedness (expected 0 or 1)", .{operands[2].literal32});
+ },
+ };
+ const width = std.math.cast(u16, operands[1].literal32) orelse {
+ return self.fail(0, "int type of {} bits is too large", .{operands[1].literal32});
+ };
+ break :blk try module.intType(signedness, width);
+ },
+ .OpTypeFloat => blk: {
+ const bits = operands[1].literal32;
+ switch (bits) {
+ 16, 32, 64 => {},
+ else => {
+ return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
+ },
+ }
+ break :blk try module.floatType(@intCast(bits));
+ },
+ .OpTypeVector => blk: {
+ const child_type = try self.resolveRefId(operands[1].ref_id);
+ break :blk try module.vectorType(operands[2].literal32, child_type);
+ },
+ .OpTypeArray => {
+            // TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant),
+            // so care must be taken when entering it into the type system.
+ return self.todo("process OpTypeArray", .{});
+ },
+ .OpTypeRuntimeArray => blk: {
+ const element_type = try self.resolveRefId(operands[1].ref_id);
+ const result_id = module.allocId();
+ try section.emit(module.gpa, .OpTypeRuntimeArray, .{
+ .id_result = result_id,
+ .element_type = element_type,
+ });
+ break :blk result_id;
+ },
+ .OpTypePointer => blk: {
+ const storage_class: StorageClass = @enumFromInt(operands[1].value);
+ const child_type = try self.resolveRefId(operands[2].ref_id);
+ const result_id = module.allocId();
+ try section.emit(module.gpa, .OpTypePointer, .{
+ .id_result = result_id,
+ .storage_class = storage_class,
+ .type = child_type,
+ });
+ break :blk result_id;
+ },
+ .OpTypeStruct => blk: {
+ const ids = try gpa.alloc(Id, operands[1..].len);
+ defer gpa.free(ids);
+ for (operands[1..], ids) |op, *id| id.* = try self.resolveRefId(op.ref_id);
+ break :blk try module.structType(ids, null, null, .none);
+ },
+ .OpTypeImage => blk: {
+ const sampled_type = try self.resolveRefId(operands[1].ref_id);
+ const result_id = module.allocId();
+ try section.emit(gpa, .OpTypeImage, .{
+ .id_result = result_id,
+ .sampled_type = sampled_type,
+ .dim = @enumFromInt(operands[2].value),
+ .depth = operands[3].literal32,
+ .arrayed = operands[4].literal32,
+ .ms = operands[5].literal32,
+ .sampled = operands[6].literal32,
+ .image_format = @enumFromInt(operands[7].value),
+ });
+ break :blk result_id;
+ },
+ .OpTypeSampler => blk: {
+ const result_id = module.allocId();
+ try section.emit(gpa, .OpTypeSampler, .{ .id_result = result_id });
+ break :blk result_id;
+ },
+ .OpTypeSampledImage => blk: {
+ const image_type = try self.resolveRefId(operands[1].ref_id);
+ const result_id = module.allocId();
+ try section.emit(gpa, .OpTypeSampledImage, .{ .id_result = result_id, .image_type = image_type });
+ break :blk result_id;
+ },
+ .OpTypeFunction => blk: {
+ const param_operands = operands[2..];
+ const return_type = try self.resolveRefId(operands[1].ref_id);
+
+ const param_types = try module.gpa.alloc(Id, param_operands.len);
+ defer module.gpa.free(param_types);
+ for (param_types, param_operands) |*param, operand| {
+ param.* = try self.resolveRefId(operand.ref_id);
+ }
+ const result_id = module.allocId();
+ try section.emit(module.gpa, .OpTypeFunction, .{
+ .id_result = result_id,
+ .return_type = return_type,
+ .id_ref_2 = param_types,
+ });
+ break :blk result_id;
+ },
+ else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}),
+ };
+
+ return .{ .ty = id };
+}
+
+/// - No forward references are allowed in operands.
+/// - Target section is determined from instruction type.
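+/// Emits the instruction into the chosen section and returns the value assigned
+/// to its result-id, or null if the instruction does not produce a result.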
+fn processGenericInstruction(self: *Assembler) !?AsmValue {
+ const module = self.cg.module;
+ const target = module.zcu.getTarget();
+ const operands = self.inst.operands.items;
+ var maybe_spv_decl_index: ?Decl.Index = null;
+ const section = switch (self.inst.opcode.class()) {
+ .constant_creation => &module.sections.globals,
+ .annotation => &module.sections.annotations,
+ .type_declaration => unreachable, // Handled elsewhere.
+ else => switch (self.inst.opcode) {
+ .OpEntryPoint => unreachable,
+ .OpExecutionMode, .OpExecutionModeId => &module.sections.execution_modes,
+ .OpVariable => section: {
+ const storage_class: spec.StorageClass = @enumFromInt(operands[2].value);
+ if (storage_class == .function) break :section &self.cg.prologue;
+ maybe_spv_decl_index = try module.allocDecl(.global);
+ if (!target.cpu.has(.spirv, .v1_4) and storage_class != .input and storage_class != .output) {
+                // Before version 1.4, the interface's storage classes are limited to Input and Output.
+ break :section &module.sections.globals;
+ }
+ try self.cg.decl_deps.put(module.gpa, maybe_spv_decl_index.?, {});
+ try module.declareDeclDeps(maybe_spv_decl_index.?, &.{});
+ break :section &module.sections.globals;
+ },
+ else => &self.cg.body,
+ },
+ };
+
+ var maybe_result_id: ?Id = null;
+ const first_word = section.instructions.items.len;
+ // At this point we're not quite sure how many operands this instruction is
+ // going to have, so insert 0 and patch up the actual opcode word later.
+ try section.ensureUnusedCapacity(module.gpa, 1);
+ section.writeWord(0);
+
+ for (operands) |operand| {
+ switch (operand) {
+ .value, .literal32 => |word| {
+ try section.ensureUnusedCapacity(module.gpa, 1);
+ section.writeWord(word);
+ },
+ .literal64 => |dword| {
+ try section.ensureUnusedCapacity(module.gpa, 2);
+ section.writeDoubleWord(dword);
+ },
+ .result_id => {
+ maybe_result_id = if (maybe_spv_decl_index) |spv_decl_index|
+ module.declPtr(spv_decl_index).result_id
+ else
+ module.allocId();
+ try section.ensureUnusedCapacity(module.gpa, 1);
+ section.writeOperand(Id, maybe_result_id.?);
+ },
+ .ref_id => |index| {
+ const result = try self.resolveRef(index);
+ try section.ensureUnusedCapacity(module.gpa, 1);
+ section.writeOperand(spec.Id, result.resultId());
+ },
+ .string => |offset| {
+ const text = std.mem.sliceTo(self.inst.string_bytes.items[offset..], 0);
+ const size = std.math.divCeil(usize, text.len + 1, @sizeOf(Word)) catch unreachable;
+ try section.ensureUnusedCapacity(module.gpa, size);
+ section.writeOperand(spec.LiteralString, text);
+ },
+ }
+ }
+
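+    // The first word of a SPIR-V instruction encodes the word count in its high
+    // 16 bits and the opcode in its low 16 bits; patch it in now that the total
+    // length is known.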
+ const actual_word_count = section.instructions.items.len - first_word;
+ section.instructions.items[first_word] |= @as(u32, @as(u16, @intCast(actual_word_count))) << 16 | @intFromEnum(self.inst.opcode);
+
+ if (maybe_result_id) |result| return .{ .value = result };
+ return null;
+}
+
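+/// Resolve a reference to its current value, allowing it to still be an
+/// unresolved forward reference. Fails only if the value is self-referential.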
+fn resolveMaybeForwardRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
+ const value = self.value_map.values()[ref];
+ switch (value) {
+ .just_declared => {
+ const name = self.value_map.keys()[ref];
+ // TODO: Improve source location.
+ return self.fail(0, "self-referential parameter %{s}", .{name});
+ },
+ else => return value,
+ }
+}
+
+fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
+ const value = try self.resolveMaybeForwardRef(ref);
+ switch (value) {
+ .just_declared => unreachable,
+ .unresolved_forward_reference => {
+ const name = self.value_map.keys()[ref];
+ // TODO: Improve source location.
+ return self.fail(0, "reference to undeclared result-id %{s}", .{name});
+ },
+ else => return value,
+ }
+}
+
+fn resolveRefId(self: *Assembler, ref: AsmValue.Ref) !Id {
+ const value = try self.resolveRef(ref);
+ return value.resultId();
+}
+
+fn parseInstruction(self: *Assembler) !void {
+ const gpa = self.cg.module.gpa;
+
+ self.inst.opcode = undefined;
+ self.inst.operands.clearRetainingCapacity();
+ self.inst.string_bytes.clearRetainingCapacity();
+
+ const lhs_result_tok = self.currentToken();
+ const maybe_lhs_result: ?AsmValue.Ref = if (self.eatToken(.result_id_assign)) blk: {
+ const name = self.tokenText(lhs_result_tok)[1..];
+ const entry = try self.value_map.getOrPut(gpa, name);
+ try self.expectToken(.equals);
+ if (!entry.found_existing) {
+ entry.value_ptr.* = .just_declared;
+ }
+ break :blk @intCast(entry.index);
+ } else null;
+
+ const opcode_tok = self.currentToken();
+ if (maybe_lhs_result != null) {
+ try self.expectToken(.opcode);
+ } else if (!self.eatToken(.opcode)) {
+ return self.fail(opcode_tok.start, "expected start of instruction, found {s}", .{opcode_tok.tag.name()});
+ }
+
+ const opcode_text = self.tokenText(opcode_tok);
+ const index = self.inst_map.getIndex(opcode_text) orelse {
+ return self.fail(opcode_tok.start, "invalid opcode '{s}'", .{opcode_text});
+ };
+
+ const inst = spec.InstructionSet.core.instructions()[index];
+ self.inst.opcode = @enumFromInt(inst.opcode);
+
+ const expected_operands = inst.operands;
+ // This is a loop because the result-id is not always the first operand.
+ const requires_lhs_result = for (expected_operands) |op| {
+ if (op.kind == .id_result) break true;
+ } else false;
+
+ if (requires_lhs_result and maybe_lhs_result == null) {
+ return self.fail(opcode_tok.start, "opcode '{s}' expects result on left-hand side", .{@tagName(self.inst.opcode)});
+ } else if (!requires_lhs_result and maybe_lhs_result != null) {
+ return self.fail(
+ lhs_result_tok.start,
+ "opcode '{s}' does not expect a result-id on the left-hand side",
+ .{@tagName(self.inst.opcode)},
+ );
+ }
+
+ for (expected_operands) |operand| {
+ if (operand.kind == .id_result) {
+ try self.inst.operands.append(gpa, .{ .result_id = maybe_lhs_result.? });
+ continue;
+ }
+
+ switch (operand.quantifier) {
+ .required => if (self.isAtInstructionBoundary()) {
+ return self.fail(
+ self.currentToken().start,
+ "missing required operand", // TODO: Operand name?
+ .{},
+ );
+ } else {
+ try self.parseOperand(operand.kind);
+ },
+ .optional => if (!self.isAtInstructionBoundary()) {
+ try self.parseOperand(operand.kind);
+ },
+ .variadic => while (!self.isAtInstructionBoundary()) {
+ try self.parseOperand(operand.kind);
+ },
+ }
+ }
+}
+
+fn parseOperand(self: *Assembler, kind: spec.OperandKind) Error!void {
+ switch (kind.category()) {
+ .bit_enum => try self.parseBitEnum(kind),
+ .value_enum => try self.parseValueEnum(kind),
+ .id => try self.parseRefId(),
+ else => switch (kind) {
+ .literal_integer => try self.parseLiteralInteger(),
+ .literal_string => try self.parseString(),
+ .literal_context_dependent_number => try self.parseContextDependentNumber(),
+ .literal_ext_inst_integer => try self.parseLiteralExtInstInteger(),
+ .pair_id_ref_id_ref => try self.parsePhiSource(),
+ else => return self.todo("parse operand of type {s}", .{@tagName(kind)}),
+ },
+ }
+}
+
+/// Also handles parsing any required extra operands.
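+/// Flags are separated by `|`, and any parameters required by set flags follow
+/// the mask, e.g. (illustrative) `Volatile|Aligned 4` for a MemoryAccess mask,
+/// where `4` is the alignment parameter required by `Aligned`.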
+fn parseBitEnum(self: *Assembler, kind: spec.OperandKind) !void {
+ const gpa = self.cg.module.gpa;
+
+ var tok = self.currentToken();
+ try self.expectToken(.value);
+
+ var text = self.tokenText(tok);
+ if (std.mem.eql(u8, text, "None")) {
+ try self.inst.operands.append(gpa, .{ .value = 0 });
+ return;
+ }
+
+ const enumerants = kind.enumerants();
+ var mask: u32 = 0;
+ while (true) {
+ const enumerant = for (enumerants) |enumerant| {
+ if (std.mem.eql(u8, enumerant.name, text))
+ break enumerant;
+ } else {
+ return self.fail(tok.start, "'{s}' is not a valid flag for bitmask {s}", .{ text, @tagName(kind) });
+ };
+ mask |= enumerant.value;
+ if (!self.eatToken(.pipe))
+ break;
+
+ tok = self.currentToken();
+ try self.expectToken(.value);
+ text = self.tokenText(tok);
+ }
+
+ try self.inst.operands.append(gpa, .{ .value = mask });
+
+ // Assume values are sorted.
+ // TODO: ensure in generator.
+ for (enumerants) |enumerant| {
+ if ((mask & enumerant.value) == 0)
+ continue;
+
+ for (enumerant.parameters) |param_kind| {
+ if (self.isAtInstructionBoundary()) {
+ return self.fail(self.currentToken().start, "missing required parameter for bit flag '{s}'", .{enumerant.name});
+ }
+
+ try self.parseOperand(param_kind);
+ }
+ }
+}
+
+/// Also handles parsing any required extra operands.
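+/// The enumerant may be given by name, by its integer value, or through a
+/// `$placeholder` that was previously bound to a constant or string.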
+fn parseValueEnum(self: *Assembler, kind: spec.OperandKind) !void {
+ const gpa = self.cg.module.gpa;
+
+ const tok = self.currentToken();
+ if (self.eatToken(.placeholder)) {
+ const name = self.tokenText(tok)[1..];
+ const value = self.value_map.get(name) orelse {
+ return self.fail(tok.start, "invalid placeholder '${s}'", .{name});
+ };
+ switch (value) {
+ .constant => |literal32| {
+ try self.inst.operands.append(gpa, .{ .value = literal32 });
+ },
+ .string => |str| {
+ const enumerant = for (kind.enumerants()) |enumerant| {
+ if (std.mem.eql(u8, enumerant.name, str)) break enumerant;
+ } else {
+ return self.fail(tok.start, "'{s}' is not a valid value for enumeration {s}", .{ str, @tagName(kind) });
+ };
+ try self.inst.operands.append(gpa, .{ .value = enumerant.value });
+ },
+ else => return self.fail(tok.start, "value '{s}' cannot be used as placeholder", .{name}),
+ }
+ return;
+ }
+
+ try self.expectToken(.value);
+
+ const text = self.tokenText(tok);
+ const int_value = std.fmt.parseInt(u32, text, 0) catch null;
+ const enumerant = for (kind.enumerants()) |enumerant| {
+ if (int_value) |v| {
+ if (v == enumerant.value) break enumerant;
+ } else {
+ if (std.mem.eql(u8, enumerant.name, text)) break enumerant;
+ }
+ } else {
+ return self.fail(tok.start, "'{s}' is not a valid value for enumeration {s}", .{ text, @tagName(kind) });
+ };
+
+ try self.inst.operands.append(gpa, .{ .value = enumerant.value });
+
+ for (enumerant.parameters) |param_kind| {
+ if (self.isAtInstructionBoundary()) {
+ return self.fail(self.currentToken().start, "missing required parameter for enum variant '{s}'", .{enumerant.name});
+ }
+
+ try self.parseOperand(param_kind);
+ }
+}
+
+fn parseRefId(self: *Assembler) !void {
+ const gpa = self.cg.module.gpa;
+
+ const tok = self.currentToken();
+ try self.expectToken(.result_id);
+
+ const name = self.tokenText(tok)[1..];
+ const entry = try self.value_map.getOrPut(gpa, name);
+ if (!entry.found_existing) {
+ entry.value_ptr.* = .unresolved_forward_reference;
+ }
+
+ const index: AsmValue.Ref = @intCast(entry.index);
+ try self.inst.operands.append(gpa, .{ .ref_id = index });
+}
+
+fn parseLiteralInteger(self: *Assembler) !void {
+ const gpa = self.cg.module.gpa;
+
+ const tok = self.currentToken();
+ if (self.eatToken(.placeholder)) {
+ const name = self.tokenText(tok)[1..];
+ const value = self.value_map.get(name) orelse {
+ return self.fail(tok.start, "invalid placeholder '${s}'", .{name});
+ };
+ switch (value) {
+ .constant => |literal32| {
+ try self.inst.operands.append(gpa, .{ .literal32 = literal32 });
+ },
+ else => {
+ return self.fail(tok.start, "value '{s}' cannot be used as placeholder", .{name});
+ },
+ }
+ return;
+ }
+
+ try self.expectToken(.value);
+    // According to the SPIR-V machine readable grammar, a LiteralInteger
+    // may consist of one or more words. From the SPIR-V docs it seems like there is
+    // only one instruction where multiple words are allowed: the literals that make up the
+    // switch cases of OpSwitch. That case is handled separately, so we just assume
+    // everything is a 32-bit integer in this function.
+ const text = self.tokenText(tok);
+ const value = std.fmt.parseInt(u32, text, 0) catch {
+ return self.fail(tok.start, "'{s}' is not a valid 32-bit integer literal", .{text});
+ };
+ try self.inst.operands.append(gpa, .{ .literal32 = value });
+}
+
+fn parseLiteralExtInstInteger(self: *Assembler) !void {
+ const gpa = self.cg.module.gpa;
+
+ const tok = self.currentToken();
+ if (self.eatToken(.placeholder)) {
+ const name = self.tokenText(tok)[1..];
+ const value = self.value_map.get(name) orelse {
+ return self.fail(tok.start, "invalid placeholder '${s}'", .{name});
+ };
+ switch (value) {
+ .constant => |literal32| {
+ try self.inst.operands.append(gpa, .{ .literal32 = literal32 });
+ },
+ else => {
+ return self.fail(tok.start, "value '{s}' cannot be used as placeholder", .{name});
+ },
+ }
+ return;
+ }
+
+ try self.expectToken(.value);
+ const text = self.tokenText(tok);
+ const value = std.fmt.parseInt(u32, text, 0) catch {
+ return self.fail(tok.start, "'{s}' is not a valid 32-bit integer literal", .{text});
+ };
+ try self.inst.operands.append(gpa, .{ .literal32 = value });
+}
+
+fn parseString(self: *Assembler) !void {
+ const gpa = self.cg.module.gpa;
+
+ const tok = self.currentToken();
+ try self.expectToken(.string);
+ // Note, the string might not have a closing quote. In this case,
+ // an error is already emitted but we are trying to continue processing
+ // anyway, so in this function we have to deal with that situation.
+ const text = self.tokenText(tok);
+ assert(text.len > 0 and text[0] == '"');
+ const literal = if (text.len != 1 and text[text.len - 1] == '"')
+ text[1 .. text.len - 1]
+ else
+ text[1..];
+
+ const string_offset: u32 = @intCast(self.inst.string_bytes.items.len);
+ try self.inst.string_bytes.ensureUnusedCapacity(gpa, literal.len + 1);
+ self.inst.string_bytes.appendSliceAssumeCapacity(literal);
+ self.inst.string_bytes.appendAssumeCapacity(0);
+
+ try self.inst.operands.append(gpa, .{ .string = string_offset });
+}
+
+fn parseContextDependentNumber(self: *Assembler) !void {
+ const module = self.cg.module;
+
+    // For context dependent numbers, the actual type to parse is determined by the instruction.
+    // Currently, this operand appears in OpConstant and OpSpecConstant, where the to-be-parsed type
+    // is determined by the result type. That means that for these instructions we have to resolve the
+    // operand type early and look at the result to see how we need to proceed.
+ assert(self.inst.opcode == .OpConstant or self.inst.opcode == .OpSpecConstant);
+
+ const tok = self.currentToken();
+ const result = try self.resolveRef(self.inst.operands.items[0].ref_id);
+ const result_id = result.resultId();
+ // We are going to cheat a little bit: The types we are interested in, int and float,
+ // are added to the module and cached via module.intType and module.floatType. Therefore,
+ // we can determine the width of these types by directly checking the cache.
+ // This only works if the Assembler and codegen both use spv.intType and spv.floatType though.
+ // We don't expect there to be many of these types, so just look it up every time.
+    // TODO: Could be improved to be a little bit more efficient.
+
+ {
+ var it = module.cache.int_types.iterator();
+ while (it.next()) |entry| {
+ const id = entry.value_ptr.*;
+ if (id != result_id) continue;
+ const info = entry.key_ptr.*;
+ return try self.parseContextDependentInt(info.signedness, info.bits);
+ }
+ }
+
+ {
+ var it = module.cache.float_types.iterator();
+ while (it.next()) |entry| {
+ const id = entry.value_ptr.*;
+ if (id != result_id) continue;
+ const info = entry.key_ptr.*;
+            switch (info.bits) {
+                16 => return try self.parseContextDependentFloat(16),
+                32 => return try self.parseContextDependentFloat(32),
+                64 => return try self.parseContextDependentFloat(64),
+                else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{info.bits}),
+            }
+ }
+ }
+
+ return self.fail(tok.start, "cannot parse literal constant", .{});
+}
+
+fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness, width: u32) !void {
+ const gpa = self.cg.module.gpa;
+
+ const tok = self.currentToken();
+ if (self.eatToken(.placeholder)) {
+ const name = self.tokenText(tok)[1..];
+ const value = self.value_map.get(name) orelse {
+ return self.fail(tok.start, "invalid placeholder '${s}'", .{name});
+ };
+ switch (value) {
+ .constant => |literal32| {
+ try self.inst.operands.append(gpa, .{ .literal32 = literal32 });
+ },
+ else => {
+ return self.fail(tok.start, "value '{s}' cannot be used as placeholder", .{name});
+ },
+ }
+ return;
+ }
+
+ try self.expectToken(.value);
+
+ if (width == 0 or width > 2 * @bitSizeOf(spec.Word)) {
+ return self.fail(tok.start, "cannot parse {}-bit integer literal", .{width});
+ }
+
+ const text = self.tokenText(tok);
+ invalid: {
+ // Just parse the integer as the next larger integer type, and check if it overflows afterwards.
+ const int = std.fmt.parseInt(i128, text, 0) catch break :invalid;
+ const min = switch (signedness) {
+ .unsigned => 0,
+ .signed => -(@as(i128, 1) << (@as(u7, @intCast(width)) - 1)),
+ };
+ const max = (@as(i128, 1) << (@as(u7, @intCast(width)) - @intFromBool(signedness == .signed))) - 1;
+ if (int < min or int > max) {
+ break :invalid;
+ }
+
+ // Note, we store the sign-extended version here.
+ if (width <= @bitSizeOf(spec.Word)) {
+ try self.inst.operands.append(gpa, .{ .literal32 = @truncate(@as(u128, @bitCast(int))) });
+ } else {
+ try self.inst.operands.append(gpa, .{ .literal64 = @truncate(@as(u128, @bitCast(int))) });
+ }
+ return;
+ }
+
+ return self.fail(tok.start, "'{s}' is not a valid {s} {}-bit int literal", .{ text, @tagName(signedness), width });
+}
+
+fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void {
+ const gpa = self.cg.module.gpa;
+
+ const Float = std.meta.Float(width);
+ const Int = std.meta.Int(.unsigned, width);
+
+ const tok = self.currentToken();
+ try self.expectToken(.value);
+
+ const text = self.tokenText(tok);
+
+ const value = std.fmt.parseFloat(Float, text) catch {
+ return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width });
+ };
+
+ const float_bits: Int = @bitCast(value);
+ if (width <= @bitSizeOf(spec.Word)) {
+ try self.inst.operands.append(gpa, .{ .literal32 = float_bits });
+ } else {
+ assert(width <= 2 * @bitSizeOf(spec.Word));
+ try self.inst.operands.append(gpa, .{ .literal64 = float_bits });
+ }
+}
+
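+/// Parses a single (value, parent block) pair of an OpPhi instruction:
+/// two ref-ids, the source value followed by the block it originates from.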
+fn parsePhiSource(self: *Assembler) !void {
+ try self.parseRefId();
+ if (self.isAtInstructionBoundary()) {
+ return self.fail(self.currentToken().start, "missing phi block parent", .{});
+ }
+ try self.parseRefId();
+}
+
+/// Returns whether the `current_token` cursor
+/// is currently pointing at the start of a new instruction.
+fn isAtInstructionBoundary(self: Assembler) bool {
+ return switch (self.currentToken().tag) {
+ .opcode, .result_id_assign, .eof => true,
+ else => false,
+ };
+}
+
+fn expectToken(self: *Assembler, tag: Token.Tag) !void {
+ if (self.eatToken(tag))
+ return;
+
+ return self.fail(self.currentToken().start, "unexpected {s}, expected {s}", .{
+ self.currentToken().tag.name(),
+ tag.name(),
+ });
+}
+
+fn eatToken(self: *Assembler, tag: Token.Tag) bool {
+ if (self.testToken(tag)) {
+ self.current_token += 1;
+ return true;
+ }
+ return false;
+}
+
+fn testToken(self: Assembler, tag: Token.Tag) bool {
+ return self.currentToken().tag == tag;
+}
+
+fn currentToken(self: Assembler) Token {
+ return self.tokens.items[self.current_token];
+}
+
+fn tokenText(self: Assembler, tok: Token) []const u8 {
+ return self.src[tok.start..tok.end];
+}
+
+/// Tokenize `self.src` and put the tokens in `self.tokens`.
+/// Any errors encountered are appended to `self.errors`.
+fn tokenize(self: *Assembler) !void {
+ const gpa = self.cg.module.gpa;
+
+ self.tokens.clearRetainingCapacity();
+
+ var offset: u32 = 0;
+ while (true) {
+ const tok = try self.nextToken(offset);
+ // Resolve result-id assignment now.
+ // NOTE: If the previous token wasn't a result-id, just ignore it,
+ // we will catch it while parsing.
+ if (tok.tag == .equals and self.tokens.items[self.tokens.items.len - 1].tag == .result_id) {
+ self.tokens.items[self.tokens.items.len - 1].tag = .result_id_assign;
+ }
+ try self.tokens.append(gpa, tok);
+ if (tok.tag == .eof)
+ break;
+ offset = tok.end;
+ }
+}
+
+const Token = struct {
+ tag: Tag,
+ start: u32,
+ end: u32,
+
+ const Tag = enum {
+ /// Returned when there was no more input to match.
+ eof,
+ /// %identifier
+ result_id,
+ /// %identifier when appearing on the LHS of an equals sign.
+        /// While not technically a separate token, it's relatively easy to resolve
+        /// this during lexical analysis, and doing so relieves a bunch of headaches
+        /// during parsing.
+ result_id_assign,
+ /// Mask, int, or float. These are grouped together as some
+ /// SPIR-V enumerants look a bit like integers as well (for example
+ /// "3D"), and so it is easier to just interpret them as the expected
+ /// type when resolving an instruction's operands.
+ value,
+ /// An enumerant that looks like an opcode, that is, OpXxxx.
+ /// Not necessarily a *valid* opcode.
+ opcode,
+ /// String literals.
+ /// Note, this token is also returned for unterminated
+ /// strings. In this case the closing " is not present.
+ string,
+ /// |.
+ pipe,
+ /// =.
+ equals,
+ /// $identifier. This is used (for now) for constant values, like integers.
+ /// These can be used in place of a normal `value`.
+ placeholder,
+
+ fn name(self: Tag) []const u8 {
+ return switch (self) {
+ .eof => "<end of input>",
+ .result_id => "<result-id>",
+ .result_id_assign => "<assigned result-id>",
+ .value => "<value>",
+ .opcode => "<opcode>",
+ .string => "<string literal>",
+ .pipe => "'|'",
+ .equals => "'='",
+ .placeholder => "<placeholder>",
+ };
+ }
+ };
+};
+
+/// Retrieve the next token from the input. This function will assert
+/// that the token is surrounded by whitespace if required, but will not
+/// interpret the token yet.
+/// NOTE: This function doesn't handle .result_id_assign - this is handled in tokenize().
+fn nextToken(self: *Assembler, start_offset: u32) !Token {
+ // We generally separate the input into the following types:
+ // - Whitespace. Generally ignored, but also used as delimiter for some
+ // tokens.
+ // - Values. This entails integers, floats, enums - anything that
+ // consists of alphanumeric characters, delimited by whitespace.
+ // - Result-IDs. This entails anything that consists of alphanumeric characters and _, and
+ // starts with a %. In contrast to values, this entity can be checked for complete correctness
+ // relatively easily here.
+ // - Strings. This entails quote-delimited text such as "abc".
+ // SPIR-V strings have only two escapes, \" and \\.
+ // - Sigils, = and |. In this assembler, these are not required to have whitespace
+ // around them (they act as delimiters) as they do in SPIRV-Tools.
+
+ var state: enum {
+ start,
+ value,
+ result_id,
+ string,
+ string_end,
+ escape,
+ placeholder,
+ } = .start;
+ var token_start = start_offset;
+ var offset = start_offset;
+ var tag = Token.Tag.eof;
+ while (offset < self.src.len) : (offset += 1) {
+ const c = self.src[offset];
+ switch (state) {
+ .start => switch (c) {
+ ' ', '\t', '\r', '\n' => token_start = offset + 1,
+ '"' => {
+ state = .string;
+ tag = .string;
+ },
+ '%' => {
+ state = .result_id;
+ tag = .result_id;
+ },
+ '|' => {
+ tag = .pipe;
+ offset += 1;
+ break;
+ },
+ '=' => {
+ tag = .equals;
+ offset += 1;
+ break;
+ },
+ '$' => {
+ state = .placeholder;
+ tag = .placeholder;
+ },
+ else => {
+ state = .value;
+ tag = .value;
+ },
+ },
+ .value => switch (c) {
+ '"' => {
+ try self.addError(offset, "unexpected string literal", .{});
+ // The user most likely just forgot a delimiter here - keep
+ // the tag as value.
+ break;
+ },
+ ' ', '\t', '\r', '\n', '=', '|' => break,
+ else => {},
+ },
+ .result_id, .placeholder => switch (c) {
+ '_', 'a'...'z', 'A'...'Z', '0'...'9' => {},
+ ' ', '\t', '\r', '\n', '=', '|' => break,
+ else => {
+ try self.addError(offset, "illegal character in result-id or placeholder", .{});
+ // Again, probably a forgotten delimiter here.
+ break;
+ },
+ },
+ .string => switch (c) {
+ '\\' => state = .escape,
+ '"' => state = .string_end,
+ else => {}, // Note, strings may include newlines
+ },
+ .string_end => switch (c) {
+ ' ', '\t', '\r', '\n', '=', '|' => break,
+ else => {
+ try self.addError(offset, "unexpected character after string literal", .{});
+                    // The token is still unmistakably a string.
+ break;
+ },
+ },
+ // Escapes simply skip the next char.
+ .escape => state = .string,
+ }
+ }
+
+ var tok: Token = .{
+ .tag = tag,
+ .start = token_start,
+ .end = offset,
+ };
+
+ switch (state) {
+ .string, .escape => {
+ try self.addError(token_start, "unterminated string", .{});
+ },
+ .result_id => if (offset - token_start == 1) {
+ try self.addError(token_start, "result-id must have at least one name character", .{});
+ },
+ .value => {
+ const text = self.tokenText(tok);
+ const prefix = "Op";
+ const looks_like_opcode = text.len > prefix.len and
+ std.mem.startsWith(u8, text, prefix) and
+ std.ascii.isUpper(text[prefix.len]);
+ if (looks_like_opcode)
+ tok.tag = .opcode;
+ },
+ else => {},
+ }
+
+ return tok;
+}
diff --git a/src/codegen/spirv/CodeGen.zig b/src/codegen/spirv/CodeGen.zig
new file mode 100644
index 0000000000..81c0e7da5c
--- /dev/null
+++ b/src/codegen/spirv/CodeGen.zig
@@ -0,0 +1,6168 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const Target = std.Target;
+const Signedness = std.builtin.Signedness;
+const assert = std.debug.assert;
+const log = std.log.scoped(.codegen);
+
+const Zcu = @import("../../Zcu.zig");
+const Type = @import("../../Type.zig");
+const Value = @import("../../Value.zig");
+const Air = @import("../../Air.zig");
+const InternPool = @import("../../InternPool.zig");
+const Section = @import("Section.zig");
+const Assembler = @import("Assembler.zig");
+
+const spec = @import("spec.zig");
+const Opcode = spec.Opcode;
+const Word = spec.Word;
+const Id = spec.Id;
+const IdRange = spec.IdRange;
+const StorageClass = spec.StorageClass;
+
+const Module = @import("Module.zig");
+const Decl = Module.Decl;
+const Repr = Module.Repr;
+const InternMap = Module.InternMap;
+const PtrTypeMap = Module.PtrTypeMap;
+
+const CodeGen = @This();
+
+pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features {
+ return comptime &.initMany(&.{
+ .expand_intcast_safe,
+ .expand_int_from_float_safe,
+ .expand_int_from_float_optimized_safe,
+ .expand_add_safe,
+ .expand_sub_safe,
+ .expand_mul_safe,
+ });
+}
+
+pub const zig_call_abi_ver = 3;
+
+const ControlFlow = union(enum) {
+ const Structured = struct {
+ /// This type indicates the way that a block is terminated. The
+ /// state of a particular block is used to track how a jump from
+ /// inside the block must reach the outside.
+ const Block = union(enum) {
+ const Incoming = struct {
+ src_label: Id,
+ /// Instruction that returns an u32 value of the
+ /// `Air.Inst.Index` that control flow should jump to.
+ next_block: Id,
+ };
+
+ const SelectionMerge = struct {
+ /// Incoming block from the `then` label.
+                        /// Note that the incoming block from the `else` label is
+                        /// given by the next element in the stack.
+ incoming: Incoming,
+ /// The label id of the cond_br's merge block.
+ /// For the top-most element in the stack, this
+ /// value is undefined.
+ merge_block: Id,
+ };
+
+ /// For a `selection` type block, we cannot use early exits, and we
+ /// must generate a 'merge ladder' of OpSelection instructions. To that end,
+ /// we keep a stack of the merges that still must be closed at the end of
+ /// a block.
+ ///
+ /// This entire structure basically just resembles a tree like
+ /// a x
+ /// \ /
+ /// b o merge
+ /// \ /
+ /// c o merge
+ /// \ /
+ /// o merge
+ /// /
+ /// o jump to next block
+ selection: struct {
+ /// In order to know which merges we still need to do, we need to keep
+ /// a stack of those.
+ merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .empty,
+ },
+ /// For a `loop` type block, we can early-exit the block by
+ /// jumping to the loop exit node, and we don't need to generate
+ /// an entire stack of merges.
+ loop: struct {
+ /// The next block to jump to can be determined from any number
+ /// of conditions that jump to the loop exit.
+ merges: std.ArrayListUnmanaged(Incoming) = .empty,
+ /// The label id of the loop's merge block.
+ merge_block: Id,
+ },
+
+ fn deinit(block: *Structured.Block, gpa: Allocator) void {
+ switch (block.*) {
+ .selection => |*merge| merge.merge_stack.deinit(gpa),
+ .loop => |*merge| merge.merges.deinit(gpa),
+ }
+ block.* = undefined;
+ }
+ };
+ /// This determines how exits from the current block must be handled.
+ block_stack: std.ArrayListUnmanaged(*Structured.Block) = .empty,
+ block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, Id) = .empty,
+ };
+
+ const Unstructured = struct {
+ const Incoming = struct {
+ src_label: Id,
+ break_value_id: Id,
+ };
+
+ const Block = struct {
+ label: ?Id = null,
+ incoming_blocks: std.ArrayListUnmanaged(Incoming) = .empty,
+ };
+
+ /// We need to keep track of result ids for block labels, as well as the 'incoming'
+ /// blocks for a block.
+ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .empty,
+ };
+
+ structured: Structured,
+ unstructured: Unstructured,
+
+ pub fn deinit(cg: *ControlFlow, gpa: Allocator) void {
+ switch (cg.*) {
+ .structured => |*cf| {
+ cf.block_stack.deinit(gpa);
+ cf.block_results.deinit(gpa);
+ },
+ .unstructured => |*cf| {
+ cf.blocks.deinit(gpa);
+ },
+ }
+ cg.* = undefined;
+ }
+};
+
+pt: Zcu.PerThread,
+air: Air,
+/// Note: If the declaration is not a function, this value will be undefined!
+liveness: Air.Liveness,
+owner_nav: InternPool.Nav.Index,
+module: *Module,
+control_flow: ControlFlow,
+/// The base offset of the current decl, which is what `dbg_stmt` is relative to.
+base_line: u32,
+block_label: Id = .none,
+/// An array of function argument result-ids. Each index corresponds with the
+/// function argument of the same index.
+args: std.ArrayListUnmanaged(Id) = .empty,
+/// A counter to keep track of how many `arg` instructions we've seen yet.
+next_arg_index: u32 = 0,
+/// A map keeping track of which instruction generated which result-id.
+inst_results: std.AutoHashMapUnmanaged(Air.Inst.Index, Id) = .empty,
+file_path_id: Id = .none,
+prologue: Section = .{},
+body: Section = .{},
+decl_deps: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .empty,
+error_msg: ?*Zcu.ErrorMsg = null,
+
+/// Free resources owned by the CodeGen.
+pub fn deinit(cg: *CodeGen) void {
+ const gpa = cg.module.gpa;
+ cg.args.deinit(gpa);
+ cg.inst_results.deinit(gpa);
+ cg.control_flow.deinit(gpa);
+ cg.prologue.deinit(gpa);
+ cg.body.deinit(gpa);
+ cg.decl_deps.deinit(gpa);
+}
+
+const Error = error{ CodegenFail, OutOfMemory };
+
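+/// Generate code for the declaration referenced by `owner_nav`: a function
+/// body, a global variable, or an invocation global with an optional
+/// initializer function.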
+pub fn genNav(cg: *CodeGen, do_codegen: bool) Error!void {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const target = zcu.getTarget();
+
+ const nav = ip.getNav(cg.owner_nav);
+ const val = zcu.navValue(cg.owner_nav);
+ const ty = val.typeOf(zcu);
+
+ if (!do_codegen and !ty.hasRuntimeBits(zcu)) return;
+
+ const spv_decl_index = try cg.module.resolveNav(ip, cg.owner_nav);
+ const result_id = cg.module.declPtr(spv_decl_index).result_id;
+
+ switch (cg.module.declPtr(spv_decl_index).kind) {
+ .func => {
+ const fn_info = zcu.typeToFunc(ty).?;
+ const return_ty_id = try cg.resolveFnReturnType(.fromInterned(fn_info.return_type));
+ const is_test = zcu.test_functions.contains(cg.owner_nav);
+
+ const func_result_id = if (is_test) cg.module.allocId() else result_id;
+ const prototype_ty_id = try cg.resolveType(ty, .direct);
+ try cg.prologue.emit(cg.module.gpa, .OpFunction, .{
+ .id_result_type = return_ty_id,
+ .id_result = func_result_id,
+ .function_type = prototype_ty_id,
+ // Note: the backend will never be asked to generate an inline function
+ // (this is handled in sema), so we don't need to set function_control here.
+ .function_control = .{},
+ });
+
+ comptime assert(zig_call_abi_ver == 3);
+ try cg.args.ensureUnusedCapacity(gpa, fn_info.param_types.len);
+ for (fn_info.param_types.get(ip)) |param_ty_index| {
+ const param_ty: Type = .fromInterned(param_ty_index);
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ const param_type_id = try cg.resolveType(param_ty, .direct);
+ const arg_result_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpFunctionParameter, .{
+ .id_result_type = param_type_id,
+ .id_result = arg_result_id,
+ });
+ cg.args.appendAssumeCapacity(arg_result_id);
+ }
+
+ // TODO: This could probably be done in a better way...
+ const root_block_id = cg.module.allocId();
+
+ // The root block of a function declaration should appear before OpVariable instructions,
+ // so it is generated into the function's prologue.
+ try cg.prologue.emit(cg.module.gpa, .OpLabel, .{
+ .id_result = root_block_id,
+ });
+ cg.block_label = root_block_id;
+
+ const main_body = cg.air.getMainBody();
+ switch (cg.control_flow) {
+ .structured => {
+ _ = try cg.genStructuredBody(.selection, main_body);
+                    // We always expect all paths here to end, but we still need the block
+                    // to act as a dummy merge block.
+ try cg.body.emit(cg.module.gpa, .OpUnreachable, {});
+ },
+ .unstructured => {
+ try cg.genBody(main_body);
+ },
+ }
+ try cg.body.emit(cg.module.gpa, .OpFunctionEnd, {});
+ // Append the actual code into the functions section.
+ try cg.module.sections.functions.append(cg.module.gpa, cg.prologue);
+ try cg.module.sections.functions.append(cg.module.gpa, cg.body);
+
+ // Temporarily generate a test kernel declaration if this is a test function.
+ if (is_test) {
+ try cg.generateTestEntryPoint(nav.fqn.toSlice(ip), spv_decl_index, func_result_id);
+ }
+
+ try cg.module.declareDeclDeps(spv_decl_index, cg.decl_deps.keys());
+ try cg.module.debugName(func_result_id, nav.fqn.toSlice(ip));
+ },
+ .global => {
+ const maybe_init_val: ?Value = switch (ip.indexToKey(val.toIntern())) {
+ .func => unreachable,
+ .variable => |variable| .fromInterned(variable.init),
+ .@"extern" => null,
+ else => val,
+ };
+ assert(maybe_init_val == null); // TODO
+
+ const storage_class = cg.module.storageClass(nav.getAddrspace());
+ assert(storage_class != .generic); // These should be instance globals
+
+ const ty_id = try cg.resolveType(ty, .indirect);
+ const ptr_ty_id = try cg.module.ptrType(ty_id, storage_class);
+
+ try cg.module.sections.globals.emit(cg.module.gpa, .OpVariable, .{
+ .id_result_type = ptr_ty_id,
+ .id_result = result_id,
+ .storage_class = storage_class,
+ });
+
+ switch (target.os.tag) {
+ .vulkan, .opengl => {
+ if (ty.zigTypeTag(zcu) == .@"struct") {
+ switch (storage_class) {
+ .uniform, .push_constant => try cg.module.decorate(ty_id, .block),
+ else => {},
+ }
+ }
+
+ switch (ip.indexToKey(ty.toIntern())) {
+ .func_type, .opaque_type => {},
+ else => {
+ try cg.module.decorate(ptr_ty_id, .{
+ .array_stride = .{ .array_stride = @intCast(ty.abiSize(zcu)) },
+ });
+ },
+ }
+ },
+ else => {},
+ }
+
+ if (std.meta.stringToEnum(spec.BuiltIn, nav.fqn.toSlice(ip))) |builtin| {
+ try cg.module.decorate(result_id, .{ .built_in = .{ .built_in = builtin } });
+ }
+
+ try cg.module.debugName(result_id, nav.fqn.toSlice(ip));
+ try cg.module.declareDeclDeps(spv_decl_index, &.{});
+ },
+ .invocation_global => {
+ const maybe_init_val: ?Value = switch (ip.indexToKey(val.toIntern())) {
+ .func => unreachable,
+ .variable => |variable| .fromInterned(variable.init),
+ .@"extern" => null,
+ else => val,
+ };
+
+ try cg.module.declareDeclDeps(spv_decl_index, &.{});
+
+ const ty_id = try cg.resolveType(ty, .indirect);
+ const ptr_ty_id = try cg.module.ptrType(ty_id, .function);
+
+ if (maybe_init_val) |init_val| {
+ // TODO: Combine with resolveAnonDecl?
+ const void_ty_id = try cg.resolveType(.void, .direct);
+ const initializer_proto_ty_id = try cg.module.functionType(void_ty_id, &.{});
+
+ const initializer_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpFunction, .{
+ .id_result_type = try cg.resolveType(.void, .direct),
+ .id_result = initializer_id,
+ .function_control = .{},
+ .function_type = initializer_proto_ty_id,
+ });
+
+ const root_block_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpLabel, .{
+ .id_result = root_block_id,
+ });
+ cg.block_label = root_block_id;
+
+ const val_id = try cg.constant(ty, init_val, .indirect);
+ try cg.body.emit(cg.module.gpa, .OpStore, .{
+ .pointer = result_id,
+ .object = val_id,
+ });
+
+ try cg.body.emit(cg.module.gpa, .OpReturn, {});
+ try cg.body.emit(cg.module.gpa, .OpFunctionEnd, {});
+ try cg.module.sections.functions.append(cg.module.gpa, cg.prologue);
+ try cg.module.sections.functions.append(cg.module.gpa, cg.body);
+ try cg.module.declareDeclDeps(spv_decl_index, cg.decl_deps.keys());
+
+ try cg.module.debugNameFmt(initializer_id, "initializer of {f}", .{nav.fqn.fmt(ip)});
+
+ try cg.module.sections.globals.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = ptr_ty_id,
+ .id_result = result_id,
+ .set = try cg.module.importInstructionSet(.zig),
+ .instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
+ .id_ref_4 = &.{initializer_id},
+ });
+ } else {
+ try cg.module.sections.globals.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = ptr_ty_id,
+ .id_result = result_id,
+ .set = try cg.module.importInstructionSet(.zig),
+ .instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
+ .id_ref_4 = &.{},
+ });
+ }
+ },
+ }
+}
+
+pub fn fail(cg: *CodeGen, comptime format: []const u8, args: anytype) Error {
+ @branchHint(.cold);
+ const zcu = cg.module.zcu;
+ const src_loc = zcu.navSrcLoc(cg.owner_nav);
+ assert(cg.error_msg == null);
+ cg.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, format, args);
+ return error.CodegenFail;
+}
+
+pub fn todo(cg: *CodeGen, comptime format: []const u8, args: anytype) Error {
+ return cg.fail("TODO (SPIR-V): " ++ format, args);
+}
+
+/// This imports the "default" extended instruction set for the target:
+/// OpenCL.std for OpenCL, and GLSL.std.450 for Vulkan and OpenGL.
+fn importExtendedSet(cg: *CodeGen) !Id {
+ const target = cg.module.zcu.getTarget();
+ return switch (target.os.tag) {
+ .opencl, .amdhsa => try cg.module.importInstructionSet(.@"OpenCL.std"),
+ .vulkan, .opengl => try cg.module.importInstructionSet(.@"GLSL.std.450"),
+ else => unreachable,
+ };
+}
+
+/// Fetch the result-id for a previously generated instruction or constant.
+fn resolve(cg: *CodeGen, inst: Air.Inst.Ref) !Id {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ if (try cg.air.value(inst, pt)) |val| {
+ const ty = cg.typeOf(inst);
+ if (ty.zigTypeTag(zcu) == .@"fn") {
+ const fn_nav = switch (zcu.intern_pool.indexToKey(val.ip_index)) {
+ .@"extern" => |@"extern"| @"extern".owner_nav,
+ .func => |func| func.owner_nav,
+ else => unreachable,
+ };
+ const spv_decl_index = try cg.module.resolveNav(ip, fn_nav);
+ try cg.decl_deps.put(cg.module.gpa, spv_decl_index, {});
+ return cg.module.declPtr(spv_decl_index).result_id;
+ }
+
+ return try cg.constant(ty, val, .direct);
+ }
+ const index = inst.toIndex().?;
+    return cg.inst_results.get(index).?; // An assertion failure here means the instruction does not dominate the usage.
+}
+
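+/// Resolve an unnamed constant as an invocation global: allocate (or reuse) a
+/// declaration for it, emit an initializer function that stores the constant
+/// into the global, and return the global's result-id.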
+fn resolveUav(cg: *CodeGen, val: InternPool.Index) !Id {
+ const gpa = cg.module.gpa;
+
+ // TODO: This cannot be a function at this point, but it should probably be handled anyway.
+
+ const zcu = cg.module.zcu;
+ const ty: Type = .fromInterned(zcu.intern_pool.typeOf(val));
+ const ty_id = try cg.resolveType(ty, .indirect);
+
+ const spv_decl_index = blk: {
+ const entry = try cg.module.uav_link.getOrPut(cg.module.gpa, .{ val, .function });
+ if (entry.found_existing) {
+ try cg.addFunctionDep(entry.value_ptr.*, .function);
+ return cg.module.declPtr(entry.value_ptr.*).result_id;
+ }
+
+ const spv_decl_index = try cg.module.allocDecl(.invocation_global);
+ try cg.addFunctionDep(spv_decl_index, .function);
+ entry.value_ptr.* = spv_decl_index;
+ break :blk spv_decl_index;
+ };
+
+    // TODO: At some point we will be able to generate all of this as constants here, but then all of
+    // constant() will need to be implemented such that it doesn't generate any runtime code.
+ // NOTE: Because this is a global, we really only want to initialize it once. Therefore the
+ // constant lowering of this value will need to be deferred to an initializer similar to
+ // other globals.
+
+ const result_id = cg.module.declPtr(spv_decl_index).result_id;
+
+ {
+ // Save the current state so that we can temporarily generate into a different function.
+ // TODO: This should probably be made a little more robust.
+ const func_prologue = cg.prologue;
+ const func_body = cg.body;
+ const func_deps = cg.decl_deps;
+ const block_label = cg.block_label;
+ defer {
+ cg.prologue = func_prologue;
+ cg.body = func_body;
+ cg.decl_deps = func_deps;
+ cg.block_label = block_label;
+ }
+
+ cg.prologue = .{};
+ cg.body = .{};
+ cg.decl_deps = .{};
+ defer {
+ cg.prologue.deinit(gpa);
+ cg.body.deinit(gpa);
+ cg.decl_deps.deinit(gpa);
+ }
+
+ const void_ty_id = try cg.resolveType(.void, .direct);
+ const initializer_proto_ty_id = try cg.module.functionType(void_ty_id, &.{});
+
+ const initializer_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpFunction, .{
+ .id_result_type = try cg.resolveType(.void, .direct),
+ .id_result = initializer_id,
+ .function_control = .{},
+ .function_type = initializer_proto_ty_id,
+ });
+ const root_block_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpLabel, .{
+ .id_result = root_block_id,
+ });
+ cg.block_label = root_block_id;
+
+ const val_id = try cg.constant(ty, .fromInterned(val), .indirect);
+ try cg.body.emit(cg.module.gpa, .OpStore, .{
+ .pointer = result_id,
+ .object = val_id,
+ });
+
+ try cg.body.emit(cg.module.gpa, .OpReturn, {});
+ try cg.body.emit(cg.module.gpa, .OpFunctionEnd, {});
+
+ try cg.module.sections.functions.append(cg.module.gpa, cg.prologue);
+ try cg.module.sections.functions.append(cg.module.gpa, cg.body);
+ try cg.module.declareDeclDeps(spv_decl_index, cg.decl_deps.keys());
+
+ try cg.module.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
+
+ const fn_decl_ptr_ty_id = try cg.module.ptrType(ty_id, .function);
+ try cg.module.sections.globals.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = fn_decl_ptr_ty_id,
+ .id_result = result_id,
+ .set = try cg.module.importInstructionSet(.zig),
+ .instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
+ .id_ref_4 = &.{initializer_id},
+ });
+ }
+
+ return result_id;
+}
+
+fn addFunctionDep(cg: *CodeGen, decl_index: Module.Decl.Index, storage_class: StorageClass) !void {
+ const target = cg.module.zcu.getTarget();
+ if (target.cpu.has(.spirv, .v1_4)) {
+ try cg.decl_deps.put(cg.module.gpa, decl_index, {});
+ } else {
+        // Before version 1.4, the interface's storage classes are limited to Input and Output.
+ if (storage_class == .input or storage_class == .output) {
+ try cg.decl_deps.put(cg.module.gpa, decl_index, {});
+ }
+ }
+}
+
+/// Start a new SPIR-V block: emits the label of the new block and stores which
+/// block we are currently generating.
+/// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
+/// keep track of the previous block.
+fn beginSpvBlock(cg: *CodeGen, label: Id) !void {
+ try cg.body.emit(cg.module.gpa, .OpLabel, .{ .id_result = label });
+ cg.block_label = label;
+}
+
+/// Return the number of bits in the largest supported integer type. This is either 32 (always supported), or 64 (if
+/// the Int64 capability is enabled).
+/// Note: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
+/// In theory that could also be used, but since the spec says that it only guarantees support up to 32-bit ints there
+/// is no way of knowing whether those are actually supported.
+/// TODO: Maybe this should be cached?
+fn largestSupportedIntBits(cg: *CodeGen) u16 {
+ const target = cg.module.zcu.getTarget();
+ if (target.cpu.has(.spirv, .int64) or target.cpu.arch == .spirv64) {
+ return 64;
+ }
+ return 32;
+}
+
+const ArithmeticTypeInfo = struct {
+ const Class = enum {
+ bool,
+ /// A regular, **native**, integer.
+ /// This is only returned when the backend supports this int as a native type (when
+ /// the relevant capability is enabled).
+ integer,
+ /// A regular float. These are all required to be natively supported. Floating points
+ /// for which the relevant capability is not enabled are not emulated.
+ float,
+    /// An integer of a 'strange' size (whose bit size is not the same as its backing
+ /// type. **Note**: this may **also** include power-of-2 integers for which the
+ /// relevant capability is not enabled), but still within the limits of the largest
+ /// natively supported integer type.
+ strange_integer,
+ /// An integer with more bits than the largest natively supported integer type.
+ composite_integer,
+ };
+
+ /// A classification of the inner type.
+ /// These scenarios will all have to be handled slightly different.
+ class: Class,
+ /// The number of bits in the inner type.
+ /// This is the actual number of bits of the type, not the size of the backing integer.
+ bits: u16,
+ /// The number of bits required to store the type.
+ /// For `integer` and `float`, this is equal to `bits`.
+ /// For `strange_integer` and `bool` this is the size of the backing integer.
+ /// For `composite_integer` this is the element count.
+ backing_bits: u16,
+ /// Null if this type is a scalar, or the length of the vector otherwise.
+ vector_len: ?u32,
+ /// Whether the inner type is signed. Only relevant for integers.
+ signedness: std.builtin.Signedness,
+};
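+
+// As a rough illustration of the classification above (assuming a target where the
+// Int64 capability is enabled, so the largest natively supported integer is 64 bits):
+// - `u32` is a native `.integer` (bits == backing_bits == 32),
+// - `u48` is a `.strange_integer` backed by a larger native type (for example a 64-bit one),
+// - `u128` does not fit any native integer and becomes a `.composite_integer`,
+// - `@Vector(4, i16)` reports `vector_len = 4` with the scalar classified as above.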
+
+fn arithmeticTypeInfo(cg: *CodeGen, ty: Type) ArithmeticTypeInfo {
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ var scalar_ty = ty.scalarType(zcu);
+ if (scalar_ty.zigTypeTag(zcu) == .@"enum") {
+ scalar_ty = scalar_ty.intTagType(zcu);
+ }
+ const vector_len = if (ty.isVector(zcu)) ty.vectorLen(zcu) else null;
+ return switch (scalar_ty.zigTypeTag(zcu)) {
+ .bool => .{
+ .bits = 1, // Doesn't matter for this class.
+ .backing_bits = cg.module.backingIntBits(1).@"0",
+ .vector_len = vector_len,
+ .signedness = .unsigned, // Technically, but doesn't matter for this class.
+ .class = .bool,
+ },
+ .float => .{
+ .bits = scalar_ty.floatBits(target),
+ .backing_bits = scalar_ty.floatBits(target), // TODO: F80?
+ .vector_len = vector_len,
+ .signedness = .signed, // Technically, but doesn't matter for this class.
+ .class = .float,
+ },
+ .int => blk: {
+ const int_info = scalar_ty.intInfo(zcu);
+ // TODO: Maybe it's useful to also return this value.
+ const backing_bits, const big_int = cg.module.backingIntBits(int_info.bits);
+ break :blk .{
+ .bits = int_info.bits,
+ .backing_bits = backing_bits,
+ .vector_len = vector_len,
+ .signedness = int_info.signedness,
+ .class = class: {
+ if (big_int) break :class .composite_integer;
+ break :class if (backing_bits == int_info.bits) .integer else .strange_integer;
+ },
+ };
+ },
+ .@"enum" => unreachable,
+ .vector => unreachable,
+ else => unreachable, // Unhandled arithmetic type
+ };
+}
+
+/// Checks whether the type can be directly translated to SPIR-V vectors
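+/// For example, `@Vector(4, f32)` maps directly to an OpTypeVector, while `@Vector(8, u32)`
+/// only does so when the Vector16 capability is enabled; other lengths and non-numeric
+/// element types fall back to arrays of the element type.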
+fn isSpvVector(cg: *CodeGen, ty: Type) bool {
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ if (ty.zigTypeTag(zcu) != .vector) return false;
+
+ // TODO: This check must be expanded for types that can be represented
+ // as integers (enums / packed structs?) and types that are represented
+ // by multiple SPIR-V values.
+ const scalar_ty = ty.scalarType(zcu);
+ switch (scalar_ty.zigTypeTag(zcu)) {
+ .bool,
+ .int,
+ .float,
+ => {},
+ else => return false,
+ }
+
+ const elem_ty = ty.childType(zcu);
+ const len = ty.vectorLen(zcu);
+
+ if (elem_ty.isNumeric(zcu) or elem_ty.toIntern() == .bool_type) {
+ if (len > 1 and len <= 4) return true;
+ if (target.cpu.has(.spirv, .vector16)) return (len == 8 or len == 16);
+ }
+
+ return false;
+}
+
+/// Emits a bool constant in a particular representation.
+fn constBool(cg: *CodeGen, value: bool, repr: Repr) !Id {
+ return switch (repr) {
+ .indirect => cg.constInt(.u1, @intFromBool(value)),
+ .direct => cg.module.constBool(value),
+ };
+}
+
+/// Emits an integer constant.
+/// This function, unlike Module.constInt, takes care to bitcast
+/// the value to an unsigned int first for Kernels.
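+/// For example (a sketch, assuming 8-bit integers are backed by 8 bits on the target):
+/// on an OpenCL-style target, `constInt(i8, -1)` is emitted as the unsigned literal 0xFF
+/// rather than as a signed -1.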
+fn constInt(cg: *CodeGen, ty: Type, value: anytype) !Id {
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ const scalar_ty = ty.scalarType(zcu);
+ const int_info = scalar_ty.intInfo(zcu);
+ // Use backing bits so that negatives are sign extended
+ const backing_bits, const big_int = cg.module.backingIntBits(int_info.bits);
+ assert(backing_bits != 0); // u0 is comptime
+
+ const result_ty_id = try cg.resolveType(scalar_ty, .indirect);
+ const signedness: Signedness = switch (@typeInfo(@TypeOf(value))) {
+ .int => |int| int.signedness,
+ .comptime_int => if (value < 0) .signed else .unsigned,
+ else => unreachable,
+ };
+ if (@sizeOf(@TypeOf(value)) >= 4 and big_int) {
+ const value64: u64 = switch (signedness) {
+ .signed => @bitCast(@as(i64, @intCast(value))),
+ .unsigned => @as(u64, @intCast(value)),
+ };
+ assert(backing_bits == 64);
+ return cg.constructComposite(result_ty_id, &.{
+ try cg.constInt(.u32, @as(u32, @truncate(value64))),
+ try cg.constInt(.u32, @as(u32, @truncate(value64 >> 32))),
+ });
+ }
+
+ const final_value: spec.LiteralContextDependentNumber = switch (target.os.tag) {
+ .opencl, .amdhsa => blk: {
+ const value64: u64 = switch (signedness) {
+ .signed => @bitCast(@as(i64, @intCast(value))),
+ .unsigned => @as(u64, @intCast(value)),
+ };
+
+ // Manually truncate the value to the right amount of bits.
+ const truncated_value = if (backing_bits == 64)
+ value64
+ else
+ value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1;
+
+ break :blk switch (backing_bits) {
+ 1...32 => .{ .uint32 = @truncate(truncated_value) },
+ 33...64 => .{ .uint64 = truncated_value },
+ else => unreachable,
+ };
+ },
+ else => switch (backing_bits) {
+ 1...32 => if (signedness == .signed) .{ .int32 = @intCast(value) } else .{ .uint32 = @intCast(value) },
+ 33...64 => if (signedness == .signed) .{ .int64 = value } else .{ .uint64 = value },
+ else => unreachable,
+ },
+ };
+
+ const result_id = try cg.module.constant(result_ty_id, final_value);
+
+ if (!ty.isVector(zcu)) return result_id;
+ return cg.constructCompositeSplat(ty, result_id);
+}
+
+pub fn constructComposite(cg: *CodeGen, result_ty_id: Id, constituents: []const Id) !Id {
+ const gpa = cg.module.gpa;
+ const result_id = cg.module.allocId();
+ try cg.body.emit(gpa, .OpCompositeConstruct, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .constituents = constituents,
+ });
+ return result_id;
+}
+
+/// Construct a composite at runtime with all lanes set to the same value.
+/// ty must be an aggregate type.
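+/// For example, splatting one constituent over a `@Vector(4, u32)` emits a single
+/// OpCompositeConstruct whose four constituents are all the same id.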
+fn constructCompositeSplat(cg: *CodeGen, ty: Type, constituent: Id) !Id {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const n: usize = @intCast(ty.arrayLen(zcu));
+
+ const constituents = try gpa.alloc(Id, n);
+ defer gpa.free(constituents);
+ @memset(constituents, constituent);
+
+ const result_ty_id = try cg.resolveType(ty, .direct);
+ return cg.constructComposite(result_ty_id, constituents);
+}
+
+/// This function generates a load for a constant in direct (ie, non-memory) representation.
+/// When the constant is simple, it can be generated directly using OpConstant instructions.
+/// When the constant is more complicated however, it needs to be constructed using multiple values. This
+/// is done by emitting a sequence of instructions that initialize the value.
+///
+/// This function should only be called during function code generation.
+fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
+ const gpa = cg.module.gpa;
+
+ // Note: The intern_map may only be used for constants that do NOT generate any runtime code!
+ // Ideally that should be all constants in the future, or it should be cleaned up somehow. For
+ // now, only use the intern_map on a case-by-case basis by breaking to :cache.
+ if (cg.module.intern_map.get(.{ val.toIntern(), repr })) |id| {
+ return id;
+ }
+
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ const result_ty_id = try cg.resolveType(ty, repr);
+ const ip = &zcu.intern_pool;
+
+ log.debug("lowering constant: ty = {f}, val = {f}, key = {s}", .{ ty.fmt(pt), val.fmtValue(pt), @tagName(ip.indexToKey(val.toIntern())) });
+ if (val.isUndefDeep(zcu)) {
+ return cg.module.constUndef(result_ty_id);
+ }
+
+ const cacheable_id = cache: {
+ switch (ip.indexToKey(val.toIntern())) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .tuple_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef => unreachable, // handled above
+
+ .variable,
+ .@"extern",
+ .func,
+ .enum_literal,
+ .empty_enum_value,
+ => unreachable, // non-runtime values
+
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_tuple,
+ .@"unreachable",
+ => unreachable, // non-runtime values
+
+ .false, .true => break :cache try cg.constBool(val.toBool(), repr),
+ },
+ .int => {
+ if (ty.isSignedInt(zcu)) {
+ break :cache try cg.constInt(ty, val.toSignedInt(zcu));
+ } else {
+ break :cache try cg.constInt(ty, val.toUnsignedInt(zcu));
+ }
+ },
+ .float => {
+ const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
+ 16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, zcu))) },
+ 32 => .{ .float32 = val.toFloat(f32, zcu) },
+ 64 => .{ .float64 = val.toFloat(f64, zcu) },
+ 80, 128 => unreachable, // TODO
+ else => unreachable,
+ };
+ break :cache try cg.module.constant(result_ty_id, lit);
+ },
+ .err => |err| {
+ const value = try pt.getErrorValue(err.name);
+ break :cache try cg.constInt(ty, value);
+ },
+ .error_union => |error_union| {
+ // TODO: Error unions may be constructed with constant instructions if the payload type
+ // allows it. For now, just generate it here regardless.
+ const err_ty = ty.errorUnionSet(zcu);
+ const payload_ty = ty.errorUnionPayload(zcu);
+ const err_val_id = switch (error_union.val) {
+ .err_name => |err_name| try cg.constInt(
+ err_ty,
+ try pt.getErrorValue(err_name),
+ ),
+ .payload => try cg.constInt(err_ty, 0),
+ };
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+ if (!eu_layout.payload_has_bits) {
+ // We use the error type directly as the type.
+ break :cache err_val_id;
+ }
+
+ const payload_val_id = switch (error_union.val) {
+ .err_name => try cg.constant(payload_ty, .undef, .indirect),
+ .payload => |p| try cg.constant(payload_ty, .fromInterned(p), .indirect),
+ };
+
+ var constituents: [2]Id = undefined;
+ var types: [2]Type = undefined;
+ if (eu_layout.error_first) {
+ constituents[0] = err_val_id;
+ constituents[1] = payload_val_id;
+ types = .{ err_ty, payload_ty };
+ } else {
+ constituents[0] = payload_val_id;
+ constituents[1] = err_val_id;
+ types = .{ payload_ty, err_ty };
+ }
+
+ const comp_ty_id = try cg.resolveType(ty, .direct);
+ return try cg.constructComposite(comp_ty_id, &constituents);
+ },
+ .enum_tag => {
+ const int_val = try val.intFromEnum(ty, pt);
+ const int_ty = ty.intTagType(zcu);
+ break :cache try cg.constant(int_ty, int_val, repr);
+ },
+ .ptr => return cg.constantPtr(val),
+ .slice => |slice| {
+ const ptr_id = try cg.constantPtr(.fromInterned(slice.ptr));
+ const len_id = try cg.constant(.usize, .fromInterned(slice.len), .indirect);
+ const comp_ty_id = try cg.resolveType(ty, .direct);
+ return try cg.constructComposite(comp_ty_id, &.{ ptr_id, len_id });
+ },
+ .opt => {
+ const payload_ty = ty.optionalChild(zcu);
+ const maybe_payload_val = val.optionalValue(zcu);
+
+ if (!payload_ty.hasRuntimeBits(zcu)) {
+ break :cache try cg.constBool(maybe_payload_val != null, .indirect);
+ } else if (ty.optionalReprIsPayload(zcu)) {
+ // Optional representation is a nullable pointer or slice.
+ if (maybe_payload_val) |payload_val| {
+ return try cg.constant(payload_ty, payload_val, .indirect);
+ } else {
+ break :cache try cg.module.constNull(result_ty_id);
+ }
+ }
+
+ // Optional representation is a structure.
+ // { Payload, Bool }
+
+ const has_pl_id = try cg.constBool(maybe_payload_val != null, .indirect);
+ const payload_id = if (maybe_payload_val) |payload_val|
+ try cg.constant(payload_ty, payload_val, .indirect)
+ else
+ try cg.module.constUndef(try cg.resolveType(payload_ty, .indirect));
+
+ const comp_ty_id = try cg.resolveType(ty, .direct);
+ return try cg.constructComposite(comp_ty_id, &.{ payload_id, has_pl_id });
+ },
+ .aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
+ inline .array_type, .vector_type => |array_type, tag| {
+ const elem_ty: Type = .fromInterned(array_type.child);
+
+ const constituents = try gpa.alloc(Id, @intCast(ty.arrayLenIncludingSentinel(zcu)));
+ defer gpa.free(constituents);
+
+ const child_repr: Repr = switch (tag) {
+ .array_type => .indirect,
+ .vector_type => .direct,
+ else => unreachable,
+ };
+
+ switch (aggregate.storage) {
+ .bytes => |bytes| {
+ // TODO: This is really space inefficient, perhaps there is a better
+ // way to do it?
+ for (constituents, bytes.toSlice(constituents.len, ip)) |*constituent, byte| {
+ constituent.* = try cg.constInt(elem_ty, byte);
+ }
+ },
+ .elems => |elems| {
+ for (constituents, elems) |*constituent, elem| {
+ constituent.* = try cg.constant(elem_ty, .fromInterned(elem), child_repr);
+ }
+ },
+ .repeated_elem => |elem| {
+ @memset(constituents, try cg.constant(elem_ty, .fromInterned(elem), child_repr));
+ },
+ }
+
+ const comp_ty_id = try cg.resolveType(ty, .direct);
+ return cg.constructComposite(comp_ty_id, constituents);
+ },
+ .struct_type => {
+ const struct_type = zcu.typeToStruct(ty).?;
+
+ if (struct_type.layout == .@"packed") {
+ // TODO: composite int
+ // TODO: endianness
+ const bits: u16 = @intCast(ty.bitSize(zcu));
+ const bytes = std.mem.alignForward(u16, cg.module.backingIntBits(bits).@"0", 8) / 8;
+ var limbs: [8]u8 = undefined;
+ @memset(&limbs, 0);
+ val.writeToPackedMemory(ty, pt, limbs[0..bytes], 0) catch unreachable;
+ const backing_ty: Type = .fromInterned(struct_type.backingIntTypeUnordered(ip));
+ return try cg.constInt(backing_ty, @as(u64, @bitCast(limbs)));
+ }
+
+ var types = std.ArrayList(Type).init(gpa);
+ defer types.deinit();
+
+ var constituents = std.ArrayList(Id).init(gpa);
+ defer constituents.deinit();
+
+ var it = struct_type.iterateRuntimeOrder(ip);
+ while (it.next()) |field_index| {
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ // This is a zero-bit field - we only needed it for the alignment.
+ continue;
+ }
+
+ // TODO: Padding?
+ const field_val = try val.fieldValue(pt, field_index);
+ const field_id = try cg.constant(field_ty, field_val, .indirect);
+
+ try types.append(field_ty);
+ try constituents.append(field_id);
+ }
+
+ const comp_ty_id = try cg.resolveType(ty, .direct);
+ return try cg.constructComposite(comp_ty_id, constituents.items);
+ },
+ .tuple_type => return cg.todo("implement tuple types", .{}),
+ else => unreachable,
+ },
+ .un => |un| {
+ if (un.tag == .none) {
+ assert(ty.containerLayout(zcu) == .@"packed"); // TODO
+ const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+ return try cg.constInt(int_ty, Value.toUnsignedInt(.fromInterned(un.val), zcu));
+ }
+ const active_field = ty.unionTagFieldIndex(.fromInterned(un.tag), zcu).?;
+ const union_obj = zcu.typeToUnion(ty).?;
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[active_field]);
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ try cg.constant(field_ty, .fromInterned(un.val), .direct)
+ else
+ null;
+ return try cg.unionInit(ty, active_field, payload);
+ },
+ .memoized_call => unreachable,
+ }
+ };
+
+ try cg.module.intern_map.putNoClobber(gpa, .{ val.toIntern(), repr }, cacheable_id);
+
+ return cacheable_id;
+}
+
+fn constantPtr(cg: *CodeGen, ptr_val: Value) !Id {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const gpa = cg.module.gpa;
+
+ if (ptr_val.isUndef(zcu)) {
+ const result_ty = ptr_val.typeOf(zcu);
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return cg.module.constUndef(result_ty_id);
+ }
+
+ var arena = std.heap.ArenaAllocator.init(gpa);
+ defer arena.deinit();
+
+ const derivation = try ptr_val.pointerDerivation(arena.allocator(), pt);
+ return cg.derivePtr(derivation);
+}
+
+fn derivePtr(cg: *CodeGen, derivation: Value.PointerDeriveStep) !Id {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ switch (derivation) {
+ .comptime_alloc_ptr, .comptime_field_ptr => unreachable,
+ .int => |int| {
+ const result_ty_id = try cg.resolveType(int.ptr_ty, .direct);
+ // TODO: This can probably be an OpSpecConstantOp Bitcast, but
+ // that is not implemented by Mesa yet. Therefore, just generate it
+ // as a runtime operation.
+ const result_ptr_id = cg.module.allocId();
+ const value_id = try cg.constInt(.usize, int.addr);
+ try cg.body.emit(cg.module.gpa, .OpConvertUToPtr, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_ptr_id,
+ .integer_value = value_id,
+ });
+ return result_ptr_id;
+ },
+ .nav_ptr => |nav| {
+ const result_ptr_ty = try pt.navPtrType(nav);
+ return cg.constantNavRef(result_ptr_ty, nav);
+ },
+ .uav_ptr => |uav| {
+ const result_ptr_ty: Type = .fromInterned(uav.orig_ty);
+ return cg.constantUavRef(result_ptr_ty, uav);
+ },
+ .eu_payload_ptr => @panic("TODO"),
+ .opt_payload_ptr => @panic("TODO"),
+ .field_ptr => |field| {
+ const parent_ptr_id = try cg.derivePtr(field.parent.*);
+ const parent_ptr_ty = try field.parent.ptrType(pt);
+ return cg.structFieldPtr(field.result_ptr_ty, parent_ptr_ty, parent_ptr_id, field.field_idx);
+ },
+ .elem_ptr => |elem| {
+ const parent_ptr_id = try cg.derivePtr(elem.parent.*);
+ const parent_ptr_ty = try elem.parent.ptrType(pt);
+ const index_id = try cg.constInt(.usize, elem.elem_idx);
+ return cg.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id);
+ },
+ .offset_and_cast => |oac| {
+ const parent_ptr_id = try cg.derivePtr(oac.parent.*);
+ const parent_ptr_ty = try oac.parent.ptrType(pt);
+ const result_ty_id = try cg.resolveType(oac.new_ptr_ty, .direct);
+ const child_size = oac.new_ptr_ty.childType(zcu).abiSize(zcu);
+
+ if (parent_ptr_ty.childType(zcu).isVector(zcu) and oac.byte_offset % child_size == 0) {
+ // Vector element ptr accesses are derived as offset_and_cast.
+ // We can just use OpAccessChain.
+ return cg.accessChain(
+ result_ty_id,
+ parent_ptr_id,
+ &.{@intCast(@divExact(oac.byte_offset, child_size))},
+ );
+ }
+
+ if (oac.byte_offset == 0) {
+ // Allow changing the pointer type child only to restructure arrays.
+ // e.g. [3][2]T to T is fine, as is [2]T -> [2][1]T.
+ const result_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_ptr_id,
+ .operand = parent_ptr_id,
+ });
+ return result_ptr_id;
+ }
+
+ return cg.fail("cannot perform pointer cast: '{f}' to '{f}'", .{
+ parent_ptr_ty.fmt(pt),
+ oac.new_ptr_ty.fmt(pt),
+ });
+ },
+ }
+}
+
+fn constantUavRef(
+ cg: *CodeGen,
+ ty: Type,
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
+) !Id {
+ // TODO: Merge this function with constantDeclRef.
+
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const ty_id = try cg.resolveType(ty, .direct);
+ const uav_ty: Type = .fromInterned(ip.typeOf(uav.val));
+
+ switch (ip.indexToKey(uav.val)) {
+ .func => unreachable, // TODO
+ .@"extern" => assert(!ip.isFunctionType(uav_ty.toIntern())),
+ else => {},
+ }
+
+ // const is_fn_body = decl_ty.zigTypeTag(zcu) == .@"fn";
+ if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
+ // Pointer to nothing - return undefined
+ return cg.module.constUndef(ty_id);
+ }
+
+ // Uav refs are always generic.
+ assert(ty.ptrAddressSpace(zcu) == .generic);
+ const uav_ty_id = try cg.resolveType(uav_ty, .indirect);
+ const decl_ptr_ty_id = try cg.module.ptrType(uav_ty_id, .generic);
+ const ptr_id = try cg.resolveUav(uav.val);
+
+ if (decl_ptr_ty_id != ty_id) {
+ // Differing pointer types, insert a cast.
+ const casted_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = ty_id,
+ .id_result = casted_ptr_id,
+ .operand = ptr_id,
+ });
+ return casted_ptr_id;
+ } else {
+ return ptr_id;
+ }
+}
+
+fn constantNavRef(cg: *CodeGen, ty: Type, nav_index: InternPool.Nav.Index) !Id {
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const ty_id = try cg.resolveType(ty, .direct);
+ const nav = ip.getNav(nav_index);
+ const nav_ty: Type = .fromInterned(nav.typeOf(ip));
+
+ switch (nav.status) {
+ .unresolved => unreachable,
+ .type_resolved => {}, // this is not a function or extern
+ .fully_resolved => |r| switch (ip.indexToKey(r.val)) {
+ .func => {
+ // TODO: Properly lower function pointers. For now we are going to hack around it and
+ // just generate an empty pointer. Function pointers are represented by a pointer to usize.
+ return try cg.module.constUndef(ty_id);
+ },
+ .@"extern" => if (ip.isFunctionType(nav_ty.toIntern())) @panic("TODO"),
+ else => {},
+ },
+ }
+
+ if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
+ // Pointer to nothing - return undefined.
+ return cg.module.constUndef(ty_id);
+ }
+
+ const spv_decl_index = try cg.module.resolveNav(ip, nav_index);
+ const spv_decl = cg.module.declPtr(spv_decl_index);
+ assert(spv_decl.kind != .func);
+
+ const storage_class = cg.module.storageClass(nav.getAddrspace());
+ try cg.addFunctionDep(spv_decl_index, storage_class);
+
+ const nav_ty_id = try cg.resolveType(nav_ty, .indirect);
+ const decl_ptr_ty_id = try cg.module.ptrType(nav_ty_id, storage_class);
+
+ if (decl_ptr_ty_id != ty_id) {
+ // Differing pointer types, insert a cast.
+ const casted_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = ty_id,
+ .id_result = casted_ptr_id,
+ .operand = spv_decl.result_id,
+ });
+ return casted_ptr_id;
+ }
+
+ return spv_decl.result_id;
+}
+
+// Turn a Zig type's name into a cache reference.
+fn resolveTypeName(cg: *CodeGen, ty: Type) ![]const u8 {
+ const gpa = cg.module.gpa;
+ var aw: std.io.Writer.Allocating = .init(gpa);
+ defer aw.deinit();
+ ty.print(&aw.writer, cg.pt) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ };
+ return try aw.toOwnedSlice();
+}
+
+/// Generate a union type. Union types are always generated with the
+/// most aligned field active. If the tag alignment is greater
+/// than that of the payload, a regular union (non-packed, with both tag and
+/// payload) will be generated as follows:
+/// struct {
+/// tag: TagType,
+/// payload: MostAlignedFieldType,
+/// payload_padding: [payload_size - @sizeOf(MostAlignedFieldType)]u8,
+/// padding: [padding_size]u8,
+/// }
+/// If the payload alignment is greater than that of the tag:
+/// struct {
+/// payload: MostAlignedFieldType,
+/// payload_padding: [payload_size - @sizeOf(MostAlignedFieldType)]u8,
+/// tag: TagType,
+/// padding: [padding_size]u8,
+/// }
+/// If any of the fields' size is 0, it will be omitted.
+fn resolveUnionType(cg: *CodeGen, ty: Type) !Id {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const union_obj = zcu.typeToUnion(ty).?;
+
+ if (union_obj.flagsUnordered(ip).layout == .@"packed") {
+ return try cg.module.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+ }
+
+ const layout = cg.unionLayout(ty);
+ if (!layout.has_payload) {
+ // No payload, so represent this as just the tag type.
+ return try cg.resolveType(.fromInterned(union_obj.enum_tag_ty), .indirect);
+ }
+
+ var member_types: [4]Id = undefined;
+ var member_names: [4][]const u8 = undefined;
+
+ const u8_ty_id = try cg.resolveType(.u8, .direct);
+
+ if (layout.tag_size != 0) {
+ const tag_ty_id = try cg.resolveType(.fromInterned(union_obj.enum_tag_ty), .indirect);
+ member_types[layout.tag_index] = tag_ty_id;
+ member_names[layout.tag_index] = "(tag)";
+ }
+
+ if (layout.payload_size != 0) {
+ const payload_ty_id = try cg.resolveType(layout.payload_ty, .indirect);
+ member_types[layout.payload_index] = payload_ty_id;
+ member_names[layout.payload_index] = "(payload)";
+ }
+
+ if (layout.payload_padding_size != 0) {
+ const len_id = try cg.constInt(.u32, layout.payload_padding_size);
+ const payload_padding_ty_id = try cg.module.arrayType(len_id, u8_ty_id);
+ member_types[layout.payload_padding_index] = payload_padding_ty_id;
+ member_names[layout.payload_padding_index] = "(payload padding)";
+ }
+
+ if (layout.padding_size != 0) {
+ const len_id = try cg.constInt(.u32, layout.padding_size);
+ const padding_ty_id = try cg.module.arrayType(len_id, u8_ty_id);
+ member_types[layout.padding_index] = padding_ty_id;
+ member_names[layout.padding_index] = "(padding)";
+ }
+
+ const result_id = try cg.module.structType(
+ member_types[0..layout.total_fields],
+ member_names[0..layout.total_fields],
+ null,
+ .none,
+ );
+
+ const type_name = try cg.resolveTypeName(ty);
+ defer gpa.free(type_name);
+ try cg.module.debugName(result_id, type_name);
+
+ return result_id;
+}
+
+fn resolveFnReturnType(cg: *CodeGen, ret_ty: Type) !Id {
+ const zcu = cg.module.zcu;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ // If the return type is an error set or an error union, then we make this
+ // anyerror return type instead, so that it can be coerced into a function
+ // pointer type which has anyerror as the return type.
+ if (ret_ty.isError(zcu)) {
+ return cg.resolveType(.anyerror, .direct);
+ } else {
+ return cg.resolveType(.void, .direct);
+ }
+ }
+
+ return try cg.resolveType(ret_ty, .direct);
+}
+
+fn resolveType(cg: *CodeGen, ty: Type, repr: Repr) Error!Id {
+ const gpa = cg.module.gpa;
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const target = cg.module.zcu.getTarget();
+
+ log.debug("resolveType: ty = {f}", .{ty.fmt(pt)});
+
+ switch (ty.zigTypeTag(zcu)) {
+ .noreturn => {
+ assert(repr == .direct);
+ return try cg.module.voidType();
+ },
+ .void => switch (repr) {
+ .direct => return try cg.module.voidType(),
+ .indirect => return try cg.module.opaqueType("void"),
+ },
+ .bool => switch (repr) {
+ .direct => return try cg.module.boolType(),
+ .indirect => return try cg.resolveType(.u1, .indirect),
+ },
+ .int => {
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits == 0) {
+ assert(repr == .indirect);
+ return try cg.module.opaqueType("u0");
+ }
+ return try cg.module.intType(int_info.signedness, int_info.bits);
+ },
+ .@"enum" => return try cg.resolveType(ty.intTagType(zcu), repr),
+ .float => {
+ const bits = ty.floatBits(target);
+ const supported = switch (bits) {
+ 16 => target.cpu.has(.spirv, .float16),
+ 32 => true,
+ 64 => target.cpu.has(.spirv, .float64),
+ else => false,
+ };
+
+ if (!supported) {
+ return cg.fail(
+ "floating point width of {} bits is not supported for the current SPIR-V feature set",
+ .{bits},
+ );
+ }
+
+ return try cg.module.floatType(bits);
+ },
+ .array => {
+ const elem_ty = ty.childType(zcu);
+ const elem_ty_id = try cg.resolveType(elem_ty, .indirect);
+ const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(zcu)) orelse {
+ return cg.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(zcu)});
+ };
+
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ assert(repr == .indirect);
+ return try cg.module.opaqueType("zero-sized-array");
+ } else if (total_len == 0) {
+ // The size of the array would be 0, but that is not allowed in SPIR-V.
+ // This path can be reached for example when a pointer is sliced in a way
+ // that produces a zero-length array. In all cases where this type can be
+ // generated, this should be an indirect path.
+ assert(repr == .indirect);
+ // The element type has a non-zero size, so generate an array of 1 element
+ // instead, so that ptr_elem_ptr instructions can be lowered to ptrAccessChain
+ // instead of manually performing the math.
+ const len_id = try cg.constInt(.u32, 1);
+ return try cg.module.arrayType(len_id, elem_ty_id);
+ } else {
+ const total_len_id = try cg.constInt(.u32, total_len);
+ const result_id = try cg.module.arrayType(total_len_id, elem_ty_id);
+ switch (target.os.tag) {
+ .vulkan, .opengl => {
+ try cg.module.decorate(result_id, .{
+ .array_stride = .{
+ .array_stride = @intCast(elem_ty.abiSize(zcu)),
+ },
+ });
+ },
+ else => {},
+ }
+ return result_id;
+ }
+ },
+ .vector => {
+ const elem_ty = ty.childType(zcu);
+ const elem_ty_id = try cg.resolveType(elem_ty, repr);
+ const len = ty.vectorLen(zcu);
+ if (cg.isSpvVector(ty)) return try cg.module.vectorType(len, elem_ty_id);
+ const len_id = try cg.constInt(.u32, len);
+ return try cg.module.arrayType(len_id, elem_ty_id);
+ },
+ .@"fn" => switch (repr) {
+ .direct => {
+ const fn_info = zcu.typeToFunc(ty).?;
+
+ comptime assert(zig_call_abi_ver == 3);
+ assert(!fn_info.is_var_args);
+ switch (fn_info.cc) {
+ .auto,
+ .spirv_kernel,
+ .spirv_fragment,
+ .spirv_vertex,
+ .spirv_device,
+ => {},
+ else => unreachable,
+ }
+
+ const return_ty_id = try cg.resolveFnReturnType(.fromInterned(fn_info.return_type));
+ const param_ty_ids = try gpa.alloc(Id, fn_info.param_types.len);
+ defer gpa.free(param_ty_ids);
+ var param_index: usize = 0;
+ for (fn_info.param_types.get(ip)) |param_ty_index| {
+ const param_ty: Type = .fromInterned(param_ty_index);
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ param_ty_ids[param_index] = try cg.resolveType(param_ty, .direct);
+ param_index += 1;
+ }
+
+ return try cg.module.functionType(return_ty_id, param_ty_ids[0..param_index]);
+ },
+ .indirect => {
+ // TODO: Represent function pointers properly.
+ // For now, just use an usize type.
+ return try cg.resolveType(.usize, .indirect);
+ },
+ },
+ .pointer => {
+ const ptr_info = ty.ptrInfo(zcu);
+
+ const child_ty: Type = .fromInterned(ptr_info.child);
+ const child_ty_id = try cg.resolveType(child_ty, .indirect);
+ const storage_class = cg.module.storageClass(ptr_info.flags.address_space);
+ const ptr_ty_id = try cg.module.ptrType(child_ty_id, storage_class);
+
+ if (ptr_info.flags.size != .slice) {
+ return ptr_ty_id;
+ }
+
+ const size_ty_id = try cg.resolveType(.usize, .direct);
+ return try cg.module.structType(
+ &.{ ptr_ty_id, size_ty_id },
+ &.{ "ptr", "len" },
+ null,
+ .none,
+ );
+ },
+ .@"struct" => {
+ const struct_type = switch (ip.indexToKey(ty.toIntern())) {
+ .tuple_type => |tuple| {
+ const member_types = try gpa.alloc(Id, tuple.values.len);
+ defer gpa.free(member_types);
+
+ var member_index: usize = 0;
+ for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
+
+ member_types[member_index] = try cg.resolveType(.fromInterned(field_ty), .indirect);
+ member_index += 1;
+ }
+
+ const result_id = try cg.module.structType(
+ member_types[0..member_index],
+ null,
+ null,
+ .none,
+ );
+ const type_name = try cg.resolveTypeName(ty);
+ defer gpa.free(type_name);
+ try cg.module.debugName(result_id, type_name);
+ return result_id;
+ },
+ .struct_type => ip.loadStructType(ty.toIntern()),
+ else => unreachable,
+ };
+
+ if (struct_type.layout == .@"packed") {
+ return try cg.resolveType(.fromInterned(struct_type.backingIntTypeUnordered(ip)), .direct);
+ }
+
+ var member_types = std.ArrayList(Id).init(gpa);
+ defer member_types.deinit();
+
+ var member_names = std.ArrayList([]const u8).init(gpa);
+ defer member_names.deinit();
+
+ var member_offsets = std.ArrayList(u32).init(gpa);
+ defer member_offsets.deinit();
+
+ var it = struct_type.iterateRuntimeOrder(ip);
+ while (it.next()) |field_index| {
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+ const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
+ try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+ try member_types.append(try cg.resolveType(field_ty, .indirect));
+ try member_names.append(field_name.toSlice(ip));
+ try member_offsets.append(@intCast(ty.structFieldOffset(field_index, zcu)));
+ }
+
+ const result_id = try cg.module.structType(
+ member_types.items,
+ member_names.items,
+ member_offsets.items,
+ ty.toIntern(),
+ );
+
+ const type_name = try cg.resolveTypeName(ty);
+ defer gpa.free(type_name);
+ try cg.module.debugName(result_id, type_name);
+
+ return result_id;
+ },
+ .optional => {
+ const payload_ty = ty.optionalChild(zcu);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ // Just use a bool.
+ // Note: Always generate the bool in the indirect representation, to keep things simple.
+ // Perform the conversion to a direct bool when the field is extracted.
+ return try cg.resolveType(.bool, .indirect);
+ }
+
+ const payload_ty_id = try cg.resolveType(payload_ty, .indirect);
+ if (ty.optionalReprIsPayload(zcu)) {
+ // Optional is actually a pointer or a slice.
+ return payload_ty_id;
+ }
+
+ const bool_ty_id = try cg.resolveType(.bool, .indirect);
+
+ return try cg.module.structType(
+ &.{ payload_ty_id, bool_ty_id },
+ &.{ "payload", "valid" },
+ null,
+ .none,
+ );
+ },
+ .@"union" => return try cg.resolveUnionType(ty),
+ .error_set => {
+ const err_int_ty = try pt.errorIntType();
+ return try cg.resolveType(err_int_ty, repr);
+ },
+ .error_union => {
+ const payload_ty = ty.errorUnionPayload(zcu);
+ const err_ty = ty.errorUnionSet(zcu);
+ const error_ty_id = try cg.resolveType(err_ty, .indirect);
+
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+ if (!eu_layout.payload_has_bits) {
+ return error_ty_id;
+ }
+
+ const payload_ty_id = try cg.resolveType(payload_ty, .indirect);
+
+ var member_types: [2]Id = undefined;
+ var member_names: [2][]const u8 = undefined;
+ if (eu_layout.error_first) {
+ // Put the error first
+ member_types = .{ error_ty_id, payload_ty_id };
+ member_names = .{ "error", "payload" };
+ // TODO: ABI padding?
+ } else {
+ // Put the payload first.
+ member_types = .{ payload_ty_id, error_ty_id };
+ member_names = .{ "payload", "error" };
+ // TODO: ABI padding?
+ }
+
+ return try cg.module.structType(&member_types, &member_names, null, .none);
+ },
+ .@"opaque" => {
+ const type_name = try cg.resolveTypeName(ty);
+ defer gpa.free(type_name);
+ return try cg.module.opaqueType(type_name);
+ },
+
+ .null,
+ .undefined,
+ .enum_literal,
+ .comptime_float,
+ .comptime_int,
+ .type,
+ => unreachable, // Must be comptime.
+
+ .frame, .@"anyframe" => unreachable, // TODO
+ }
+}
+
+const ErrorUnionLayout = struct {
+ payload_has_bits: bool,
+ error_first: bool,
+
+ fn errorFieldIndex(layout: @This()) u32 {
+ assert(layout.payload_has_bits);
+ return if (layout.error_first) 0 else 1;
+ }
+
+ fn payloadFieldIndex(layout: @This()) u32 {
+ assert(layout.payload_has_bits);
+ return if (layout.error_first) 1 else 0;
+ }
+};
+
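+/// Determine the layout of an error union: whether the payload has any runtime bits and,
+/// if so, whether the error value or the payload comes first (the error value leads only
+/// when its alignment is strictly greater than the payload's). For example, assuming the
+/// usual ABI alignments, `anyerror!u64` puts the payload first, while `anyerror!u8` puts
+/// the error first.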
+fn errorUnionLayout(cg: *CodeGen, payload_ty: Type) ErrorUnionLayout {
+ const zcu = cg.module.zcu;
+
+ const error_align = Type.abiAlignment(.anyerror, zcu);
+ const payload_align = payload_ty.abiAlignment(zcu);
+
+ const error_first = error_align.compare(.gt, payload_align);
+ return .{
+ .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(zcu),
+ .error_first = error_first,
+ };
+}
+
+const UnionLayout = struct {
+ /// If false, this union is represented
+ /// by only an integer of the tag type.
+ has_payload: bool,
+ tag_size: u32,
+ tag_index: u32,
+ /// Note: This is the size of the payload type itself, NOT the size of the ENTIRE payload.
+ /// Use `has_payload` instead!!
+ payload_ty: Type,
+ payload_size: u32,
+ payload_index: u32,
+ payload_padding_size: u32,
+ payload_padding_index: u32,
+ padding_size: u32,
+ padding_index: u32,
+ total_fields: u32,
+};
+
+fn unionLayout(cg: *CodeGen, ty: Type) UnionLayout {
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const layout = ty.unionGetLayout(zcu);
+ const union_obj = zcu.typeToUnion(ty).?;
+
+ var union_layout: UnionLayout = .{
+ .has_payload = layout.payload_size != 0,
+ .tag_size = @intCast(layout.tag_size),
+ .tag_index = undefined,
+ .payload_ty = undefined,
+ .payload_size = undefined,
+ .payload_index = undefined,
+ .payload_padding_size = undefined,
+ .payload_padding_index = undefined,
+ .padding_size = @intCast(layout.padding),
+ .padding_index = undefined,
+ .total_fields = undefined,
+ };
+
+ if (union_layout.has_payload) {
+ const most_aligned_field = layout.most_aligned_field;
+ const most_aligned_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[most_aligned_field]);
+ union_layout.payload_ty = most_aligned_field_ty;
+ union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(zcu));
+ } else {
+ union_layout.payload_size = 0;
+ }
+
+ union_layout.payload_padding_size = @intCast(layout.payload_size - union_layout.payload_size);
+
+ const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
+ var field_index: u32 = 0;
+
+ if (union_layout.tag_size != 0 and tag_first) {
+ union_layout.tag_index = field_index;
+ field_index += 1;
+ }
+
+ if (union_layout.payload_size != 0) {
+ union_layout.payload_index = field_index;
+ field_index += 1;
+ }
+
+ if (union_layout.payload_padding_size != 0) {
+ union_layout.payload_padding_index = field_index;
+ field_index += 1;
+ }
+
+ if (union_layout.tag_size != 0 and !tag_first) {
+ union_layout.tag_index = field_index;
+ field_index += 1;
+ }
+
+ if (union_layout.padding_size != 0) {
+ union_layout.padding_index = field_index;
+ field_index += 1;
+ }
+
+ union_layout.total_fields = field_index;
+
+ return union_layout;
+}
+
+/// This structure represents a "temporary" value: Something we are currently
+/// operating on. It typically lives no longer than the function that
+/// implements a particular AIR operation. These are used to more easily
+/// implement vectorizable operations (see Vectorization and the build*
+/// functions), and are typically only used for vectors of primitive types.
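+/// A typical (sketched) usage pattern is to wrap the AIR operands via `cg.temporary(...)`,
+/// combine them with one of the `build*` helpers below, and call `materialize()` on the
+/// result to obtain the final SPIR-V id.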
+const Temporary = struct {
+ /// The type of the temporary. This is here mainly
+ /// for easier bookkeeping. Because we will never really
+ /// store Temporaries, they only cause extra stack space,
+ /// therefore no real storage is wasted.
+ ty: Type,
+ /// The value that this temporary holds. This is not necessarily
+ /// a value that is actually usable, or a single value: it is virtual
+ /// until materialize() is called, at which point it is turned into
+ /// the usual SPIR-V representation of `Temporary.ty`.
+ value: Temporary.Value,
+
+ const Value = union(enum) {
+ singleton: Id,
+ exploded_vector: IdRange,
+ };
+
+ fn init(ty: Type, singleton: Id) Temporary {
+ return .{ .ty = ty, .value = .{ .singleton = singleton } };
+ }
+
+ fn materialize(temp: Temporary, cg: *CodeGen) !Id {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ switch (temp.value) {
+ .singleton => |id| return id,
+ .exploded_vector => |range| {
+ assert(temp.ty.isVector(zcu));
+ assert(temp.ty.vectorLen(zcu) == range.len);
+ const constituents = try gpa.alloc(Id, range.len);
+ defer gpa.free(constituents);
+ for (constituents, 0..range.len) |*id, i| {
+ id.* = range.at(i);
+ }
+ const result_ty_id = try cg.resolveType(temp.ty, .direct);
+ return cg.constructComposite(result_ty_id, constituents);
+ },
+ }
+ }
+
+ fn vectorization(temp: Temporary, cg: *CodeGen) Vectorization {
+ return .fromType(temp.ty, cg);
+ }
+
+ fn pun(temp: Temporary, new_ty: Type) Temporary {
+ return .{
+ .ty = new_ty,
+ .value = temp.value,
+ };
+ }
+
+ /// 'Explode' a temporary into separate elements. This turns a vector
+ /// into a bag of elements.
+ fn explode(temp: Temporary, cg: *CodeGen) !IdRange {
+ const zcu = cg.module.zcu;
+
+ // If the value is a scalar, then this is a no-op.
+ if (!temp.ty.isVector(zcu)) {
+ return switch (temp.value) {
+ .singleton => |id| .{ .base = @intFromEnum(id), .len = 1 },
+ .exploded_vector => |range| range,
+ };
+ }
+
+ const ty_id = try cg.resolveType(temp.ty.scalarType(zcu), .direct);
+ const n = temp.ty.vectorLen(zcu);
+ const results = cg.module.allocIds(n);
+
+ const id = switch (temp.value) {
+ .singleton => |id| id,
+ .exploded_vector => |range| return range,
+ };
+
+ for (0..n) |i| {
+ const indexes = [_]u32{@intCast(i)};
+ try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ .id_result_type = ty_id,
+ .id_result = results.at(i),
+ .composite = id,
+ .indexes = &indexes,
+ });
+ }
+
+ return results;
+ }
+};
+
+/// Initialize a `Temporary` from an AIR value.
+fn temporary(cg: *CodeGen, inst: Air.Inst.Ref) !Temporary {
+ return .{
+ .ty = cg.typeOf(inst),
+ .value = .{ .singleton = try cg.resolve(inst) },
+ };
+}
+
+/// This union describes how a particular operation should be vectorized.
+/// That depends on the operation and number of components of the inputs.
+const Vectorization = union(enum) {
+ /// This is an operation between scalars.
+ scalar,
+ /// This operation is unrolled into separate operations.
+ /// Inputs may still be SPIR-V vectors, for example,
+ /// when the operation can't be vectorized in SPIR-V.
+ /// Value is number of components.
+ unrolled: u32,
+
+ /// Derive a vectorization from a particular type
+ fn fromType(ty: Type, cg: *CodeGen) Vectorization {
+ const zcu = cg.module.zcu;
+ if (!ty.isVector(zcu)) return .scalar;
+ return .{ .unrolled = ty.vectorLen(zcu) };
+ }
+
+ /// Given two vectorization methods, compute a "unification": a fallback
+ /// that works for both, according to the following rules:
+ /// - Scalars may broadcast
+ /// - SPIR-V vectorized operations will unroll
+ /// - Prefer scalar > unrolled
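+ /// For example, unifying a broadcasted scalar with a 4-element unrolled operand yields
+ /// `.{ .unrolled = 4 }`, while two scalar operands stay `.scalar`.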
+ fn unify(a: Vectorization, b: Vectorization) Vectorization {
+ if (a == .scalar and b == .scalar) return .scalar;
+ if (a == .unrolled or b == .unrolled) {
+ if (a == .unrolled and b == .unrolled) assert(a.components() == b.components());
+ if (a == .unrolled) return .{ .unrolled = a.components() };
+ return .{ .unrolled = b.components() };
+ }
+ unreachable;
+ }
+
+ /// Query the number of components that inputs of this operation have.
+ /// Note: for broadcasting scalars, this returns the number of elements
+ /// that the broadcasted vector would have.
+ fn components(vec: Vectorization) u32 {
+ return switch (vec) {
+ .scalar => 1,
+ .unrolled => |n| n,
+ };
+ }
+
+ /// Turns `ty` into the result-type of the entire operation.
+ /// `ty` may be a scalar or vector, it doesn't matter.
+ fn resultType(vec: Vectorization, cg: *CodeGen, ty: Type) !Type {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const scalar_ty = ty.scalarType(zcu);
+ return switch (vec) {
+ .scalar => scalar_ty,
+ .unrolled => |n| try pt.vectorType(.{ .len = n, .child = scalar_ty.toIntern() }),
+ };
+ }
+
+ /// Before a temporary can be used, some setup may need to be done. This function implements
+ /// this setup, and returns a new type that holds the relevant information on how to access
+ /// elements of the input.
+ fn prepare(vec: Vectorization, cg: *CodeGen, tmp: Temporary) !PreparedOperand {
+ const zcu = cg.module.zcu;
+ const is_vector = tmp.ty.isVector(zcu);
+ const value: PreparedOperand.Value = switch (tmp.value) {
+ .singleton => |id| switch (vec) {
+ .scalar => blk: {
+ assert(!is_vector);
+ break :blk .{ .scalar = id };
+ },
+ .unrolled => blk: {
+ if (is_vector) break :blk .{ .vector_exploded = try tmp.explode(cg) };
+ break :blk .{ .scalar_broadcast = id };
+ },
+ },
+ .exploded_vector => |range| switch (vec) {
+ .scalar => unreachable,
+ .unrolled => |n| blk: {
+ assert(range.len == n);
+ break :blk .{ .vector_exploded = range };
+ },
+ },
+ };
+
+ return .{
+ .ty = tmp.ty,
+ .value = value,
+ };
+ }
+
+ /// Finalize the results of an operation back into a temporary. `results` is
+ /// a list of result-ids of the operation.
+ fn finalize(vec: Vectorization, ty: Type, results: IdRange) Temporary {
+ assert(vec.components() == results.len);
+ return .{
+ .ty = ty,
+ .value = switch (vec) {
+ .scalar => .{ .singleton = results.at(0) },
+ .unrolled => .{ .exploded_vector = results },
+ },
+ };
+ }
+
+ /// This struct represents an operand that has gone through some setup, and is
+ /// ready to be used as part of an operation.
+ const PreparedOperand = struct {
+ ty: Type,
+ value: PreparedOperand.Value,
+
+ /// The types of value that a prepared operand can hold internally. Depends
+ /// on the operation and input value.
+ const Value = union(enum) {
+ /// A single scalar value that is used by a scalar operation.
+ scalar: Id,
+ /// A single scalar that is broadcasted in an unrolled operation.
+ scalar_broadcast: Id,
+ /// A vector represented by a consecutive list of IDs that is used in an unrolled operation.
+ vector_exploded: IdRange,
+ };
+
+ /// Query the value at a particular index of the operation. Note that
+ /// the index is *not* the component/lane, but the index of the *operation*.
+ fn at(op: PreparedOperand, i: usize) Id {
+ switch (op.value) {
+ .scalar => |id| {
+ assert(i == 0);
+ return id;
+ },
+ .scalar_broadcast => |id| return id,
+ .vector_exploded => |range| return range.at(i),
+ }
+ }
+ };
+};
+
+/// A utility function to compute the vectorization style of
+/// a list of values. These values may be any of the following:
+/// - A `Vectorization` instance
+/// - A Type, in which case the vectorization is computed via `Vectorization.fromType`.
+/// - A Temporary, in which case the vectorization is computed via `Temporary.vectorization`.
+fn vectorization(cg: *CodeGen, args: anytype) Vectorization {
+ var v: Vectorization = undefined;
+ assert(args.len >= 1);
+ inline for (args, 0..) |arg, i| {
+ const iv: Vectorization = switch (@TypeOf(arg)) {
+ Vectorization => arg,
+ Type => Vectorization.fromType(arg, cg),
+ Temporary => arg.vectorization(cg),
+ else => @compileError("invalid type"),
+ };
+ if (i == 0) {
+ v = iv;
+ } else {
+ v = v.unify(iv);
+ }
+ }
+ return v;
+}
+
+/// This function builds an OpSConvert or OpUConvert (or an OpFConvert for floats),
+/// depending on the signedness of the types.
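+/// For example, converting a `@Vector(4, u8)` temporary to `u16` lanes unrolls into four
+/// scalar OpUConvert instructions, one per component.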
+fn buildConvert(cg: *CodeGen, dst_ty: Type, src: Temporary) !Temporary {
+ const zcu = cg.module.zcu;
+
+ const dst_ty_id = try cg.resolveType(dst_ty.scalarType(zcu), .direct);
+ const src_ty_id = try cg.resolveType(src.ty.scalarType(zcu), .direct);
+
+ const v = cg.vectorization(.{ dst_ty, src });
+ const result_ty = try v.resultType(cg, dst_ty);
+
+ // We can compare the type IDs directly, because type IDs are cached and deduplicated.
+ if (dst_ty_id == src_ty_id) {
+ // Nothing to do, type-pun to the right value.
+ // Note, Caller guarantees that the types fit (or caller will normalize after),
+ // so we don't have to normalize here.
+ // Note, dst_ty may be a scalar type even if we expect a vector, so we have to
+ // convert to the right type here.
+ return src.pun(result_ty);
+ }
+
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+
+ const op_result_ty = dst_ty.scalarType(zcu);
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+
+ const opcode: Opcode = blk: {
+ if (dst_ty.scalarType(zcu).isAnyFloat()) break :blk .OpFConvert;
+ if (dst_ty.scalarType(zcu).isSignedInt(zcu)) break :blk .OpSConvert;
+ break :blk .OpUConvert;
+ };
+
+ const op_src = try v.prepare(cg, src);
+
+ for (0..ops) |i| {
+ try cg.body.emitRaw(cg.module.gpa, opcode, 3);
+ cg.body.writeOperand(Id, op_result_ty_id);
+ cg.body.writeOperand(Id, results.at(i));
+ cg.body.writeOperand(Id, op_src.at(i));
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+fn buildFma(cg: *CodeGen, a: Temporary, b: Temporary, c: Temporary) !Temporary {
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+
+ const v = cg.vectorization(.{ a, b, c });
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+
+ const op_result_ty = a.ty.scalarType(zcu);
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+ const result_ty = try v.resultType(cg, a.ty);
+
+ const op_a = try v.prepare(cg, a);
+ const op_b = try v.prepare(cg, b);
+ const op_c = try v.prepare(cg, c);
+
+ const set = try cg.importExtendedSet();
+
+ // TODO: Put these numbers in some definition
+ const instruction: u32 = switch (target.os.tag) {
+ .opencl => 26, // fma
+ // NOTE: Vulkan's FMA instruction does *NOT* produce the right values!
+ // Its precision guarantees do NOT match Zig's, and they do NOT match OpenCL's!
+ // It needs to be emulated!
+ .vulkan, .opengl => return cg.todo("implement fma operation for {s} os", .{@tagName(target.os.tag)}),
+ else => unreachable,
+ };
+
+ for (0..ops) |i| {
+ try cg.body.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = op_result_ty_id,
+ .id_result = results.at(i),
+ .set = set,
+ .instruction = .{ .inst = instruction },
+ .id_ref_4 = &.{ op_a.at(i), op_b.at(i), op_c.at(i) },
+ });
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+fn buildSelect(cg: *CodeGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary {
+ const zcu = cg.module.zcu;
+
+ const v = cg.vectorization(.{ condition, lhs, rhs });
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+
+ const op_result_ty = lhs.ty.scalarType(zcu);
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+ const result_ty = try v.resultType(cg, lhs.ty);
+
+ assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .bool);
+
+ const cond = try v.prepare(cg, condition);
+ const object_1 = try v.prepare(cg, lhs);
+ const object_2 = try v.prepare(cg, rhs);
+
+ for (0..ops) |i| {
+ try cg.body.emit(cg.module.gpa, .OpSelect, .{
+ .id_result_type = op_result_ty_id,
+ .id_result = results.at(i),
+ .condition = cond.at(i),
+ .object_1 = object_1.at(i),
+ .object_2 = object_2.at(i),
+ });
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+fn buildCmp(cg: *CodeGen, opcode: Opcode, lhs: Temporary, rhs: Temporary) !Temporary {
+ const v = cg.vectorization(.{ lhs, rhs });
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+
+ const op_result_ty: Type = .bool;
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+ const result_ty = try v.resultType(cg, Type.bool);
+
+ const op_lhs = try v.prepare(cg, lhs);
+ const op_rhs = try v.prepare(cg, rhs);
+
+ for (0..ops) |i| {
+ try cg.body.emitRaw(cg.module.gpa, opcode, 4);
+ cg.body.writeOperand(Id, op_result_ty_id);
+ cg.body.writeOperand(Id, results.at(i));
+ cg.body.writeOperand(Id, op_lhs.at(i));
+ cg.body.writeOperand(Id, op_rhs.at(i));
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+const UnaryOp = enum {
+ l_not,
+ bit_not,
+ i_neg,
+ f_neg,
+ i_abs,
+ f_abs,
+ clz,
+ ctz,
+ floor,
+ ceil,
+ trunc,
+ round,
+ sqrt,
+ sin,
+ cos,
+ tan,
+ exp,
+ exp2,
+ log,
+ log2,
+ log10,
+};
+
+fn buildUnary(cg: *CodeGen, op: UnaryOp, operand: Temporary) !Temporary {
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ const v = cg.vectorization(.{operand});
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+ const op_result_ty = operand.ty.scalarType(zcu);
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+ const result_ty = try v.resultType(cg, operand.ty);
+
+ const op_operand = try v.prepare(cg, operand);
+
+ if (switch (op) {
+ .l_not => .OpLogicalNot,
+ .bit_not => .OpNot,
+ .i_neg => .OpSNegate,
+ .f_neg => .OpFNegate,
+ else => @as(?Opcode, null),
+ }) |opcode| {
+ for (0..ops) |i| {
+ try cg.body.emitRaw(cg.module.gpa, opcode, 3);
+ cg.body.writeOperand(Id, op_result_ty_id);
+ cg.body.writeOperand(Id, results.at(i));
+ cg.body.writeOperand(Id, op_operand.at(i));
+ }
+ } else {
+ const set = try cg.importExtendedSet();
+ const extinst: u32 = switch (target.os.tag) {
+ .opencl => switch (op) {
+ .i_abs => 141, // s_abs
+ .f_abs => 23, // fabs
+ .clz => 151, // clz
+ .ctz => 152, // ctz
+ .floor => 25, // floor
+ .ceil => 12, // ceil
+ .trunc => 66, // trunc
+ .round => 55, // round
+ .sqrt => 61, // sqrt
+ .sin => 57, // sin
+ .cos => 14, // cos
+ .tan => 62, // tan
+ .exp => 19, // exp
+ .exp2 => 20, // exp2
+ .log => 37, // log
+ .log2 => 38, // log2
+ .log10 => 39, // log10
+ else => unreachable,
+ },
+ // Note: We'll need to check these for floating point accuracy.
+ // Vulkan does not put tight requirements on these; for correctness
+ // we might want to emulate them at some point.
+ .vulkan, .opengl => switch (op) {
+ .i_abs => 5, // SAbs
+ .f_abs => 4, // FAbs
+ .floor => 8, // Floor
+ .ceil => 9, // Ceil
+ .trunc => 3, // Trunc
+ .round => 1, // Round
+ .clz,
+ .ctz,
+ .sqrt,
+ .sin,
+ .cos,
+ .tan,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ => return cg.todo(
+ "implement unary operation '{s}' for {s} os",
+ .{ @tagName(op), @tagName(target.os.tag) },
+ ),
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+
+ for (0..ops) |i| {
+ try cg.body.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = op_result_ty_id,
+ .id_result = results.at(i),
+ .set = set,
+ .instruction = .{ .inst = extinst },
+ .id_ref_4 = &.{op_operand.at(i)},
+ });
+ }
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+fn buildBinary(cg: *CodeGen, opcode: Opcode, lhs: Temporary, rhs: Temporary) !Temporary {
+ const zcu = cg.module.zcu;
+
+ const v = cg.vectorization(.{ lhs, rhs });
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+
+ const op_result_ty = lhs.ty.scalarType(zcu);
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+ const result_ty = try v.resultType(cg, lhs.ty);
+
+ const op_lhs = try v.prepare(cg, lhs);
+ const op_rhs = try v.prepare(cg, rhs);
+
+ for (0..ops) |i| {
+ try cg.body.emitRaw(cg.module.gpa, opcode, 4);
+ cg.body.writeOperand(Id, op_result_ty_id);
+ cg.body.writeOperand(Id, results.at(i));
+ cg.body.writeOperand(Id, op_lhs.at(i));
+ cg.body.writeOperand(Id, op_rhs.at(i));
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+/// This function builds an extended multiplication, either OpSMulExtended or OpUMulExtended on Vulkan,
+/// or OpIMul and s_mul_hi or u_mul_hi on OpenCL.
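+/// Per lane this lowers either to an OpIMul plus an s_mul_hi/u_mul_hi ext-inst (OpenCL),
+/// or to a single OpSMulExtended/OpUMulExtended followed by two OpCompositeExtracts
+/// (Vulkan/OpenGL).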
+fn buildWideMul(
+ cg: *CodeGen,
+ signedness: std.builtin.Signedness,
+ lhs: Temporary,
+ rhs: Temporary,
+) !struct { Temporary, Temporary } {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ const ip = &zcu.intern_pool;
+
+ const v = lhs.vectorization(cg).unify(rhs.vectorization(cg));
+ const ops = v.components();
+
+ const arith_op_ty = lhs.ty.scalarType(zcu);
+ const arith_op_ty_id = try cg.resolveType(arith_op_ty, .direct);
+
+ const lhs_op = try v.prepare(cg, lhs);
+ const rhs_op = try v.prepare(cg, rhs);
+
+ const value_results = cg.module.allocIds(ops);
+ const overflow_results = cg.module.allocIds(ops);
+
+ switch (target.os.tag) {
+ .opencl => {
+ // Currently, SPIRV-LLVM-Translator based backends cannot deal with OpSMulExtended and
+ // OpUMulExtended. For these we will use the OpenCL s_mul_hi to compute the high-order bits
+ // instead.
+ const set = try cg.importExtendedSet();
+ const overflow_inst: u32 = switch (signedness) {
+ .signed => 160, // s_mul_hi
+ .unsigned => 203, // u_mul_hi
+ };
+
+ for (0..ops) |i| {
+ try cg.body.emit(cg.module.gpa, .OpIMul, .{
+ .id_result_type = arith_op_ty_id,
+ .id_result = value_results.at(i),
+ .operand_1 = lhs_op.at(i),
+ .operand_2 = rhs_op.at(i),
+ });
+
+ try cg.body.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = arith_op_ty_id,
+ .id_result = overflow_results.at(i),
+ .set = set,
+ .instruction = .{ .inst = overflow_inst },
+ .id_ref_4 = &.{ lhs_op.at(i), rhs_op.at(i) },
+ });
+ }
+ },
+ .vulkan, .opengl => {
+ // Operations return a struct{T, T}
+ // where T is maybe vectorized.
+ const op_result_ty: Type = .fromInterned(try ip.getTupleType(zcu.gpa, pt.tid, .{
+ .types = &.{ arith_op_ty.toIntern(), arith_op_ty.toIntern() },
+ .values = &.{ .none, .none },
+ }));
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+
+ const opcode: Opcode = switch (signedness) {
+ .signed => .OpSMulExtended,
+ .unsigned => .OpUMulExtended,
+ };
+
+ for (0..ops) |i| {
+ const op_result = cg.module.allocId();
+
+ try cg.body.emitRaw(cg.module.gpa, opcode, 4);
+ cg.body.writeOperand(Id, op_result_ty_id);
+ cg.body.writeOperand(Id, op_result);
+ cg.body.writeOperand(Id, lhs_op.at(i));
+ cg.body.writeOperand(Id, rhs_op.at(i));
+
+ // The above operation returns a struct. We might want to expand
+ // Temporary to deal with the fact that these are structs eventually,
+ // but for now, take the struct apart and return two separate vectors.
+
+ try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ .id_result_type = arith_op_ty_id,
+ .id_result = value_results.at(i),
+ .composite = op_result,
+ .indexes = &.{0},
+ });
+
+ try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ .id_result_type = arith_op_ty_id,
+ .id_result = overflow_results.at(i),
+ .composite = op_result,
+ .indexes = &.{1},
+ });
+ }
+ },
+ else => unreachable,
+ }
+
+ const result_ty = try v.resultType(cg, lhs.ty);
+ return .{
+ v.finalize(result_ty, value_results),
+ v.finalize(result_ty, overflow_results),
+ };
+}
+
+/// The SPIR-V backend is not yet advanced enough to support the std testing infrastructure.
+/// In order to be able to run tests, we "temporarily" lower test kernels into separate entry-
+/// points. The test executor will then be able to invoke these to run the tests.
+/// Note that tests are lowered according to std.builtin.TestFn, which is `fn () anyerror!void`.
+/// (anyerror!void has the same layout as anyerror).
+/// Each test declaration generates a function like:
+/// %anyerror = OpTypeInt 16 0
+/// %p_invocation_globals_struct_ty = ...
+/// %p_anyerror = OpTypePointer CrossWorkgroup %anyerror
+/// %K = OpTypeFunction %void %p_invocation_globals_struct_ty %p_anyerror
+///
+/// %test = OpFunction %void %K
+/// %p_invocation_globals = OpFunctionParameter p_invocation_globals_struct_ty
+/// %p_err = OpFunctionParameter %p_anyerror
+/// %lbl = OpLabel
+/// %result = OpFunctionCall %anyerror %func %p_invocation_globals
+/// OpStore %p_err %result
+/// OpFunctionEnd
+/// TODO is to also write out the error as a function call parameter, and to somehow fetch
+/// the name of an error in the text executor.
+fn generateTestEntryPoint(
+ cg: *CodeGen,
+ name: []const u8,
+ spv_decl_index: Module.Decl.Index,
+ test_id: Id,
+) !void {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+
+ const anyerror_ty_id = try cg.resolveType(.anyerror, .direct);
+ const ptr_anyerror_ty = try cg.pt.ptrType(.{
+ .child = .anyerror_type,
+ .flags = .{ .address_space = .global },
+ });
+ const ptr_anyerror_ty_id = try cg.resolveType(ptr_anyerror_ty, .direct);
+
+ const kernel_id = cg.module.declPtr(spv_decl_index).result_id;
+
+ const section = &cg.module.sections.functions;
+
+ const p_error_id = cg.module.allocId();
+ switch (target.os.tag) {
+ .opencl, .amdhsa => {
+ const void_ty_id = try cg.resolveType(.void, .direct);
+ const kernel_proto_ty_id = try cg.module.functionType(void_ty_id, &.{ptr_anyerror_ty_id});
+
+ try section.emit(gpa, .OpFunction, .{
+ .id_result_type = try cg.resolveType(.void, .direct),
+ .id_result = kernel_id,
+ .function_control = .{},
+ .function_type = kernel_proto_ty_id,
+ });
+
+ try section.emit(gpa, .OpFunctionParameter, .{
+ .id_result_type = ptr_anyerror_ty_id,
+ .id_result = p_error_id,
+ });
+
+ try section.emit(gpa, .OpLabel, .{
+ .id_result = cg.module.allocId(),
+ });
+ },
+ .vulkan, .opengl => {
+ if (cg.module.error_buffer == null) {
+ const spv_err_decl_index = try cg.module.allocDecl(.global);
+ try cg.module.declareDeclDeps(spv_err_decl_index, &.{});
+
+ const buffer_struct_ty_id = try cg.module.structType(
+ &.{anyerror_ty_id},
+ &.{"error_out"},
+ null,
+ .none,
+ );
+ try cg.module.decorate(buffer_struct_ty_id, .block);
+ try cg.module.decorateMember(buffer_struct_ty_id, 0, .{ .offset = .{ .byte_offset = 0 } });
+
+ const ptr_buffer_struct_ty_id = cg.module.allocId();
+ try cg.module.sections.globals.emit(gpa, .OpTypePointer, .{
+ .id_result = ptr_buffer_struct_ty_id,
+ .storage_class = cg.module.storageClass(.global),
+ .type = buffer_struct_ty_id,
+ });
+
+ const buffer_struct_id = cg.module.declPtr(spv_err_decl_index).result_id;
+ try cg.module.sections.globals.emit(gpa, .OpVariable, .{
+ .id_result_type = ptr_buffer_struct_ty_id,
+ .id_result = buffer_struct_id,
+ .storage_class = cg.module.storageClass(.global),
+ });
+ try cg.module.decorate(buffer_struct_id, .{ .descriptor_set = .{ .descriptor_set = 0 } });
+ try cg.module.decorate(buffer_struct_id, .{ .binding = .{ .binding_point = 0 } });
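+                // The test's error value is written into this buffer; a test executor is
+                // expected to read it back via descriptor set 0, binding 0.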
+
+ cg.module.error_buffer = spv_err_decl_index;
+ }
+
+ try cg.module.sections.execution_modes.emit(gpa, .OpExecutionMode, .{
+ .entry_point = kernel_id,
+ .mode = .{ .local_size = .{
+ .x_size = 1,
+ .y_size = 1,
+ .z_size = 1,
+ } },
+ });
+
+ const void_ty_id = try cg.resolveType(.void, .direct);
+ const kernel_proto_ty_id = try cg.module.functionType(void_ty_id, &.{});
+ try section.emit(gpa, .OpFunction, .{
+ .id_result_type = try cg.resolveType(.void, .direct),
+ .id_result = kernel_id,
+ .function_control = .{},
+ .function_type = kernel_proto_ty_id,
+ });
+ try section.emit(gpa, .OpLabel, .{
+ .id_result = cg.module.allocId(),
+ });
+
+ const spv_err_decl_index = cg.module.error_buffer.?;
+ const buffer_id = cg.module.declPtr(spv_err_decl_index).result_id;
+ try cg.decl_deps.put(gpa, spv_err_decl_index, {});
+
+ const zero_id = try cg.constInt(.u32, 0);
+ try section.emit(gpa, .OpInBoundsAccessChain, .{
+ .id_result_type = ptr_anyerror_ty_id,
+ .id_result = p_error_id,
+ .base = buffer_id,
+ .indexes = &.{zero_id},
+ });
+ },
+ else => unreachable,
+ }
+
+ const error_id = cg.module.allocId();
+ try section.emit(gpa, .OpFunctionCall, .{
+ .id_result_type = anyerror_ty_id,
+ .id_result = error_id,
+ .function = test_id,
+ });
+    // Note: converting the result to the direct representation is not required here.
+ try section.emit(gpa, .OpStore, .{
+ .pointer = p_error_id,
+ .object = error_id,
+ .memory_access = .{
+ .aligned = .{ .literal_integer = @intCast(Type.abiAlignment(.anyerror, zcu).toByteUnits().?) },
+ },
+ });
+ try section.emit(gpa, .OpReturn, {});
+ try section.emit(gpa, .OpFunctionEnd, {});
+
+    // Generate a different name, because the Intel runtime crashes when the entry point
+    // name is the same as another OpName.
+ const test_name = try std.fmt.allocPrint(cg.module.arena, "test {s}", .{name});
+
+ const execution_mode: spec.ExecutionModel = switch (target.os.tag) {
+ .vulkan, .opengl => .gl_compute,
+ .opencl, .amdhsa => .kernel,
+ else => unreachable,
+ };
+
+ try cg.module.declareEntryPoint(spv_decl_index, test_name, execution_mode, null);
+}
+
+fn intFromBool(cg: *CodeGen, value: Temporary) !Temporary {
+ return try cg.intFromBool2(value, Type.u1);
+}
+
+fn intFromBool2(cg: *CodeGen, value: Temporary, result_ty: Type) !Temporary {
+ const zero_id = try cg.constInt(result_ty, 0);
+ const one_id = try cg.constInt(result_ty, 1);
+
+ return try cg.buildSelect(
+ value,
+ Temporary.init(result_ty, one_id),
+ Temporary.init(result_ty, zero_id),
+ );
+}
+
+/// Convert representation from indirect (in memory) to direct (in 'register')
+/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
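+/// For example, a bool is stored in memory as a u1 with value 0 or 1; converting it to the
+/// direct representation compares it against 0 (OpINotEqual) to produce an OpTypeBool value.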
+fn convertToDirect(cg: *CodeGen, ty: Type, operand_id: Id) !Id {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
+ .bool => {
+ const false_id = try cg.constBool(false, .indirect);
+ const operand_ty = blk: {
+ if (!ty.isVector(zcu)) break :blk Type.u1;
+ break :blk try pt.vectorType(.{
+ .len = ty.vectorLen(zcu),
+ .child = .u1_type,
+ });
+ };
+
+ const result = try cg.buildCmp(
+ .OpINotEqual,
+ Temporary.init(operand_ty, operand_id),
+ Temporary.init(.u1, false_id),
+ );
+ return try result.materialize(cg);
+ },
+ else => return operand_id,
+ }
+}
+
+/// Convert representation from direct (in 'register') to indirect (in memory)
+/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
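+/// For example, a direct bool (OpTypeBool) is converted to its indirect representation by
+/// selecting between the u1 constants 1 and 0.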
+fn convertToIndirect(cg: *CodeGen, ty: Type, operand_id: Id) !Id {
+ const zcu = cg.module.zcu;
+ switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
+ .bool => {
+ const result = try cg.intFromBool(Temporary.init(ty, operand_id));
+ return try result.materialize(cg);
+ },
+ else => return operand_id,
+ }
+}
+
+fn extractField(cg: *CodeGen, result_ty: Type, object: Id, field: u32) !Id {
+ const result_ty_id = try cg.resolveType(result_ty, .indirect);
+ const result_id = cg.module.allocId();
+ const indexes = [_]u32{field};
+ try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .composite = object,
+ .indexes = &indexes,
+ });
+    // Convert bools: even in a direct struct, fields are stored in their indirect representation.
+ return try cg.convertToDirect(result_ty, result_id);
+}
+
+fn extractVectorComponent(cg: *CodeGen, result_ty: Type, vector_id: Id, field: u32) !Id {
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ const result_id = cg.module.allocId();
+ const indexes = [_]u32{field};
+ try cg.body.emit(cg.module.gpa, .OpCompositeExtract, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .composite = vector_id,
+ .indexes = &indexes,
+ });
+ // Vector components are already stored in direct representation.
+ return result_id;
+}
+
+const MemoryOptions = struct {
+ is_volatile: bool = false,
+};
+
+fn load(cg: *CodeGen, value_ty: Type, ptr_id: Id, options: MemoryOptions) !Id {
+ const zcu = cg.module.zcu;
+ const alignment: u32 = @intCast(value_ty.abiAlignment(zcu).toByteUnits().?);
+ const indirect_value_ty_id = try cg.resolveType(value_ty, .indirect);
+ const result_id = cg.module.allocId();
+ const access: spec.MemoryAccess.Extended = .{
+ .@"volatile" = options.is_volatile,
+ .aligned = .{ .literal_integer = alignment },
+ };
+ try cg.body.emit(cg.module.gpa, .OpLoad, .{
+ .id_result_type = indirect_value_ty_id,
+ .id_result = result_id,
+ .pointer = ptr_id,
+ .memory_access = access,
+ });
+ return try cg.convertToDirect(value_ty, result_id);
+}
+
+fn store(cg: *CodeGen, value_ty: Type, ptr_id: Id, value_id: Id, options: MemoryOptions) !void {
+ const indirect_value_id = try cg.convertToIndirect(value_ty, value_id);
+ const access: spec.MemoryAccess.Extended = .{ .@"volatile" = options.is_volatile };
+ try cg.body.emit(cg.module.gpa, .OpStore, .{
+ .pointer = ptr_id,
+ .object = indirect_value_id,
+ .memory_access = access,
+ });
+}
+
+fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) !void {
+ for (body) |inst| {
+ try cg.genInst(inst);
+ }
+}
+
+fn genInst(cg: *CodeGen, inst: Air.Inst.Index) Error!void {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ if (cg.liveness.isUnused(inst) and !cg.air.mustLower(inst, ip))
+ return;
+
+ const air_tags = cg.air.instructions.items(.tag);
+ const maybe_result_id: ?Id = switch (air_tags[@intFromEnum(inst)]) {
+ // zig fmt: off
+ .add, .add_wrap, .add_optimized => try cg.airArithOp(inst, .OpFAdd, .OpIAdd, .OpIAdd),
+ .sub, .sub_wrap, .sub_optimized => try cg.airArithOp(inst, .OpFSub, .OpISub, .OpISub),
+ .mul, .mul_wrap, .mul_optimized => try cg.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul),
+
+ .sqrt => try cg.airUnOpSimple(inst, .sqrt),
+ .sin => try cg.airUnOpSimple(inst, .sin),
+ .cos => try cg.airUnOpSimple(inst, .cos),
+ .tan => try cg.airUnOpSimple(inst, .tan),
+ .exp => try cg.airUnOpSimple(inst, .exp),
+ .exp2 => try cg.airUnOpSimple(inst, .exp2),
+ .log => try cg.airUnOpSimple(inst, .log),
+ .log2 => try cg.airUnOpSimple(inst, .log2),
+ .log10 => try cg.airUnOpSimple(inst, .log10),
+ .abs => try cg.airAbs(inst),
+ .floor => try cg.airUnOpSimple(inst, .floor),
+ .ceil => try cg.airUnOpSimple(inst, .ceil),
+ .round => try cg.airUnOpSimple(inst, .round),
+ .trunc_float => try cg.airUnOpSimple(inst, .trunc),
+ .neg, .neg_optimized => try cg.airUnOpSimple(inst, .f_neg),
+
+ .div_float, .div_float_optimized => try cg.airArithOp(inst, .OpFDiv, .OpSDiv, .OpUDiv),
+ .div_floor, .div_floor_optimized => try cg.airDivFloor(inst),
+ .div_trunc, .div_trunc_optimized => try cg.airDivTrunc(inst),
+
+ .rem, .rem_optimized => try cg.airArithOp(inst, .OpFRem, .OpSRem, .OpUMod),
+ .mod, .mod_optimized => try cg.airArithOp(inst, .OpFMod, .OpSMod, .OpUMod),
+
+ .add_with_overflow => try cg.airAddSubOverflow(inst, .OpIAdd, .OpULessThan, .OpSLessThan),
+ .sub_with_overflow => try cg.airAddSubOverflow(inst, .OpISub, .OpUGreaterThan, .OpSGreaterThan),
+ .mul_with_overflow => try cg.airMulOverflow(inst),
+ .shl_with_overflow => try cg.airShlOverflow(inst),
+
+ .mul_add => try cg.airMulAdd(inst),
+
+ .ctz => try cg.airClzCtz(inst, .ctz),
+ .clz => try cg.airClzCtz(inst, .clz),
+
+ .select => try cg.airSelect(inst),
+
+ .splat => try cg.airSplat(inst),
+ .reduce, .reduce_optimized => try cg.airReduce(inst),
+ .shuffle_one => try cg.airShuffleOne(inst),
+ .shuffle_two => try cg.airShuffleTwo(inst),
+
+ .ptr_add => try cg.airPtrAdd(inst),
+ .ptr_sub => try cg.airPtrSub(inst),
+
+ .bit_and => try cg.airBinOpSimple(inst, .OpBitwiseAnd),
+ .bit_or => try cg.airBinOpSimple(inst, .OpBitwiseOr),
+ .xor => try cg.airBinOpSimple(inst, .OpBitwiseXor),
+ .bool_and => try cg.airBinOpSimple(inst, .OpLogicalAnd),
+ .bool_or => try cg.airBinOpSimple(inst, .OpLogicalOr),
+
+ .shl, .shl_exact => try cg.airShift(inst, .OpShiftLeftLogical, .OpShiftLeftLogical),
+ .shr, .shr_exact => try cg.airShift(inst, .OpShiftRightLogical, .OpShiftRightArithmetic),
+
+ .min => try cg.airMinMax(inst, .min),
+ .max => try cg.airMinMax(inst, .max),
+
+ .bitcast => try cg.airBitCast(inst),
+ .intcast, .trunc => try cg.airIntCast(inst),
+ .float_from_int => try cg.airFloatFromInt(inst),
+ .int_from_float => try cg.airIntFromFloat(inst),
+ .fpext, .fptrunc => try cg.airFloatCast(inst),
+ .not => try cg.airNot(inst),
+
+ .array_to_slice => try cg.airArrayToSlice(inst),
+ .slice => try cg.airSlice(inst),
+ .aggregate_init => try cg.airAggregateInit(inst),
+ .memcpy => return cg.airMemcpy(inst),
+ .memmove => return cg.airMemmove(inst),
+
+ .slice_ptr => try cg.airSliceField(inst, 0),
+ .slice_len => try cg.airSliceField(inst, 1),
+ .slice_elem_ptr => try cg.airSliceElemPtr(inst),
+ .slice_elem_val => try cg.airSliceElemVal(inst),
+ .ptr_elem_ptr => try cg.airPtrElemPtr(inst),
+ .ptr_elem_val => try cg.airPtrElemVal(inst),
+ .array_elem_val => try cg.airArrayElemVal(inst),
+
+ .vector_store_elem => return cg.airVectorStoreElem(inst),
+
+ .set_union_tag => return cg.airSetUnionTag(inst),
+ .get_union_tag => try cg.airGetUnionTag(inst),
+ .union_init => try cg.airUnionInit(inst),
+
+ .struct_field_val => try cg.airStructFieldVal(inst),
+ .field_parent_ptr => try cg.airFieldParentPtr(inst),
+
+ .struct_field_ptr_index_0 => try cg.airStructFieldPtrIndex(inst, 0),
+ .struct_field_ptr_index_1 => try cg.airStructFieldPtrIndex(inst, 1),
+ .struct_field_ptr_index_2 => try cg.airStructFieldPtrIndex(inst, 2),
+ .struct_field_ptr_index_3 => try cg.airStructFieldPtrIndex(inst, 3),
+
+ .cmp_eq => try cg.airCmp(inst, .eq),
+ .cmp_neq => try cg.airCmp(inst, .neq),
+ .cmp_gt => try cg.airCmp(inst, .gt),
+ .cmp_gte => try cg.airCmp(inst, .gte),
+ .cmp_lt => try cg.airCmp(inst, .lt),
+ .cmp_lte => try cg.airCmp(inst, .lte),
+ .cmp_vector => try cg.airVectorCmp(inst),
+
+ .arg => cg.airArg(),
+ .alloc => try cg.airAlloc(inst),
+ // TODO: We probably need to have a special implementation of this for the C abi.
+ .ret_ptr => try cg.airAlloc(inst),
+ .block => try cg.airBlock(inst),
+
+ .load => try cg.airLoad(inst),
+ .store, .store_safe => return cg.airStore(inst),
+
+ .br => return cg.airBr(inst),
+        // For now, just ignore this instruction. This effectively falls back on the old
+        // implementation; it doesn't change anything for us.
+ .repeat => return,
+ .breakpoint => return,
+ .cond_br => return cg.airCondBr(inst),
+ .loop => return cg.airLoop(inst),
+ .ret => return cg.airRet(inst),
+ .ret_safe => return cg.airRet(inst), // TODO
+ .ret_load => return cg.airRetLoad(inst),
+ .@"try" => try cg.airTry(inst),
+ .switch_br => return cg.airSwitchBr(inst),
+ .unreach, .trap => return cg.airUnreach(),
+
+ .dbg_empty_stmt => return,
+ .dbg_stmt => return cg.airDbgStmt(inst),
+ .dbg_inline_block => try cg.airDbgInlineBlock(inst),
+ .dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => return cg.airDbgVar(inst),
+
+ .unwrap_errunion_err => try cg.airErrUnionErr(inst),
+ .unwrap_errunion_payload => try cg.airErrUnionPayload(inst),
+ .wrap_errunion_err => try cg.airWrapErrUnionErr(inst),
+ .wrap_errunion_payload => try cg.airWrapErrUnionPayload(inst),
+
+ .is_null => try cg.airIsNull(inst, false, .is_null),
+ .is_non_null => try cg.airIsNull(inst, false, .is_non_null),
+ .is_null_ptr => try cg.airIsNull(inst, true, .is_null),
+ .is_non_null_ptr => try cg.airIsNull(inst, true, .is_non_null),
+ .is_err => try cg.airIsErr(inst, .is_err),
+ .is_non_err => try cg.airIsErr(inst, .is_non_err),
+
+ .optional_payload => try cg.airUnwrapOptional(inst),
+ .optional_payload_ptr => try cg.airUnwrapOptionalPtr(inst),
+ .wrap_optional => try cg.airWrapOptional(inst),
+
+ .assembly => try cg.airAssembly(inst),
+
+ .call => try cg.airCall(inst, .auto),
+ .call_always_tail => try cg.airCall(inst, .always_tail),
+ .call_never_tail => try cg.airCall(inst, .never_tail),
+ .call_never_inline => try cg.airCall(inst, .never_inline),
+
+ .work_item_id => try cg.airWorkItemId(inst),
+ .work_group_size => try cg.airWorkGroupSize(inst),
+ .work_group_id => try cg.airWorkGroupId(inst),
+
+ // zig fmt: on
+
+ else => |tag| return cg.todo("implement AIR tag {s}", .{@tagName(tag)}),
+ };
+
+ const result_id = maybe_result_id orelse return;
+ try cg.inst_results.putNoClobber(gpa, inst, result_id);
+}
+
+fn airBinOpSimple(cg: *CodeGen, inst: Air.Inst.Index, op: Opcode) !?Id {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const lhs = try cg.temporary(bin_op.lhs);
+ const rhs = try cg.temporary(bin_op.rhs);
+
+ const result = try cg.buildBinary(op, lhs, rhs);
+ return try result.materialize(cg);
+}
+
+fn airShift(cg: *CodeGen, inst: Air.Inst.Index, unsigned: Opcode, signed: Opcode) !?Id {
+ const zcu = cg.module.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+
+ if (cg.typeOf(bin_op.lhs).isVector(zcu) and !cg.typeOf(bin_op.rhs).isVector(zcu)) {
+ return cg.fail("vector shift with scalar rhs", .{});
+ }
+
+ const base = try cg.temporary(bin_op.lhs);
+ const shift = try cg.temporary(bin_op.rhs);
+
+ const result_ty = cg.typeOfIndex(inst);
+
+ const info = cg.arithmeticTypeInfo(result_ty);
+ switch (info.class) {
+ .composite_integer => return cg.todo("shift ops for composite integers", .{}),
+ .integer, .strange_integer => {},
+ .float, .bool => unreachable,
+ }
+
+    // Zig does not always give both operands the same type here, but SPIR-V expects that,
+    // so manually upcast the shift amount if required.
+
+    // Note: The signedness may differ between the shift amount and the base type in the case
+    // of an arithmetic right shift. SPIR-V still expects both operands to have the same type,
+    // so in that case the shift amount is converted to the (signed) base type.
+ const casted_shift = try cg.buildConvert(base.ty.scalarType(zcu), shift);
+
+ const shifted = switch (info.signedness) {
+ .unsigned => try cg.buildBinary(unsigned, base, casted_shift),
+ .signed => try cg.buildBinary(signed, base, casted_shift),
+ };
+
+ const result = try cg.normalize(shifted, info);
+ return try result.materialize(cg);
+}
+
+const MinMax = enum { min, max };
+
+fn airMinMax(cg: *CodeGen, inst: Air.Inst.Index, op: MinMax) !?Id {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+
+ const lhs = try cg.temporary(bin_op.lhs);
+ const rhs = try cg.temporary(bin_op.rhs);
+
+ const result = try cg.minMax(lhs, rhs, op);
+ return try result.materialize(cg);
+}
+
+fn minMax(cg: *CodeGen, lhs: Temporary, rhs: Temporary, op: MinMax) !Temporary {
+ const zcu = cg.module.zcu;
+ const target = zcu.getTarget();
+ const info = cg.arithmeticTypeInfo(lhs.ty);
+
+ const v = cg.vectorization(.{ lhs, rhs });
+ const ops = v.components();
+ const results = cg.module.allocIds(ops);
+
+ const op_result_ty = lhs.ty.scalarType(zcu);
+ const op_result_ty_id = try cg.resolveType(op_result_ty, .direct);
+ const result_ty = try v.resultType(cg, lhs.ty);
+
+ const op_lhs = try v.prepare(cg, lhs);
+ const op_rhs = try v.prepare(cg, rhs);
+
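+    // The literals below are instruction numbers into the extended instruction set imported
+    // via importExtendedSet (OpenCL.std for OpenCL, GLSL.std.450 for Vulkan/OpenGL).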
+ const ext_inst: u32 = switch (target.os.tag) {
+ .opencl => switch (info.class) {
+ .float => switch (op) {
+ .min => 28, // fmin
+ .max => 27, // fmax
+ },
+ .integer,
+ .strange_integer,
+ .composite_integer,
+ => switch (info.signedness) {
+ .signed => switch (op) {
+ .min => 158, // s_min
+ .max => 156, // s_max
+ },
+ .unsigned => switch (op) {
+ .min => 159, // u_min
+ .max => 157, // u_max
+ },
+ },
+ .bool => unreachable,
+ },
+ .vulkan, .opengl => switch (info.class) {
+ .float => switch (op) {
+ .min => 37, // FMin
+ .max => 40, // FMax
+ },
+ .integer,
+ .strange_integer,
+ .composite_integer,
+ => switch (info.signedness) {
+ .signed => switch (op) {
+ .min => 39, // SMin
+ .max => 42, // SMax
+ },
+ .unsigned => switch (op) {
+ .min => 38, // UMin
+ .max => 41, // UMax
+ },
+ },
+ .bool => unreachable,
+ },
+ else => unreachable,
+ };
+
+ const set = try cg.importExtendedSet();
+ for (0..ops) |i| {
+ try cg.body.emit(cg.module.gpa, .OpExtInst, .{
+ .id_result_type = op_result_ty_id,
+ .id_result = results.at(i),
+ .set = set,
+ .instruction = .{ .inst = ext_inst },
+ .id_ref_4 = &.{ op_lhs.at(i), op_rhs.at(i) },
+ });
+ }
+
+ return v.finalize(result_ty, results);
+}
+
+/// This function normalizes values to a canonical representation
+/// after some arithmetic operation. This mostly consists of wrapping
+/// behavior for strange integers:
+/// - Unsigned integers are bitwise masked with a mask that only passes
+/// the valid bits through.
+/// - Signed integers are also sign extended if they are negative.
+/// All other values are returned unmodified (this makes strange integer
+/// wrapping easier to use in generic operations).
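+/// For example, a u3 stored in its backing integer is masked with 0b111, while an i3 is
+/// shifted left by (backing_bits - 3) and then arithmetically shifted right by the same
+/// amount to re-extend its sign bit.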
+fn normalize(cg: *CodeGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary {
+ const zcu = cg.module.zcu;
+ const ty = value.ty;
+ switch (info.class) {
+ .composite_integer, .integer, .bool, .float => return value,
+ .strange_integer => switch (info.signedness) {
+ .unsigned => {
+ const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1;
+ const mask_id = try cg.constInt(ty.scalarType(zcu), mask_value);
+ return try cg.buildBinary(.OpBitwiseAnd, value, Temporary.init(ty.scalarType(zcu), mask_id));
+ },
+ .signed => {
+                // Shift left and then arithmetically right so that the sign bit is copied into the upper bits.
+ const shift_amt_id = try cg.constInt(ty.scalarType(zcu), info.backing_bits - info.bits);
+ const shift_amt: Temporary = .init(ty.scalarType(zcu), shift_amt_id);
+ const left = try cg.buildBinary(.OpShiftLeftLogical, value, shift_amt);
+ return try cg.buildBinary(.OpShiftRightArithmetic, left, shift_amt);
+ },
+ },
+ }
+}
+
+fn airDivFloor(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+
+ const lhs = try cg.temporary(bin_op.lhs);
+ const rhs = try cg.temporary(bin_op.rhs);
+
+ const info = cg.arithmeticTypeInfo(lhs.ty);
+ switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .integer, .strange_integer => {
+ switch (info.signedness) {
+ .unsigned => {
+ const result = try cg.buildBinary(.OpUDiv, lhs, rhs);
+ return try result.materialize(cg);
+ },
+ .signed => {},
+ }
+
+ // For signed integers:
+ // (a / b) - (a % b != 0 && a < 0 != b < 0);
+ // There shouldn't be any overflow issues.
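+            // For example, -7 divided by 2: OpSDiv gives -3 and OpSRem gives -1. The remainder
+            // is nonzero and the operand signs differ, so the result is -3 - 1 = -4 = floor(-3.5).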
+
+ const div = try cg.buildBinary(.OpSDiv, lhs, rhs);
+ const rem = try cg.buildBinary(.OpSRem, lhs, rhs);
+
+ const zero: Temporary = .init(lhs.ty, try cg.constInt(lhs.ty, 0));
+
+ const rem_is_not_zero = try cg.buildCmp(.OpINotEqual, rem, zero);
+
+ const result_negative = try cg.buildCmp(
+ .OpLogicalNotEqual,
+ try cg.buildCmp(.OpSLessThan, lhs, zero),
+ try cg.buildCmp(.OpSLessThan, rhs, zero),
+ );
+ const rem_is_not_zero_and_result_is_negative = try cg.buildBinary(
+ .OpLogicalAnd,
+ rem_is_not_zero,
+ result_negative,
+ );
+
+ const result = try cg.buildBinary(
+ .OpISub,
+ div,
+ try cg.intFromBool2(rem_is_not_zero_and_result_is_negative, div.ty),
+ );
+
+ return try result.materialize(cg);
+ },
+ .float => {
+ const div = try cg.buildBinary(.OpFDiv, lhs, rhs);
+ const result = try cg.buildUnary(.floor, div);
+ return try result.materialize(cg);
+ },
+ .bool => unreachable,
+ }
+}
+
+fn airDivTrunc(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+
+ const lhs = try cg.temporary(bin_op.lhs);
+ const rhs = try cg.temporary(bin_op.rhs);
+
+ const info = cg.arithmeticTypeInfo(lhs.ty);
+ switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .integer, .strange_integer => switch (info.signedness) {
+ .unsigned => {
+ const result = try cg.buildBinary(.OpUDiv, lhs, rhs);
+ return try result.materialize(cg);
+ },
+ .signed => {
+ const result = try cg.buildBinary(.OpSDiv, lhs, rhs);
+ return try result.materialize(cg);
+ },
+ },
+ .float => {
+ const div = try cg.buildBinary(.OpFDiv, lhs, rhs);
+ const result = try cg.buildUnary(.trunc, div);
+ return try result.materialize(cg);
+ },
+ .bool => unreachable,
+ }
+}
+
+fn airUnOpSimple(cg: *CodeGen, inst: Air.Inst.Index, op: UnaryOp) !?Id {
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand = try cg.temporary(un_op);
+ const result = try cg.buildUnary(op, operand);
+ return try result.materialize(cg);
+}
+
+fn airArithOp(
+ cg: *CodeGen,
+ inst: Air.Inst.Index,
+ comptime fop: Opcode,
+ comptime sop: Opcode,
+ comptime uop: Opcode,
+) !?Id {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+
+ const lhs = try cg.temporary(bin_op.lhs);
+ const rhs = try cg.temporary(bin_op.rhs);
+
+ const info = cg.arithmeticTypeInfo(lhs.ty);
+
+ const result = switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .integer, .strange_integer => switch (info.signedness) {
+ .signed => try cg.buildBinary(sop, lhs, rhs),
+ .unsigned => try cg.buildBinary(uop, lhs, rhs),
+ },
+ .float => try cg.buildBinary(fop, lhs, rhs),
+ .bool => unreachable,
+ };
+
+ return try result.materialize(cg);
+}
+
+fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.temporary(ty_op.operand);
+    // Note: the operand type may be signed, while the result type is always unsigned!
+ const result_ty = cg.typeOfIndex(inst);
+ const result = try cg.abs(result_ty, operand);
+ return try result.materialize(cg);
+}
+
+fn abs(cg: *CodeGen, result_ty: Type, value: Temporary) !Temporary {
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ const operand_info = cg.arithmeticTypeInfo(value.ty);
+
+ switch (operand_info.class) {
+ .float => return try cg.buildUnary(.f_abs, value),
+ .integer, .strange_integer => {
+ const abs_value = try cg.buildUnary(.i_abs, value);
+
+ switch (target.os.tag) {
+ .vulkan, .opengl => {
+ if (value.ty.intInfo(zcu).signedness == .signed) {
+ return cg.todo("perform bitcast after @abs", .{});
+ }
+ },
+ else => {},
+ }
+
+ return try cg.normalize(abs_value, cg.arithmeticTypeInfo(result_ty));
+ },
+ .composite_integer => unreachable, // TODO
+ .bool => unreachable,
+ }
+}
+
+fn airAddSubOverflow(
+ cg: *CodeGen,
+ inst: Air.Inst.Index,
+ comptime add: Opcode,
+ u_opcode: Opcode,
+ s_opcode: Opcode,
+) !?Id {
+ _ = s_opcode;
+    // Note: OpIAddCarry and OpISubBorrow are not really useful here: for unsigned numbers,
+    // only one extra operation is required in either case anyway. For signed operations,
+    // overflow happens when going from 0x80.. to 0x00.., but that does not normally set a
+    // carry bit. So the SPIR-V carry/borrow operations are not particularly useful here.
+
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ const lhs = try cg.temporary(extra.lhs);
+ const rhs = try cg.temporary(extra.rhs);
+
+ const result_ty = cg.typeOfIndex(inst);
+
+ const info = cg.arithmeticTypeInfo(lhs.ty);
+ switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .strange_integer, .integer => {},
+ .float, .bool => unreachable,
+ }
+
+ const sum = try cg.buildBinary(add, lhs, rhs);
+ const result = try cg.normalize(sum, info);
+
+ const overflowed = switch (info.signedness) {
+ // Overflow happened if the result is smaller than either of the operands. It doesn't matter which.
+ // For subtraction the conditions need to be swapped.
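+        // For example, 200 + 100 as u8 wraps to 44, which is smaller than both operands.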
+ .unsigned => try cg.buildCmp(u_opcode, result, lhs),
+ // For signed operations, we check the signs of the operands and the result.
+ .signed => blk: {
+ // Signed overflow detection using the sign bits of the operands and the result.
+ // For addition (a + b), overflow occurs if the operands have the same sign
+ // and the result's sign is different from the operands' sign.
+ // (sign(a) == sign(b)) && (sign(a) != sign(result))
+ // For subtraction (a - b), overflow occurs if the operands have different signs
+ // and the result's sign is different from the minuend's (a's) sign.
+ // (sign(a) != sign(b)) && (sign(a) != sign(result))
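+            // For example, 100 + 100 as i8 wraps to -56: the operands have the same sign but
+            // the result's sign differs, so overflow is reported.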
+ const zero: Temporary = .init(rhs.ty, try cg.constInt(rhs.ty, 0));
+
+ const lhs_is_neg = try cg.buildCmp(.OpSLessThan, lhs, zero);
+ const rhs_is_neg = try cg.buildCmp(.OpSLessThan, rhs, zero);
+ const result_is_neg = try cg.buildCmp(.OpSLessThan, result, zero);
+
+ const signs_match = try cg.buildCmp(.OpLogicalEqual, lhs_is_neg, rhs_is_neg);
+ const result_sign_differs = try cg.buildCmp(.OpLogicalNotEqual, lhs_is_neg, result_is_neg);
+
+ const overflow_condition = if (add == .OpIAdd)
+ signs_match
+ else // .OpISub
+ try cg.buildUnary(.l_not, signs_match);
+
+ break :blk try cg.buildCmp(.OpLogicalAnd, overflow_condition, result_sign_differs);
+ },
+ };
+
+ const ov = try cg.intFromBool(overflowed);
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, &.{ try result.materialize(cg), try ov.materialize(cg) });
+}
+
+fn airMulOverflow(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const pt = cg.pt;
+
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ const lhs = try cg.temporary(extra.lhs);
+ const rhs = try cg.temporary(extra.rhs);
+
+ const result_ty = cg.typeOfIndex(inst);
+
+ const info = cg.arithmeticTypeInfo(lhs.ty);
+ switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .strange_integer, .integer => {},
+ .float, .bool => unreachable,
+ }
+
+    // There are 3 cases to deal with:
+    // - If the multiplication fits in a wider supported integer type, we upcast, multiply
+    //   there, and check the higher bits.
+    // - Otherwise, we have to use extended (wide) multiplication.
+    // - Additionally, if info.bits != info.backing_bits, the high bits of the low word of
+    //   the result have to be checked as well.
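+    // For example, a u8 multiplication is performed in 32 bits; overflow occurred if any of
+    // the product's bits at or above bit 8 are set.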
+
+ const largest_int_bits = cg.largestSupportedIntBits();
+ // If non-null, the number of bits that the multiplication should be performed in. If
+ // null, we have to use wide multiplication.
+ const maybe_op_ty_bits: ?u16 = switch (info.bits) {
+ 0 => unreachable,
+ 1...16 => 32,
+ 17...32 => if (largest_int_bits > 32) 64 else null, // Upcast if we can.
+ 33...64 => null, // Always use wide multiplication.
+ else => unreachable, // TODO: Composite integers
+ };
+
+ const result, const overflowed = switch (info.signedness) {
+ .unsigned => blk: {
+ if (maybe_op_ty_bits) |op_ty_bits| {
+ const op_ty = try pt.intType(.unsigned, op_ty_bits);
+ const casted_lhs = try cg.buildConvert(op_ty, lhs);
+ const casted_rhs = try cg.buildConvert(op_ty, rhs);
+
+ const full_result = try cg.buildBinary(.OpIMul, casted_lhs, casted_rhs);
+
+ const low_bits = try cg.buildConvert(lhs.ty, full_result);
+ const result = try cg.normalize(low_bits, info);
+
+ // Shift the result bits away to get the overflow bits.
+ const shift: Temporary = .init(full_result.ty, try cg.constInt(full_result.ty, info.bits));
+ const overflow = try cg.buildBinary(.OpShiftRightLogical, full_result, shift);
+
+                // Directly check if it's zero in the op_ty without converting first.
+ const zero: Temporary = .init(full_result.ty, try cg.constInt(full_result.ty, 0));
+ const overflowed = try cg.buildCmp(.OpINotEqual, zero, overflow);
+
+ break :blk .{ result, overflowed };
+ }
+
+ const low_bits, const high_bits = try cg.buildWideMul(.unsigned, lhs, rhs);
+
+ // Truncate the result, if required.
+ const result = try cg.normalize(low_bits, info);
+
+ // Overflow happened if the high-bits of the result are non-zero OR if the
+ // high bits of the low word of the result (those outside the range of the
+ // int) are nonzero.
+ const zero: Temporary = .init(lhs.ty, try cg.constInt(lhs.ty, 0));
+ const high_overflowed = try cg.buildCmp(.OpINotEqual, zero, high_bits);
+
+ // If no overflow bits in low_bits, no extra work needs to be done.
+ if (info.backing_bits == info.bits) break :blk .{ result, high_overflowed };
+
+ // Shift the result bits away to get the overflow bits.
+ const shift: Temporary = .init(lhs.ty, try cg.constInt(lhs.ty, info.bits));
+ const low_overflow = try cg.buildBinary(.OpShiftRightLogical, low_bits, shift);
+ const low_overflowed = try cg.buildCmp(.OpINotEqual, zero, low_overflow);
+
+ const overflowed = try cg.buildCmp(.OpLogicalOr, low_overflowed, high_overflowed);
+
+ break :blk .{ result, overflowed };
+ },
+ .signed => blk: {
+            // - lhs >= 0, rhs >= 0: expect positive; overflow should be 0
+ // - lhs == 0 : expect positive; overflow should be 0
+ // - rhs == 0: expect positive; overflow should be 0
+ // - lhs > 0, rhs < 0: expect negative; overflow should be -1
+ // - lhs < 0, rhs > 0: expect negative; overflow should be -1
+ // - lhs <= 0, rhs <= 0: expect positive; overflow should be 0
+ // ------
+ // overflow should be -1 when
+ // (lhs > 0 && rhs < 0) || (lhs < 0 && rhs > 0)
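+            // For example, 3 * -4 = -12 does not overflow an i8: the operands have different
+            // signs, so the discarded upper bits are expected to be all ones (the sign
+            // extension of a negative product), which is exactly what the mask below checks.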
+
+ const zero: Temporary = .init(lhs.ty, try cg.constInt(lhs.ty, 0));
+ const lhs_negative = try cg.buildCmp(.OpSLessThan, lhs, zero);
+ const rhs_negative = try cg.buildCmp(.OpSLessThan, rhs, zero);
+ const lhs_positive = try cg.buildCmp(.OpSGreaterThan, lhs, zero);
+ const rhs_positive = try cg.buildCmp(.OpSGreaterThan, rhs, zero);
+
+ // Set to `true` if we expect -1.
+ const expected_overflow_bit = try cg.buildBinary(
+ .OpLogicalOr,
+ try cg.buildCmp(.OpLogicalAnd, lhs_positive, rhs_negative),
+ try cg.buildCmp(.OpLogicalAnd, lhs_negative, rhs_positive),
+ );
+
+ if (maybe_op_ty_bits) |op_ty_bits| {
+ const op_ty = try pt.intType(.signed, op_ty_bits);
+                // The operands are assumed normalized, so their sign bits are set correctly;
+                // the conversion performs a sign extend.
+ const casted_lhs = try cg.buildConvert(op_ty, lhs);
+ const casted_rhs = try cg.buildConvert(op_ty, rhs);
+
+ const full_result = try cg.buildBinary(.OpIMul, casted_lhs, casted_rhs);
+
+ // Truncate to the result type.
+ const low_bits = try cg.buildConvert(lhs.ty, full_result);
+ const result = try cg.normalize(low_bits, info);
+
+ // Now, we need to check the overflow bits AND the sign
+ // bit for the expected overflow bits.
+                // To do that, shift out everything but the sign bit and
+                // then check what remains.
+ const shift: Temporary = .init(full_result.ty, try cg.constInt(full_result.ty, info.bits - 1));
+ // Use SRA so that any sign bits are duplicated. Now we can just check if ALL bits are set
+ // for negative cases.
+ const overflow = try cg.buildBinary(.OpShiftRightArithmetic, full_result, shift);
+
+ const long_all_set: Temporary = .init(full_result.ty, try cg.constInt(full_result.ty, -1));
+ const long_zero: Temporary = .init(full_result.ty, try cg.constInt(full_result.ty, 0));
+ const mask = try cg.buildSelect(expected_overflow_bit, long_all_set, long_zero);
+
+ const overflowed = try cg.buildCmp(.OpINotEqual, mask, overflow);
+
+ break :blk .{ result, overflowed };
+ }
+
+ const low_bits, const high_bits = try cg.buildWideMul(.signed, lhs, rhs);
+
+ // Truncate result if required.
+ const result = try cg.normalize(low_bits, info);
+
+ const all_set: Temporary = .init(lhs.ty, try cg.constInt(lhs.ty, -1));
+ const mask = try cg.buildSelect(expected_overflow_bit, all_set, zero);
+
+            // Like with unsigned, overflow happened if high_bits are not the ones we expect,
+            // and we may also need to check some bits of the low word.
+
+ const high_overflowed = try cg.buildCmp(.OpINotEqual, mask, high_bits);
+
+ // If no overflow bits in low_bits, no extra work needs to be done.
+            // Careful, we still have to check the sign bit, so this branch
+            // only applies to i33 and the like.
+ if (info.backing_bits == info.bits + 1) break :blk .{ result, high_overflowed };
+
+ // Shift the result bits away to get the overflow bits.
+ const shift: Temporary = .init(lhs.ty, try cg.constInt(lhs.ty, info.bits - 1));
+ // Use SRA so that any sign bits are duplicated. Now we can just check if ALL bits are set
+ // for negative cases.
+ const low_overflow = try cg.buildBinary(.OpShiftRightArithmetic, low_bits, shift);
+ const low_overflowed = try cg.buildCmp(.OpINotEqual, mask, low_overflow);
+
+ const overflowed = try cg.buildCmp(.OpLogicalOr, low_overflowed, high_overflowed);
+
+ break :blk .{ result, overflowed };
+ },
+ };
+
+ const ov = try cg.intFromBool(overflowed);
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, &.{ try result.materialize(cg), try ov.materialize(cg) });
+}
+
+fn airShlOverflow(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ if (cg.typeOf(extra.lhs).isVector(zcu) and !cg.typeOf(extra.rhs).isVector(zcu)) {
+ return cg.fail("vector shift with scalar rhs", .{});
+ }
+
+ const base = try cg.temporary(extra.lhs);
+ const shift = try cg.temporary(extra.rhs);
+
+ const result_ty = cg.typeOfIndex(inst);
+
+ const info = cg.arithmeticTypeInfo(base.ty);
+ switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .integer, .strange_integer => {},
+ .float, .bool => unreachable,
+ }
+
+    // Zig does not always give both operands the same type here, but SPIR-V expects that,
+    // so manually upcast the shift amount if required.
+ const casted_shift = try cg.buildConvert(base.ty.scalarType(zcu), shift);
+
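+    // Shift back in the opposite direction; if any bits were shifted out, the round-tripped
+    // value differs from the base and overflow occurred.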
+ const left = try cg.buildBinary(.OpShiftLeftLogical, base, casted_shift);
+ const result = try cg.normalize(left, info);
+
+ const right = switch (info.signedness) {
+ .unsigned => try cg.buildBinary(.OpShiftRightLogical, result, casted_shift),
+ .signed => try cg.buildBinary(.OpShiftRightArithmetic, result, casted_shift),
+ };
+
+ const overflowed = try cg.buildCmp(.OpINotEqual, base, right);
+ const ov = try cg.intFromBool(overflowed);
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, &.{ try result.materialize(cg), try ov.materialize(cg) });
+}
+
+fn airMulAdd(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const extra = cg.air.extraData(Air.Bin, pl_op.payload).data;
+
+ const a = try cg.temporary(extra.lhs);
+ const b = try cg.temporary(extra.rhs);
+ const c = try cg.temporary(pl_op.operand);
+
+ const result_ty = cg.typeOfIndex(inst);
+ const info = cg.arithmeticTypeInfo(result_ty);
+ assert(info.class == .float); // .mul_add is only emitted for floats
+
+ const result = try cg.buildFma(a, b, c);
+ return try result.materialize(cg);
+}
+
+fn airClzCtz(cg: *CodeGen, inst: Air.Inst.Index, op: UnaryOp) !?Id {
+ if (cg.liveness.isUnused(inst)) return null;
+
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.temporary(ty_op.operand);
+
+ const scalar_result_ty = cg.typeOfIndex(inst).scalarType(zcu);
+
+ const info = cg.arithmeticTypeInfo(operand.ty);
+ switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .integer, .strange_integer => {},
+ .float, .bool => unreachable,
+ }
+
+ const count = try cg.buildUnary(op, operand);
+
+    // The ctz/clz result has the operand's type, but we want result_ty.
+    // result_ty is always large enough to hold the count, so we might have to
+    // down-cast the result.
+ const result = try cg.buildConvert(scalar_result_ty, count);
+ return try result.materialize(cg);
+}
+
+fn airSelect(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const extra = cg.air.extraData(Air.Bin, pl_op.payload).data;
+ const pred = try cg.temporary(pl_op.operand);
+ const a = try cg.temporary(extra.lhs);
+ const b = try cg.temporary(extra.rhs);
+
+ const result = try cg.buildSelect(pred, a, b);
+ return try result.materialize(cg);
+}
+
+fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+
+ const operand_id = try cg.resolve(ty_op.operand);
+ const result_ty = cg.typeOfIndex(inst);
+
+ return try cg.constructCompositeSplat(result_ty, operand_id);
+}
+
+fn airReduce(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const reduce = cg.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
+ const operand = try cg.resolve(reduce.operand);
+ const operand_ty = cg.typeOf(reduce.operand);
+ const scalar_ty = operand_ty.scalarType(zcu);
+ const scalar_ty_id = try cg.resolveType(scalar_ty, .direct);
+ const info = cg.arithmeticTypeInfo(operand_ty);
+ const len = operand_ty.vectorLen(zcu);
+ const first = try cg.extractVectorComponent(scalar_ty, operand, 0);
+
+ switch (reduce.operation) {
+ .Min, .Max => |op| {
+ var result: Temporary = .init(scalar_ty, first);
+ const cmp_op: MinMax = switch (op) {
+ .Max => .max,
+ .Min => .min,
+ else => unreachable,
+ };
+ for (1..len) |i| {
+ const lhs = result;
+ const rhs_id = try cg.extractVectorComponent(scalar_ty, operand, @intCast(i));
+ const rhs: Temporary = .init(scalar_ty, rhs_id);
+
+ result = try cg.minMax(lhs, rhs, cmp_op);
+ }
+
+ return try result.materialize(cg);
+ },
+ else => {},
+ }
+
+ var result_id = first;
+
+ const opcode: Opcode = switch (info.class) {
+ .bool => switch (reduce.operation) {
+ .And => .OpLogicalAnd,
+ .Or => .OpLogicalOr,
+ .Xor => .OpLogicalNotEqual,
+ else => unreachable,
+ },
+ .strange_integer, .integer => switch (reduce.operation) {
+ .And => .OpBitwiseAnd,
+ .Or => .OpBitwiseOr,
+ .Xor => .OpBitwiseXor,
+ .Add => .OpIAdd,
+ .Mul => .OpIMul,
+ else => unreachable,
+ },
+ .float => switch (reduce.operation) {
+ .Add => .OpFAdd,
+ .Mul => .OpFMul,
+ else => unreachable,
+ },
+ .composite_integer => unreachable, // TODO
+ };
+
+ for (1..len) |i| {
+ const lhs = result_id;
+ const rhs = try cg.extractVectorComponent(scalar_ty, operand, @intCast(i));
+ result_id = cg.module.allocId();
+
+ try cg.body.emitRaw(cg.module.gpa, opcode, 4);
+ cg.body.writeOperand(Id, scalar_ty_id);
+ cg.body.writeOperand(Id, result_id);
+ cg.body.writeOperand(Id, lhs);
+ cg.body.writeOperand(Id, rhs);
+ }
+
+ return result_id;
+}
+
+fn airShuffleOne(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const gpa = zcu.gpa;
+
+ const unwrapped = cg.air.unwrapShuffleOne(zcu, inst);
+ const mask = unwrapped.mask;
+ const result_ty = unwrapped.result_ty;
+ const elem_ty = result_ty.childType(zcu);
+ const operand = try cg.resolve(unwrapped.operand);
+
+ const constituents = try gpa.alloc(Id, mask.len);
+ defer gpa.free(constituents);
+
+ for (constituents, mask) |*id, mask_elem| {
+ id.* = switch (mask_elem.unwrap()) {
+ .elem => |idx| try cg.extractVectorComponent(elem_ty, operand, idx),
+ .value => |val| try cg.constant(elem_ty, .fromInterned(val), .direct),
+ };
+ }
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, constituents);
+}
+
+fn airShuffleTwo(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const gpa = zcu.gpa;
+
+ const unwrapped = cg.air.unwrapShuffleTwo(zcu, inst);
+ const mask = unwrapped.mask;
+ const result_ty = unwrapped.result_ty;
+ const elem_ty = result_ty.childType(zcu);
+ const elem_ty_id = try cg.resolveType(elem_ty, .direct);
+ const operand_a = try cg.resolve(unwrapped.operand_a);
+ const operand_b = try cg.resolve(unwrapped.operand_b);
+
+ const constituents = try gpa.alloc(Id, mask.len);
+ defer gpa.free(constituents);
+
+ for (constituents, mask) |*id, mask_elem| {
+ id.* = switch (mask_elem.unwrap()) {
+ .a_elem => |idx| try cg.extractVectorComponent(elem_ty, operand_a, idx),
+ .b_elem => |idx| try cg.extractVectorComponent(elem_ty, operand_b, idx),
+ .undef => try cg.module.constUndef(elem_ty_id),
+ };
+ }
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, constituents);
+}
+
+fn indicesToIds(cg: *CodeGen, indices: []const u32) ![]Id {
+ const gpa = cg.module.gpa;
+ const ids = try gpa.alloc(Id, indices.len);
+ errdefer gpa.free(ids);
+ for (indices, ids) |index, *id| {
+ id.* = try cg.constInt(.u32, index);
+ }
+
+ return ids;
+}
+
+fn accessChainId(
+ cg: *CodeGen,
+ result_ty_id: Id,
+ base: Id,
+ indices: []const Id,
+) !Id {
+ const result_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpInBoundsAccessChain, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .base = base,
+ .indexes = indices,
+ });
+ return result_id;
+}
+
+/// AccessChain is essentially PtrAccessChain with 0 as initial argument. The effective
+/// difference lies in whether the resulting type of the first dereference will be the
+/// same as that of the base pointer, or that of a dereferenced base pointer. AccessChain
+/// is the latter and PtrAccessChain is the former.
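+/// For example, with a base of type *S, an access chain with indices .{1} yields a pointer to
+/// field 1 of S, whereas a pointer access chain first steps the base pointer itself by its
+/// `element` operand, as if it pointed into an array of S.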
+fn accessChain(
+ cg: *CodeGen,
+ result_ty_id: Id,
+ base: Id,
+ indices: []const u32,
+) !Id {
+ const gpa = cg.module.gpa;
+ const ids = try cg.indicesToIds(indices);
+ defer gpa.free(ids);
+ return try cg.accessChainId(result_ty_id, base, ids);
+}
+
+fn ptrAccessChain(
+ cg: *CodeGen,
+ result_ty_id: Id,
+ base: Id,
+ element: Id,
+ indices: []const u32,
+) !Id {
+ const gpa = cg.module.gpa;
+ const target = cg.module.zcu.getTarget();
+ const ids = try cg.indicesToIds(indices);
+ defer gpa.free(ids);
+
+ const result_id = cg.module.allocId();
+ switch (target.os.tag) {
+ .opencl, .amdhsa => {
+ try cg.body.emit(cg.module.gpa, .OpInBoundsPtrAccessChain, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .base = base,
+ .element = element,
+ .indexes = ids,
+ });
+ },
+ else => {
+ try cg.body.emit(cg.module.gpa, .OpPtrAccessChain, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .base = base,
+ .element = element,
+ .indexes = ids,
+ });
+ },
+ }
+ return result_id;
+}
+
+fn ptrAdd(cg: *CodeGen, result_ty: Type, ptr_ty: Type, ptr_id: Id, offset_id: Id) !Id {
+ const zcu = cg.module.zcu;
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+
+ switch (ptr_ty.ptrSize(zcu)) {
+ .one => {
+ // Pointer to array
+ // TODO: Is this correct?
+ return try cg.accessChainId(result_ty_id, ptr_id, &.{offset_id});
+ },
+ .c, .many => {
+ return try cg.ptrAccessChain(result_ty_id, ptr_id, offset_id, &.{});
+ },
+ .slice => {
+            // TODO: This is probably incorrect. A slice should be returned here, though this is what LLVM does.
+ const slice_ptr_id = try cg.extractField(result_ty, ptr_id, 0);
+ return try cg.ptrAccessChain(result_ty_id, slice_ptr_id, offset_id, &.{});
+ },
+ }
+}
+
+fn airPtrAdd(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr_id = try cg.resolve(bin_op.lhs);
+ const offset_id = try cg.resolve(bin_op.rhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
+ const result_ty = cg.typeOfIndex(inst);
+
+ return try cg.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id);
+}
+
+fn airPtrSub(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr_id = try cg.resolve(bin_op.lhs);
+ const ptr_ty = cg.typeOf(bin_op.lhs);
+ const offset_id = try cg.resolve(bin_op.rhs);
+ const offset_ty = cg.typeOf(bin_op.rhs);
+ const offset_ty_id = try cg.resolveType(offset_ty, .direct);
+ const result_ty = cg.typeOfIndex(inst);
+
+ const negative_offset_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpSNegate, .{
+ .id_result_type = offset_ty_id,
+ .id_result = negative_offset_id,
+ .operand = offset_id,
+ });
+ return try cg.ptrAdd(result_ty, ptr_ty, ptr_id, negative_offset_id);
+}
+
+fn cmp(
+ cg: *CodeGen,
+ op: std.math.CompareOperator,
+ lhs: Temporary,
+ rhs: Temporary,
+) !Temporary {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const scalar_ty = lhs.ty.scalarType(zcu);
+ const is_vector = lhs.ty.isVector(zcu);
+
+ switch (scalar_ty.zigTypeTag(zcu)) {
+ .int, .bool, .float => {},
+ .@"enum" => {
+ assert(!is_vector);
+ const ty = lhs.ty.intTagType(zcu);
+ return try cg.cmp(op, lhs.pun(ty), rhs.pun(ty));
+ },
+ .@"struct" => {
+ const struct_ty = zcu.typeToPackedStruct(scalar_ty).?;
+ const ty: Type = .fromInterned(struct_ty.backingIntTypeUnordered(ip));
+ return try cg.cmp(op, lhs.pun(ty), rhs.pun(ty));
+ },
+ .error_set => {
+ assert(!is_vector);
+ const err_int_ty = try pt.errorIntType();
+ return try cg.cmp(op, lhs.pun(err_int_ty), rhs.pun(err_int_ty));
+ },
+ .pointer => {
+ assert(!is_vector);
+ // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
+ // currently not implemented in the SPIR-V LLVM translator. Thus, we emit these using
+ // OpConvertPtrToU...
+
+ const usize_ty_id = try cg.resolveType(.usize, .direct);
+
+ const lhs_int_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpConvertPtrToU, .{
+ .id_result_type = usize_ty_id,
+ .id_result = lhs_int_id,
+ .pointer = try lhs.materialize(cg),
+ });
+
+ const rhs_int_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpConvertPtrToU, .{
+ .id_result_type = usize_ty_id,
+ .id_result = rhs_int_id,
+ .pointer = try rhs.materialize(cg),
+ });
+
+ const lhs_int: Temporary = .init(.usize, lhs_int_id);
+ const rhs_int: Temporary = .init(.usize, rhs_int_id);
+ return try cg.cmp(op, lhs_int, rhs_int);
+ },
+ .optional => {
+ assert(!is_vector);
+
+ const ty = lhs.ty;
+
+ const payload_ty = ty.optionalChild(zcu);
+ if (ty.optionalReprIsPayload(zcu)) {
+ assert(payload_ty.hasRuntimeBitsIgnoreComptime(zcu));
+ assert(!payload_ty.isSlice(zcu));
+
+ return try cg.cmp(op, lhs.pun(payload_ty), rhs.pun(payload_ty));
+ }
+
+ const lhs_id = try lhs.materialize(cg);
+ const rhs_id = try rhs.materialize(cg);
+
+ const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ try cg.extractField(.bool, lhs_id, 1)
+ else
+ try cg.convertToDirect(.bool, lhs_id);
+
+ const rhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ try cg.extractField(.bool, rhs_id, 1)
+ else
+ try cg.convertToDirect(.bool, rhs_id);
+
+ const lhs_valid: Temporary = .init(.bool, lhs_valid_id);
+ const rhs_valid: Temporary = .init(.bool, rhs_valid_id);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ return try cg.cmp(op, lhs_valid, rhs_valid);
+ }
+
+ // a = lhs_valid
+ // b = rhs_valid
+ // c = lhs_pl == rhs_pl
+ //
+ // For op == .eq we have:
+ // a == b && a -> c
+ // = a == b && (!a || c)
+ //
+            // For op == .neq we have:
+            //    !(a == b && a -> c)
+            //  = a != b || !(a -> c)
+            //  = a != b || !(!a || c)
+            //  = a != b || (a && !c)
+
+ const lhs_pl_id = try cg.extractField(payload_ty, lhs_id, 0);
+ const rhs_pl_id = try cg.extractField(payload_ty, rhs_id, 0);
+
+ const lhs_pl: Temporary = .init(payload_ty, lhs_pl_id);
+ const rhs_pl: Temporary = .init(payload_ty, rhs_pl_id);
+
+ return switch (op) {
+ .eq => try cg.buildBinary(
+ .OpLogicalAnd,
+ try cg.cmp(.eq, lhs_valid, rhs_valid),
+ try cg.buildBinary(
+ .OpLogicalOr,
+ try cg.buildUnary(.l_not, lhs_valid),
+ try cg.cmp(.eq, lhs_pl, rhs_pl),
+ ),
+ ),
+ .neq => try cg.buildBinary(
+ .OpLogicalOr,
+ try cg.cmp(.neq, lhs_valid, rhs_valid),
+ try cg.buildBinary(
+ .OpLogicalAnd,
+ lhs_valid,
+ try cg.cmp(.neq, lhs_pl, rhs_pl),
+ ),
+ ),
+ else => unreachable,
+ };
+ },
+ else => |ty| return cg.todo("implement cmp operation for '{s}' type", .{@tagName(ty)}),
+ }
+
+ const info = cg.arithmeticTypeInfo(scalar_ty);
+ const pred: Opcode = switch (info.class) {
+ .composite_integer => unreachable, // TODO
+ .float => switch (op) {
+ .eq => .OpFOrdEqual,
+ .neq => .OpFUnordNotEqual,
+ .lt => .OpFOrdLessThan,
+ .lte => .OpFOrdLessThanEqual,
+ .gt => .OpFOrdGreaterThan,
+ .gte => .OpFOrdGreaterThanEqual,
+ },
+ .bool => switch (op) {
+ .eq => .OpLogicalEqual,
+ .neq => .OpLogicalNotEqual,
+ else => unreachable,
+ },
+ .integer, .strange_integer => switch (info.signedness) {
+ .signed => switch (op) {
+ .eq => .OpIEqual,
+ .neq => .OpINotEqual,
+ .lt => .OpSLessThan,
+ .lte => .OpSLessThanEqual,
+ .gt => .OpSGreaterThan,
+ .gte => .OpSGreaterThanEqual,
+ },
+ .unsigned => switch (op) {
+ .eq => .OpIEqual,
+ .neq => .OpINotEqual,
+ .lt => .OpULessThan,
+ .lte => .OpULessThanEqual,
+ .gt => .OpUGreaterThan,
+ .gte => .OpUGreaterThanEqual,
+ },
+ },
+ };
+
+ return try cg.buildCmp(pred, lhs, rhs);
+}
+
+fn airCmp(
+ cg: *CodeGen,
+ inst: Air.Inst.Index,
+ comptime op: std.math.CompareOperator,
+) !?Id {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const lhs = try cg.temporary(bin_op.lhs);
+ const rhs = try cg.temporary(bin_op.rhs);
+
+ const result = try cg.cmp(op, lhs, rhs);
+ return try result.materialize(cg);
+}
+
+fn airVectorCmp(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const vec_cmp = cg.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ const lhs = try cg.temporary(vec_cmp.lhs);
+ const rhs = try cg.temporary(vec_cmp.rhs);
+ const op = vec_cmp.compareOperator();
+
+ const result = try cg.cmp(op, lhs, rhs);
+ return try result.materialize(cg);
+}
+
+/// Bitcast one type to another. Note: both types, input, output are expected in **direct** representation.
+fn bitCast(
+ cg: *CodeGen,
+ dst_ty: Type,
+ src_ty: Type,
+ src_id: Id,
+) !Id {
+ const zcu = cg.module.zcu;
+ const src_ty_id = try cg.resolveType(src_ty, .direct);
+ const dst_ty_id = try cg.resolveType(dst_ty, .direct);
+
+ const result_id = blk: {
+ if (src_ty_id == dst_ty_id) break :blk src_id;
+
+ // TODO: Some more cases are missing here
+ // See fn bitCast in llvm.zig
+
+ if (src_ty.zigTypeTag(zcu) == .int and dst_ty.isPtrAtRuntime(zcu)) {
+ const result_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpConvertUToPtr, .{
+ .id_result_type = dst_ty_id,
+ .id_result = result_id,
+ .integer_value = src_id,
+ });
+ break :blk result_id;
+ }
+
+        // We can only use OpBitcast for specific conversions: between numerical types, and
+        // between pointers. If the resolved SPIR-V types fall into this category then emit OpBitcast,
+        // otherwise use a temporary and perform a pointer cast.
+ const can_bitcast = (src_ty.isNumeric(zcu) and dst_ty.isNumeric(zcu)) or (src_ty.isPtrAtRuntime(zcu) and dst_ty.isPtrAtRuntime(zcu));
+ if (can_bitcast) {
+ const result_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = dst_ty_id,
+ .id_result = result_id,
+ .operand = src_id,
+ });
+
+ break :blk result_id;
+ }
+
+ const dst_ptr_ty_id = try cg.module.ptrType(dst_ty_id, .function);
+
+ const tmp_id = try cg.alloc(src_ty, .{ .storage_class = .function });
+ try cg.store(src_ty, tmp_id, src_id, .{});
+ const casted_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = dst_ptr_ty_id,
+ .id_result = casted_ptr_id,
+ .operand = tmp_id,
+ });
+ break :blk try cg.load(dst_ty, casted_ptr_id, .{});
+ };
+
+ // Because strange integers use sign-extended representation, we may need to normalize
+ // the result here.
+ // TODO: This detail could cause stuff like @as(*const i1, @ptrCast(&@as(u1, 1))) to break
+ // should we change the representation of strange integers?
+ if (dst_ty.zigTypeTag(zcu) == .int) {
+ const info = cg.arithmeticTypeInfo(dst_ty);
+ const result = try cg.normalize(Temporary.init(dst_ty, result_id), info);
+ return try result.materialize(cg);
+ }
+
+ return result_id;
+}
+
+fn airBitCast(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_ty = cg.typeOf(ty_op.operand);
+ const result_ty = cg.typeOfIndex(inst);
+ if (operand_ty.toIntern() == .bool_type) {
+ const operand = try cg.temporary(ty_op.operand);
+ const result = try cg.intFromBool(operand);
+ return try result.materialize(cg);
+ }
+ const operand_id = try cg.resolve(ty_op.operand);
+ return try cg.bitCast(result_ty, operand_ty, operand_id);
+}
+
+fn airIntCast(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const src = try cg.temporary(ty_op.operand);
+ const dst_ty = cg.typeOfIndex(inst);
+
+ const src_info = cg.arithmeticTypeInfo(src.ty);
+ const dst_info = cg.arithmeticTypeInfo(dst_ty);
+
+ if (src_info.backing_bits == dst_info.backing_bits) {
+ return try src.materialize(cg);
+ }
+
+ const converted = try cg.buildConvert(dst_ty, src);
+
+ // Make sure to normalize the result if shrinking.
+ // Because strange ints are sign extended in their backing
+ // type, we don't need to normalize when growing the type. The
+ // representation is already the same.
+ const result = if (dst_info.bits < src_info.bits)
+ try cg.normalize(converted, dst_info)
+ else
+ converted;
+
+ return try result.materialize(cg);
+}
+
+fn intFromPtr(cg: *CodeGen, operand_id: Id) !Id {
+ const result_type_id = try cg.resolveType(.usize, .direct);
+ const result_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpConvertPtrToU, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .pointer = operand_id,
+ });
+ return result_id;
+}
+
+fn airFloatFromInt(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_ty = cg.typeOf(ty_op.operand);
+ const operand_id = try cg.resolve(ty_op.operand);
+ const result_ty = cg.typeOfIndex(inst);
+ return try cg.floatFromInt(result_ty, operand_ty, operand_id);
+}
+
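+/// Converts an integer to a float of type `result_ty`, emitting `OpConvertSToF` or
+/// `OpConvertUToF` depending on the signedness of `operand_ty`.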
+fn floatFromInt(cg: *CodeGen, result_ty: Type, operand_ty: Type, operand_id: Id) !Id {
+ const operand_info = cg.arithmeticTypeInfo(operand_ty);
+ const result_id = cg.module.allocId();
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ switch (operand_info.signedness) {
+ .signed => try cg.body.emit(cg.module.gpa, .OpConvertSToF, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .signed_value = operand_id,
+ }),
+ .unsigned => try cg.body.emit(cg.module.gpa, .OpConvertUToF, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .unsigned_value = operand_id,
+ }),
+ }
+ return result_id;
+}
+
+fn airIntFromFloat(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_id = try cg.resolve(ty_op.operand);
+ const result_ty = cg.typeOfIndex(inst);
+ return try cg.intFromFloat(result_ty, operand_id);
+}
+
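+/// Converts a float to an integer of type `result_ty`, emitting `OpConvertFToS` or
+/// `OpConvertFToU` depending on the signedness of the result type.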
+fn intFromFloat(cg: *CodeGen, result_ty: Type, operand_id: Id) !Id {
+ const result_info = cg.arithmeticTypeInfo(result_ty);
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ const result_id = cg.module.allocId();
+ switch (result_info.signedness) {
+ .signed => try cg.body.emit(cg.module.gpa, .OpConvertFToS, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .float_value = operand_id,
+ }),
+ .unsigned => try cg.body.emit(cg.module.gpa, .OpConvertFToU, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .float_value = operand_id,
+ }),
+ }
+ return result_id;
+}
+
+fn airFloatCast(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.temporary(ty_op.operand);
+ const dest_ty = cg.typeOfIndex(inst);
+ const result = try cg.buildConvert(dest_ty, operand);
+ return try result.materialize(cg);
+}
+
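+/// Lowers a `not` AIR instruction: logical not for booleans, and bitwise complement
+/// followed by normalization for (strange) integers.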
+fn airNot(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand = try cg.temporary(ty_op.operand);
+ const result_ty = cg.typeOfIndex(inst);
+ const info = cg.arithmeticTypeInfo(result_ty);
+
+ const result = switch (info.class) {
+ .bool => try cg.buildUnary(.l_not, operand),
+ .float => unreachable,
+ .composite_integer => unreachable, // TODO
+ .strange_integer, .integer => blk: {
+ const complement = try cg.buildUnary(.bit_not, operand);
+ break :blk try cg.normalize(complement, info);
+ },
+ };
+
+ return try result.materialize(cg);
+}
+
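+/// Lowers `array_to_slice` by constructing a slice composite from a pointer to the
+/// array's first element and the array length.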
+fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const array_ptr_ty = cg.typeOf(ty_op.operand);
+ const array_ty = array_ptr_ty.childType(zcu);
+ const slice_ty = cg.typeOfIndex(inst);
+ const elem_ptr_ty = slice_ty.slicePtrFieldType(zcu);
+
+ const elem_ptr_ty_id = try cg.resolveType(elem_ptr_ty, .direct);
+
+ const array_ptr_id = try cg.resolve(ty_op.operand);
+ const len_id = try cg.constInt(.usize, array_ty.arrayLen(zcu));
+
+ const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
+ try cg.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
+ else
+ // Convert the pointer-to-array to a pointer to the first element.
+ try cg.accessChain(elem_ptr_ty_id, array_ptr_id, &.{0});
+
+ const slice_ty_id = try cg.resolveType(slice_ty, .direct);
+ return try cg.constructComposite(slice_ty_id, &.{ elem_ptr_id, len_id });
+}
+
+fn airSlice(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr_id = try cg.resolve(bin_op.lhs);
+ const len_id = try cg.resolve(bin_op.rhs);
+ const slice_ty = cg.typeOfIndex(inst);
+ const slice_ty_id = try cg.resolveType(slice_ty, .direct);
+ return try cg.constructComposite(slice_ty_id, &.{ ptr_id, len_id });
+}
+
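+/// Lowers `aggregate_init` for structs, vectors and arrays. Packed structs are built up
+/// by shifting and or-ing the fields into the backing integer; other aggregates resolve
+/// each element and construct the resulting composite.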
+fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const gpa = cg.module.gpa;
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const target = cg.module.zcu.getTarget();
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const result_ty = cg.typeOfIndex(inst);
+ const len: usize = @intCast(result_ty.arrayLen(zcu));
+ const elements: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[ty_pl.payload..][0..len]);
+
+ switch (result_ty.zigTypeTag(zcu)) {
+ .@"struct" => {
+ if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
+ comptime assert(Type.packed_struct_layout_version == 2);
+ const backing_int_ty: Type = .fromInterned(struct_type.backingIntTypeUnordered(ip));
+ var running_int_id = try cg.constInt(backing_int_ty, 0);
+ var running_bits: u16 = 0;
+ for (struct_type.field_types.get(ip), elements) |field_ty_ip, element| {
+ const field_ty: Type = .fromInterned(field_ty_ip);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ const field_id = try cg.resolve(element);
+ const ty_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+ const field_int_ty = try cg.pt.intType(.unsigned, ty_bit_size);
+ const field_int_id = blk: {
+ if (field_ty.isPtrAtRuntime(zcu)) {
+ assert(target.cpu.arch == .spirv64 and
+ field_ty.ptrAddressSpace(zcu) == .storage_buffer);
+ break :blk try cg.intFromPtr(field_id);
+ }
+ break :blk try cg.bitCast(field_int_ty, field_ty, field_id);
+ };
+ const shift_rhs = try cg.constInt(backing_int_ty, running_bits);
+ const extended_int_conv = try cg.buildConvert(backing_int_ty, .{
+ .ty = field_int_ty,
+ .value = .{ .singleton = field_int_id },
+ });
+ const shifted = try cg.buildBinary(.OpShiftLeftLogical, extended_int_conv, .{
+ .ty = backing_int_ty,
+ .value = .{ .singleton = shift_rhs },
+ });
+ const running_int_tmp = try cg.buildBinary(
+ .OpBitwiseOr,
+ .{ .ty = backing_int_ty, .value = .{ .singleton = running_int_id } },
+ shifted,
+ );
+ running_int_id = try running_int_tmp.materialize(cg);
+ running_bits += ty_bit_size;
+ }
+ return running_int_id;
+ }
+
+ const types = try gpa.alloc(Type, elements.len);
+ defer gpa.free(types);
+ const constituents = try gpa.alloc(Id, elements.len);
+ defer gpa.free(constituents);
+ var index: usize = 0;
+
+ switch (ip.indexToKey(result_ty.toIntern())) {
+ .tuple_type => |tuple| {
+ for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
+ if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
+ assert(Type.fromInterned(field_ty).hasRuntimeBits(zcu));
+
+ const id = try cg.resolve(element);
+ types[index] = .fromInterned(field_ty);
+ constituents[index] = try cg.convertToIndirect(.fromInterned(field_ty), id);
+ index += 1;
+ }
+ },
+ .struct_type => {
+ const struct_type = ip.loadStructType(result_ty.toIntern());
+ var it = struct_type.iterateRuntimeOrder(ip);
+ for (elements, 0..) |element, i| {
+ const field_index = it.next().?;
+ if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
+ assert(field_ty.hasRuntimeBitsIgnoreComptime(zcu));
+
+ const id = try cg.resolve(element);
+ types[index] = field_ty;
+ constituents[index] = try cg.convertToIndirect(field_ty, id);
+ index += 1;
+ }
+ },
+ else => unreachable,
+ }
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, constituents[0..index]);
+ },
+ .vector => {
+ const n_elems = result_ty.vectorLen(zcu);
+ const elem_ids = try gpa.alloc(Id, n_elems);
+ defer gpa.free(elem_ids);
+
+ for (elements, 0..) |element, i| {
+ elem_ids[i] = try cg.resolve(element);
+ }
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, elem_ids);
+ },
+ .array => {
+ const array_info = result_ty.arrayInfo(zcu);
+ const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(zcu));
+ const elem_ids = try gpa.alloc(Id, n_elems);
+ defer gpa.free(elem_ids);
+
+ for (elements, 0..) |element, i| {
+ const id = try cg.resolve(element);
+ elem_ids[i] = try cg.convertToIndirect(array_info.elem_type, id);
+ }
+
+ if (array_info.sentinel) |sentinel_val| {
+ elem_ids[n_elems - 1] = try cg.constant(array_info.elem_type, sentinel_val, .indirect);
+ }
+
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+ return try cg.constructComposite(result_ty_id, elem_ids);
+ },
+ else => unreachable,
+ }
+}
+
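+/// Returns the `len` field of a slice operand, or, for a pointer-to-array, the array's
+/// size in bytes (length including sentinel times the element's ABI size).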
+fn sliceOrArrayLen(cg: *CodeGen, operand_id: Id, ty: Type) !Id {
+ const zcu = cg.module.zcu;
+ switch (ty.ptrSize(zcu)) {
+ .slice => return cg.extractField(.usize, operand_id, 1),
+ .one => {
+ const array_ty = ty.childType(zcu);
+ const elem_ty = array_ty.childType(zcu);
+ const abi_size = elem_ty.abiSize(zcu);
+ const size = array_ty.arrayLenIncludingSentinel(zcu) * abi_size;
+ return try cg.constInt(.usize, size);
+ },
+ .many, .c => unreachable,
+ }
+}
+
+fn sliceOrArrayPtr(cg: *CodeGen, operand_id: Id, ty: Type) !Id {
+ const zcu = cg.module.zcu;
+ if (ty.isSlice(zcu)) {
+ const ptr_ty = ty.slicePtrFieldType(zcu);
+ return cg.extractField(ptr_ty, operand_id, 0);
+ }
+ return operand_id;
+}
+
+fn airMemcpy(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const dest_slice = try cg.resolve(bin_op.lhs);
+ const src_slice = try cg.resolve(bin_op.rhs);
+ const dest_ty = cg.typeOf(bin_op.lhs);
+ const src_ty = cg.typeOf(bin_op.rhs);
+ const dest_ptr = try cg.sliceOrArrayPtr(dest_slice, dest_ty);
+ const src_ptr = try cg.sliceOrArrayPtr(src_slice, src_ty);
+ const len = try cg.sliceOrArrayLen(dest_slice, dest_ty);
+ try cg.body.emit(cg.module.gpa, .OpCopyMemorySized, .{
+ .target = dest_ptr,
+ .source = src_ptr,
+ .size = len,
+ });
+}
+
+fn airMemmove(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return cg.fail("TODO implement airMemmove for spirv", .{});
+}
+
+fn airSliceField(cg: *CodeGen, inst: Air.Inst.Index, field: u32) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const field_ty = cg.typeOfIndex(inst);
+ const operand_id = try cg.resolve(ty_op.operand);
+ return try cg.extractField(field_ty, operand_id, field);
+}
+
+fn airSliceElemPtr(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+ const slice_ty = cg.typeOf(bin_op.lhs);
+ if (!slice_ty.isVolatilePtr(zcu) and cg.liveness.isUnused(inst)) return null;
+
+ const slice_id = try cg.resolve(bin_op.lhs);
+ const index_id = try cg.resolve(bin_op.rhs);
+
+ const ptr_ty = cg.typeOfIndex(inst);
+ const ptr_ty_id = try cg.resolveType(ptr_ty, .direct);
+
+ const slice_ptr = try cg.extractField(ptr_ty, slice_id, 0);
+ return try cg.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
+}
+
+fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const slice_ty = cg.typeOf(bin_op.lhs);
+ if (!slice_ty.isVolatilePtr(zcu) and cg.liveness.isUnused(inst)) return null;
+
+ const slice_id = try cg.resolve(bin_op.lhs);
+ const index_id = try cg.resolve(bin_op.rhs);
+
+ const ptr_ty = slice_ty.slicePtrFieldType(zcu);
+ const ptr_ty_id = try cg.resolveType(ptr_ty, .direct);
+
+ const slice_ptr = try cg.extractField(ptr_ty, slice_id, 0);
+ const elem_ptr = try cg.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
+ return try cg.load(slice_ty.childType(zcu), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(zcu) });
+}
+
+fn ptrElemPtr(cg: *CodeGen, ptr_ty: Type, ptr_id: Id, index_id: Id) !Id {
+ const zcu = cg.module.zcu;
+ // Construct the new pointer type for the resulting element pointer.
+ const elem_ty = ptr_ty.elemType2(zcu); // use elemType2() so that we get T for *[N]T.
+ const elem_ty_id = try cg.resolveType(elem_ty, .indirect);
+ const elem_ptr_ty_id = try cg.module.ptrType(elem_ty_id, cg.module.storageClass(ptr_ty.ptrAddressSpace(zcu)));
+ if (ptr_ty.isSinglePointer(zcu)) {
+ // Pointer-to-array. In this case, the resulting pointer is not of the same type
+ // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
+ return try cg.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id});
+ } else {
+ // Resulting pointer type is the same as the ptr_ty, so use ptrAccessChain
+ return try cg.ptrAccessChain(elem_ptr_ty_id, ptr_id, index_id, &.{});
+ }
+}
+
+fn airPtrElemPtr(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
+ const src_ptr_ty = cg.typeOf(bin_op.lhs);
+ const elem_ty = src_ptr_ty.childType(zcu);
+ const ptr_id = try cg.resolve(bin_op.lhs);
+
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const dst_ptr_ty = cg.typeOfIndex(inst);
+ return try cg.bitCast(dst_ptr_ty, src_ptr_ty, ptr_id);
+ }
+
+ const index_id = try cg.resolve(bin_op.rhs);
+ return try cg.ptrElemPtr(src_ptr_ty, ptr_id, index_id);
+}
+
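+/// Lowers `array_elem_val` by spilling the array into a temporary Function-storage
+/// variable and loading the indexed element through an access chain.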
+fn airArrayElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const array_ty = cg.typeOf(bin_op.lhs);
+ const elem_ty = array_ty.childType(zcu);
+ const array_id = try cg.resolve(bin_op.lhs);
+ const index_id = try cg.resolve(bin_op.rhs);
+
+ // SPIR-V doesn't have an array indexing function for some damn reason.
+ // For now, just generate a temporary and use that.
+ // TODO: This backend should probably also use isByRef from the LLVM backend...
+
+ const is_vector = array_ty.isVector(zcu);
+
+ const elem_repr: Repr = if (is_vector) .direct else .indirect;
+ const array_ty_id = try cg.resolveType(array_ty, .direct);
+ const elem_ty_id = try cg.resolveType(elem_ty, elem_repr);
+ const ptr_array_ty_id = try cg.module.ptrType(array_ty_id, .function);
+ const ptr_elem_ty_id = try cg.module.ptrType(elem_ty_id, .function);
+
+ const tmp_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpVariable, .{
+ .id_result_type = ptr_array_ty_id,
+ .id_result = tmp_id,
+ .storage_class = .function,
+ });
+
+ try cg.body.emit(cg.module.gpa, .OpStore, .{
+ .pointer = tmp_id,
+ .object = array_id,
+ });
+
+ const elem_ptr_id = try cg.accessChainId(ptr_elem_ty_id, tmp_id, &.{index_id});
+
+ const result_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpLoad, .{
+ .id_result_type = try cg.resolveType(elem_ty, elem_repr),
+ .id_result = result_id,
+ .pointer = elem_ptr_id,
+ });
+
+ if (is_vector) {
+ // Result is already in direct representation
+ return result_id;
+ }
+
+ // This is an array type; the elements are stored in indirect representation.
+ // We have to convert the type to direct.
+
+ return try cg.convertToDirect(elem_ty, result_id);
+}
+
+fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const ptr_ty = cg.typeOf(bin_op.lhs);
+ const elem_ty = cg.typeOfIndex(inst);
+ const ptr_id = try cg.resolve(bin_op.lhs);
+ const index_id = try cg.resolve(bin_op.rhs);
+ const elem_ptr_id = try cg.ptrElemPtr(ptr_ty, ptr_id, index_id);
+ return try cg.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
+}
+
+fn airVectorStoreElem(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const zcu = cg.module.zcu;
+ const data = cg.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
+ const extra = cg.air.extraData(Air.Bin, data.payload).data;
+
+ const vector_ptr_ty = cg.typeOf(data.vector_ptr);
+ const vector_ty = vector_ptr_ty.childType(zcu);
+ const scalar_ty = vector_ty.scalarType(zcu);
+
+ const scalar_ty_id = try cg.resolveType(scalar_ty, .indirect);
+ const storage_class = cg.module.storageClass(vector_ptr_ty.ptrAddressSpace(zcu));
+ const scalar_ptr_ty_id = try cg.module.ptrType(scalar_ty_id, storage_class);
+
+ const vector_ptr = try cg.resolve(data.vector_ptr);
+ const index = try cg.resolve(extra.lhs);
+ const operand = try cg.resolve(extra.rhs);
+
+ const elem_ptr_id = try cg.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
+ try cg.store(scalar_ty, elem_ptr_id, operand, .{
+ .is_volatile = vector_ptr_ty.isVolatilePtr(zcu),
+ });
+}
+
+fn airSetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const zcu = cg.module.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const un_ptr_ty = cg.typeOf(bin_op.lhs);
+ const un_ty = un_ptr_ty.childType(zcu);
+ const layout = cg.unionLayout(un_ty);
+
+ if (layout.tag_size == 0) return;
+
+ const tag_ty = un_ty.unionTagTypeSafety(zcu).?;
+ const tag_ty_id = try cg.resolveType(tag_ty, .indirect);
+ const tag_ptr_ty_id = try cg.module.ptrType(tag_ty_id, cg.module.storageClass(un_ptr_ty.ptrAddressSpace(zcu)));
+
+ const union_ptr_id = try cg.resolve(bin_op.lhs);
+ const new_tag_id = try cg.resolve(bin_op.rhs);
+
+ if (!layout.has_payload) {
+ try cg.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) });
+ } else {
+ const ptr_id = try cg.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index});
+ try cg.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(zcu) });
+ }
+}
+
+fn airGetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const un_ty = cg.typeOf(ty_op.operand);
+
+ const zcu = cg.module.zcu;
+ const layout = cg.unionLayout(un_ty);
+ if (layout.tag_size == 0) return null;
+
+ const union_handle = try cg.resolve(ty_op.operand);
+ if (!layout.has_payload) return union_handle;
+
+ const tag_ty = un_ty.unionTagTypeSafety(zcu).?;
+ return try cg.extractField(tag_ty, union_handle, layout.tag_index);
+}
+
+fn unionInit(
+ cg: *CodeGen,
+ ty: Type,
+ active_field: u32,
+ payload: ?Id,
+) !Id {
+ // To initialize a union, generate a temporary variable with the
+ // union type, then get the field pointer and pointer-cast it to the
+ // right type to store it. Finally load the entire union.
+
+ // Note: The result here is not cached, because it generates runtime code.
+
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const union_ty = zcu.typeToUnion(ty).?;
+ const tag_ty: Type = .fromInterned(union_ty.enum_tag_ty);
+
+ const layout = cg.unionLayout(ty);
+ const payload_ty: Type = .fromInterned(union_ty.field_types.get(ip)[active_field]);
+
+ if (union_ty.flagsUnordered(ip).layout == .@"packed") {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+ return cg.constInt(int_ty, 0);
+ }
+
+ assert(payload != null);
+ if (payload_ty.isInt(zcu)) {
+ if (ty.bitSize(zcu) == payload_ty.bitSize(zcu)) {
+ return cg.bitCast(ty, payload_ty, payload.?);
+ }
+
+ const trunc = try cg.buildConvert(ty, .{ .ty = payload_ty, .value = .{ .singleton = payload.? } });
+ return try trunc.materialize(cg);
+ }
+
+ const payload_int_ty = try pt.intType(.unsigned, @intCast(payload_ty.bitSize(zcu)));
+ const payload_int = if (payload_ty.ip_index == .bool_type)
+ try cg.convertToIndirect(payload_ty, payload.?)
+ else
+ try cg.bitCast(payload_int_ty, payload_ty, payload.?);
+ const trunc = try cg.buildConvert(ty, .{ .ty = payload_int_ty, .value = .{ .singleton = payload_int } });
+ return try trunc.materialize(cg);
+ }
+
+ const tag_int = if (layout.tag_size != 0) blk: {
+ const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field);
+ const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
+ break :blk tag_int_val.toUnsignedInt(zcu);
+ } else 0;
+
+ if (!layout.has_payload) {
+ return try cg.constInt(tag_ty, tag_int);
+ }
+
+ const tmp_id = try cg.alloc(ty, .{ .storage_class = .function });
+
+ if (layout.tag_size != 0) {
+ const tag_ty_id = try cg.resolveType(tag_ty, .indirect);
+ const tag_ptr_ty_id = try cg.module.ptrType(tag_ty_id, .function);
+ const ptr_id = try cg.accessChain(tag_ptr_ty_id, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
+ const tag_id = try cg.constInt(tag_ty, tag_int);
+ try cg.store(tag_ty, ptr_id, tag_id, .{});
+ }
+
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const layout_payload_ty_id = try cg.resolveType(layout.payload_ty, .indirect);
+ const pl_ptr_ty_id = try cg.module.ptrType(layout_payload_ty_id, .function);
+ const pl_ptr_id = try cg.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
+ const active_pl_ptr_id = if (!layout.payload_ty.eql(payload_ty, zcu)) blk: {
+ const payload_ty_id = try cg.resolveType(payload_ty, .indirect);
+ const active_pl_ptr_ty_id = try cg.module.ptrType(payload_ty_id, .function);
+ const active_pl_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = active_pl_ptr_ty_id,
+ .id_result = active_pl_ptr_id,
+ .operand = pl_ptr_id,
+ });
+ break :blk active_pl_ptr_id;
+ } else pl_ptr_id;
+
+ try cg.store(payload_ty, active_pl_ptr_id, payload.?, .{});
+ } else {
+ assert(payload == null);
+ }
+
+ // Just leave the padding fields uninitialized...
+ // TODO: Or should we initialize them with undef explicitly?
+
+ return try cg.load(ty, tmp_id, .{});
+}
+
+fn airUnionInit(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ip = &zcu.intern_pool;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.UnionInit, ty_pl.payload).data;
+ const ty = cg.typeOfIndex(inst);
+
+ const union_obj = zcu.typeToUnion(ty).?;
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
+ const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ try cg.resolve(extra.init)
+ else
+ null;
+ return try cg.unionInit(ty, extra.field_index, payload);
+}
+
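+/// Lowers `struct_field_val` for structs and unions. Packed layouts extract the field by
+/// shifting and masking the backing integer; other layouts extract the field directly, or,
+/// for unions, go through a temporary and a pointer cast.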
+fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
+
+ const object_ty = cg.typeOf(struct_field.struct_operand);
+ const object_id = try cg.resolve(struct_field.struct_operand);
+ const field_index = struct_field.field_index;
+ const field_ty = object_ty.fieldType(field_index, zcu);
+
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
+
+ switch (object_ty.zigTypeTag(zcu)) {
+ .@"struct" => switch (object_ty.containerLayout(zcu)) {
+ .@"packed" => {
+ const struct_ty = zcu.typeToPackedStruct(object_ty).?;
+ const struct_backing_int_bits = cg.module.backingIntBits(@intCast(object_ty.bitSize(zcu))).@"0";
+ const bit_offset = zcu.structPackedFieldBitOffset(struct_ty, field_index);
+ // We use the same int type the packed struct is backed by, because even though it would
+ // be valid SPIR-V to use a smaller type like u16, some implementations like PoCL will complain.
+ const bit_offset_id = try cg.constInt(object_ty, bit_offset);
+ const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
+ const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+ const field_int_ty = try pt.intType(signedness, field_bit_size);
+ const shift_lhs: Temporary = .{ .ty = object_ty, .value = .{ .singleton = object_id } };
+ const shift = try cg.buildBinary(.OpShiftRightLogical, shift_lhs, .{ .ty = object_ty, .value = .{ .singleton = bit_offset_id } });
+ const mask_id = try cg.constInt(object_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1);
+ const masked = try cg.buildBinary(.OpBitwiseAnd, shift, .{ .ty = object_ty, .value = .{ .singleton = mask_id } });
+ const result_id = blk: {
+ if (cg.module.backingIntBits(field_bit_size).@"0" == struct_backing_int_bits)
+ break :blk try cg.bitCast(field_int_ty, object_ty, try masked.materialize(cg));
+ const trunc = try cg.buildConvert(field_int_ty, masked);
+ break :blk try trunc.materialize(cg);
+ };
+ if (field_ty.ip_index == .bool_type) return try cg.convertToDirect(.bool, result_id);
+ if (field_ty.isInt(zcu)) return result_id;
+ return try cg.bitCast(field_ty, field_int_ty, result_id);
+ },
+ else => return try cg.extractField(field_ty, object_id, field_index),
+ },
+ .@"union" => switch (object_ty.containerLayout(zcu)) {
+ .@"packed" => {
+ const backing_int_ty = try pt.intType(.unsigned, @intCast(object_ty.bitSize(zcu)));
+ const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
+ const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+ const int_ty = try pt.intType(signedness, field_bit_size);
+ const mask_id = try cg.constInt(backing_int_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1);
+ const masked = try cg.buildBinary(
+ .OpBitwiseAnd,
+ .{ .ty = backing_int_ty, .value = .{ .singleton = object_id } },
+ .{ .ty = backing_int_ty, .value = .{ .singleton = mask_id } },
+ );
+ const result_id = blk: {
+ if (cg.module.backingIntBits(field_bit_size).@"0" == cg.module.backingIntBits(@intCast(backing_int_ty.bitSize(zcu))).@"0")
+ break :blk try cg.bitCast(int_ty, backing_int_ty, try masked.materialize(cg));
+ const trunc = try cg.buildConvert(int_ty, masked);
+ break :blk try trunc.materialize(cg);
+ };
+ if (field_ty.ip_index == .bool_type) return try cg.convertToDirect(.bool, result_id);
+ if (field_ty.isInt(zcu)) return result_id;
+ return try cg.bitCast(field_ty, int_ty, result_id);
+ },
+ else => {
+ // Store, ptr-elem-ptr, pointer-cast, load
+ const layout = cg.unionLayout(object_ty);
+ assert(layout.has_payload);
+
+ const tmp_id = try cg.alloc(object_ty, .{ .storage_class = .function });
+ try cg.store(object_ty, tmp_id, object_id, .{});
+
+ const layout_payload_ty_id = try cg.resolveType(layout.payload_ty, .indirect);
+ const pl_ptr_ty_id = try cg.module.ptrType(layout_payload_ty_id, .function);
+ const pl_ptr_id = try cg.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
+
+ const field_ty_id = try cg.resolveType(field_ty, .indirect);
+ const active_pl_ptr_ty_id = try cg.module.ptrType(field_ty_id, .function);
+ const active_pl_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = active_pl_ptr_ty_id,
+ .id_result = active_pl_ptr_id,
+ .operand = pl_ptr_id,
+ });
+ return try cg.load(field_ty, active_pl_ptr_id, .{});
+ },
+ },
+ else => unreachable,
+ }
+}
+
+fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+
+ const parent_ty = ty_pl.ty.toType().childType(zcu);
+ const result_ty_id = try cg.resolveType(ty_pl.ty.toType(), .indirect);
+
+ const field_ptr = try cg.resolve(extra.field_ptr);
+ const field_ptr_int = try cg.intFromPtr(field_ptr);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
+
+ const base_ptr_int = base_ptr_int: {
+ if (field_offset == 0) break :base_ptr_int field_ptr_int;
+
+ const field_offset_id = try cg.constInt(.usize, field_offset);
+ const field_ptr_tmp: Temporary = .init(.usize, field_ptr_int);
+ const field_offset_tmp: Temporary = .init(.usize, field_offset_id);
+ const result = try cg.buildBinary(.OpISub, field_ptr_tmp, field_offset_tmp);
+ break :base_ptr_int try result.materialize(cg);
+ };
+
+ const base_ptr = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpConvertUToPtr, .{
+ .id_result_type = result_ty_id,
+ .id_result = base_ptr,
+ .integer_value = base_ptr_int,
+ });
+
+ return base_ptr;
+}
+
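+/// Computes a pointer to a field of a slice, struct or union, given a pointer to the
+/// aggregate. For unions, the payload pointer is bit-cast to the field's pointer type;
+/// a pointer to a zero-sized union payload is lowered to undefined.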
+fn structFieldPtr(
+ cg: *CodeGen,
+ result_ptr_ty: Type,
+ object_ptr_ty: Type,
+ object_ptr: Id,
+ field_index: u32,
+) !Id {
+ const result_ty_id = try cg.resolveType(result_ptr_ty, .direct);
+
+ const zcu = cg.module.zcu;
+ const object_ty = object_ptr_ty.childType(zcu);
+ switch (object_ty.zigTypeTag(zcu)) {
+ .pointer => {
+ assert(object_ty.isSlice(zcu));
+ return cg.accessChain(result_ty_id, object_ptr, &.{field_index});
+ },
+ .@"struct" => switch (object_ty.containerLayout(zcu)) {
+ .@"packed" => return cg.todo("implement field access for packed structs", .{}),
+ else => {
+ return try cg.accessChain(result_ty_id, object_ptr, &.{field_index});
+ },
+ },
+ .@"union" => {
+ const layout = cg.unionLayout(object_ty);
+ if (!layout.has_payload) {
+ // Asked to get a pointer to a zero-sized field. Just lower this
+ // to undefined; there is no reason to make it a valid pointer.
+ return try cg.module.constUndef(result_ty_id);
+ }
+
+ const storage_class = cg.module.storageClass(object_ptr_ty.ptrAddressSpace(zcu));
+ const layout_payload_ty_id = try cg.resolveType(layout.payload_ty, .indirect);
+ const pl_ptr_ty_id = try cg.module.ptrType(layout_payload_ty_id, storage_class);
+ const pl_ptr_id = blk: {
+ if (object_ty.containerLayout(zcu) == .@"packed") break :blk object_ptr;
+ break :blk try cg.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
+ };
+
+ const active_pl_ptr_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBitcast, .{
+ .id_result_type = result_ty_id,
+ .id_result = active_pl_ptr_id,
+ .operand = pl_ptr_id,
+ });
+ return active_pl_ptr_id;
+ },
+ else => unreachable,
+ }
+}
+
+fn airStructFieldPtrIndex(cg: *CodeGen, inst: Air.Inst.Index, field_index: u32) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const struct_ptr = try cg.resolve(ty_op.operand);
+ const struct_ptr_ty = cg.typeOf(ty_op.operand);
+ const result_ptr_ty = cg.typeOfIndex(inst);
+ return try cg.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index);
+}
+
+const AllocOptions = struct {
+ initializer: ?Id = null,
+ /// The final storage class of the pointer. This may be either `.generic` or `.function`.
+ /// In either case, the local is allocated in the `.function` storage class, and optionally
+ /// cast back to `.generic`.
+ storage_class: StorageClass,
+};
+
+// Allocate a function-local variable, with possible initializer.
+// This function returns a pointer to a variable of type `ty`.
+// The variable itself is always placed in the Function storage class;
+// see `AllocOptions.storage_class` for the intended final storage class of the pointer.
+fn alloc(
+ cg: *CodeGen,
+ ty: Type,
+ options: AllocOptions,
+) !Id {
+ const ty_id = try cg.resolveType(ty, .indirect);
+ const ptr_fn_ty_id = try cg.module.ptrType(ty_id, .function);
+
+ // SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
+ // directly generate them into func.prologue instead of the body.
+ const var_id = cg.module.allocId();
+ try cg.prologue.emit(cg.module.gpa, .OpVariable, .{
+ .id_result_type = ptr_fn_ty_id,
+ .id_result = var_id,
+ .storage_class = .function,
+ .initializer = options.initializer,
+ });
+
+ return var_id;
+}
+
+fn airAlloc(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ptr_ty = cg.typeOfIndex(inst);
+ const child_ty = ptr_ty.childType(zcu);
+ return try cg.alloc(child_ty, .{
+ .storage_class = cg.module.storageClass(ptr_ty.ptrAddressSpace(zcu)),
+ });
+}
+
+fn airArg(cg: *CodeGen) Id {
+ defer cg.next_arg_index += 1;
+ return cg.args.items[cg.next_arg_index];
+}
+
+/// Given a slice of incoming block connections, returns the block-id of the next
+/// block to jump to. This function emits instructions, so it should be called while
+/// generating code inside the merge block of the block.
+/// This function should only be called with structured control flow generation.
+fn structuredNextBlock(cg: *CodeGen, incoming: []const ControlFlow.Structured.Block.Incoming) !Id {
+ assert(cg.control_flow == .structured);
+
+ const result_id = cg.module.allocId();
+ const block_id_ty_id = try cg.resolveType(.u32, .direct);
+ try cg.body.emitRaw(cg.module.gpa, .OpPhi, @intCast(2 + incoming.len * 2)); // result type + result + variable/parent...
+ cg.body.writeOperand(Id, block_id_ty_id);
+ cg.body.writeOperand(Id, result_id);
+
+ for (incoming) |incoming_block| {
+ cg.body.writeOperand(spec.PairIdRefIdRef, .{ incoming_block.next_block, incoming_block.src_label });
+ }
+
+ return result_id;
+}
+
+/// Jumps to the block with the target block-id. This function must only be called when
+/// terminating a body; there should be no instructions after it.
+/// This function should only be called with structured control flow generation.
+fn structuredBreak(cg: *CodeGen, target_block: Id) !void {
+ assert(cg.control_flow == .structured);
+
+ const gpa = cg.module.gpa;
+ const sblock = cg.control_flow.structured.block_stack.getLast();
+ const merge_block = switch (sblock.*) {
+ .selection => |*merge| blk: {
+ const merge_label = cg.module.allocId();
+ try merge.merge_stack.append(gpa, .{
+ .incoming = .{
+ .src_label = cg.block_label,
+ .next_block = target_block,
+ },
+ .merge_block = merge_label,
+ });
+ break :blk merge_label;
+ },
+ // Loop blocks do not end in a break: neither through a direct break,
+ // nor through another instruction like cond_br or unreachable (these
+ // situations are replaced by `cond_br` in Sema, or a `block` instruction is
+ // placed around them).
+ .loop => unreachable,
+ };
+
+ try cg.body.emitBranch(cg.module.gpa, merge_block);
+}
+
+/// Generate a body in a way that exits the body using only structured constructs.
+/// Returns the block-id of the next block to jump to. After this function, a jump
+/// should still be emitted to the block that should follow this structured body.
+/// This function should only be called with structured control flow generation.
+fn genStructuredBody(
+ cg: *CodeGen,
+ /// This parameter defines the method that this structured body is exited with.
+ block_merge_type: union(enum) {
+ /// Using selection; early exits from this body are surrounded with
+ /// if() statements.
+ selection,
+ /// Using loops; loops can be early exited by jumping to the merge block at
+ /// any time.
+ loop: struct {
+ merge_label: Id,
+ continue_label: Id,
+ },
+ },
+ body: []const Air.Inst.Index,
+) !Id {
+ assert(cg.control_flow == .structured);
+
+ const gpa = cg.module.gpa;
+
+ var sblock: ControlFlow.Structured.Block = switch (block_merge_type) {
+ .loop => |merge| .{ .loop = .{
+ .merge_block = merge.merge_label,
+ } },
+ .selection => .{ .selection = .{} },
+ };
+ defer sblock.deinit(gpa);
+
+ {
+ try cg.control_flow.structured.block_stack.append(gpa, &sblock);
+ defer _ = cg.control_flow.structured.block_stack.pop();
+
+ try cg.genBody(body);
+ }
+
+ switch (sblock) {
+ .selection => |merge| {
+ // Now generate the merge block for all merges that
+ // still need to be performed.
+ const merge_stack = merge.merge_stack.items;
+
+ // If no merges on the stack, this block didn't generate any jumps (all paths
+ // ended with a return or an unreachable). In that case, we don't need to do
+ // any merging.
+ if (merge_stack.len == 0) {
+ // We still need to return a value of a next block to jump to.
+ // For example, if we have code like
+ // if (x) {
+ // if (y) return else return;
+ // } else {}
+ // then we still need the outer to have an OpSelectionMerge and consequently
+ // a phi node. In that case we can just return bogus, since we know that its
+ // path will never be taken.
+
+ // Make sure that we are still in a block when exiting the function.
+ // TODO: Can we get rid of that?
+ try cg.beginSpvBlock(cg.module.allocId());
+ const block_id_ty_id = try cg.resolveType(.u32, .direct);
+ return try cg.module.constUndef(block_id_ty_id);
+ }
+
+ // The top-most merge actually only has a single source, the
+ // final jump of the block, or the merge block of a sub-block, cond_br,
+ // or loop. Therefore we just need to generate a block with a jump to the
+ // next merge block.
+ try cg.beginSpvBlock(merge_stack[merge_stack.len - 1].merge_block);
+
+ // Now generate a merge ladder for the remaining merges in the stack.
+ var incoming: ControlFlow.Structured.Block.Incoming = .{
+ .src_label = cg.block_label,
+ .next_block = merge_stack[merge_stack.len - 1].incoming.next_block,
+ };
+ var i = merge_stack.len - 1;
+ while (i > 0) {
+ i -= 1;
+ const step = merge_stack[i];
+ try cg.body.emitBranch(cg.module.gpa, step.merge_block);
+ try cg.beginSpvBlock(step.merge_block);
+ const next_block = try cg.structuredNextBlock(&.{ incoming, step.incoming });
+ incoming = .{
+ .src_label = step.merge_block,
+ .next_block = next_block,
+ };
+ }
+
+ return incoming.next_block;
+ },
+ .loop => |merge| {
+ // Close the loop by jumping to the continue label
+ try cg.body.emitBranch(cg.module.gpa, block_merge_type.loop.continue_label);
+ // For blocks we must simply merge all the incoming blocks to get the next block.
+ try cg.beginSpvBlock(merge.merge_block);
+ return try cg.structuredNextBlock(merge.merges.items);
+ },
+ }
+}
+
+fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const inst_datas = cg.air.instructions.items(.data);
+ const extra = cg.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload);
+ return cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
+}
+
+fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?Id {
+ // In AIR, a block doesn't really define an entry point like a SPIR-V block does;
+ // it is more like a scope that breaks can jump out of and "return" a value from.
+ // This cannot be directly modelled in SPIR-V, so for a block instruction
+ // we split up the current block by first generating the code
+ // of the block, then a label, and then generating the rest of the current
+ // ir.Block in a different SPIR-V block.
+
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const ty = cg.typeOfIndex(inst);
+ const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
+
+ const cf = switch (cg.control_flow) {
+ .structured => |*cf| cf,
+ .unstructured => |*cf| {
+ var block: ControlFlow.Unstructured.Block = .{};
+ defer block.incoming_blocks.deinit(gpa);
+
+ // 4 chosen as arbitrary initial capacity.
+ try block.incoming_blocks.ensureUnusedCapacity(gpa, 4);
+
+ try cf.blocks.putNoClobber(gpa, inst, &block);
+ defer assert(cf.blocks.remove(inst));
+
+ try cg.genBody(body);
+
+ // Only begin a new block if there were actually any breaks towards it.
+ if (block.label) |label| {
+ try cg.beginSpvBlock(label);
+ }
+
+ if (!have_block_result)
+ return null;
+
+ assert(block.label != null);
+ const result_id = cg.module.allocId();
+ const result_type_id = try cg.resolveType(ty, .direct);
+
+ try cg.body.emitRaw(
+ cg.module.gpa,
+ .OpPhi,
+ // result type + result + variable/parent...
+ 2 + @as(u16, @intCast(block.incoming_blocks.items.len * 2)),
+ );
+ cg.body.writeOperand(Id, result_type_id);
+ cg.body.writeOperand(Id, result_id);
+
+ for (block.incoming_blocks.items) |incoming| {
+ cg.body.writeOperand(
+ spec.PairIdRefIdRef,
+ .{ incoming.break_value_id, incoming.src_label },
+ );
+ }
+
+ return result_id;
+ },
+ };
+
+ const maybe_block_result_var_id = if (have_block_result) blk: {
+ const block_result_var_id = try cg.alloc(ty, .{ .storage_class = .function });
+ try cf.block_results.putNoClobber(gpa, inst, block_result_var_id);
+ break :blk block_result_var_id;
+ } else null;
+ defer if (have_block_result) assert(cf.block_results.remove(inst));
+
+ const next_block = try cg.genStructuredBody(.selection, body);
+
+ // When encountering a block instruction, we are always at least in the function's scope,
+ // so there always has to be another entry.
+ assert(cf.block_stack.items.len > 0);
+
+ // Check if the target of the branch was this current block.
+ const this_block = try cg.constInt(.u32, @intFromEnum(inst));
+ const jump_to_this_block_id = cg.module.allocId();
+ const bool_ty_id = try cg.resolveType(.bool, .direct);
+ try cg.body.emit(cg.module.gpa, .OpIEqual, .{
+ .id_result_type = bool_ty_id,
+ .id_result = jump_to_this_block_id,
+ .operand_1 = next_block,
+ .operand_2 = this_block,
+ });
+
+ const sblock = cf.block_stack.getLast();
+
+ if (ty.isNoReturn(zcu)) {
+ // If this block is noreturn, this instruction is the last of a block,
+ // and we must simply jump to the block's merge unconditionally.
+ try cg.structuredBreak(next_block);
+ } else {
+ switch (sblock.*) {
+ .selection => |*merge| {
+ // To jump out of a selection block, push a new entry onto its merge stack and
+ // generate a conditional branch to there and to the instructions following this block.
+ const merge_label = cg.module.allocId();
+ const then_label = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpSelectionMerge, .{
+ .merge_block = merge_label,
+ .selection_control = .{},
+ });
+ try cg.body.emit(cg.module.gpa, .OpBranchConditional, .{
+ .condition = jump_to_this_block_id,
+ .true_label = then_label,
+ .false_label = merge_label,
+ });
+ try merge.merge_stack.append(gpa, .{
+ .incoming = .{
+ .src_label = cg.block_label,
+ .next_block = next_block,
+ },
+ .merge_block = merge_label,
+ });
+
+ try cg.beginSpvBlock(then_label);
+ },
+ .loop => |*merge| {
+ // To jump out of a loop block, generate a conditional that exits the block
+ // to the loop merge if the target ID is not the one of this block.
+ const continue_label = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpBranchConditional, .{
+ .condition = jump_to_this_block_id,
+ .true_label = continue_label,
+ .false_label = merge.merge_block,
+ });
+ try merge.merges.append(gpa, .{
+ .src_label = cg.block_label,
+ .next_block = next_block,
+ });
+ try cg.beginSpvBlock(continue_label);
+ },
+ }
+ }
+
+ if (maybe_block_result_var_id) |block_result_var_id| {
+ return try cg.load(ty, block_result_var_id, .{});
+ }
+
+ return null;
+}
+
+fn airBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const br = cg.air.instructions.items(.data)[@intFromEnum(inst)].br;
+ const operand_ty = cg.typeOf(br.operand);
+
+ switch (cg.control_flow) {
+ .structured => |*cf| {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
+ const operand_id = try cg.resolve(br.operand);
+ const block_result_var_id = cf.block_results.get(br.block_inst).?;
+ try cg.store(operand_ty, block_result_var_id, operand_id, .{});
+ }
+
+ const next_block = try cg.constInt(.u32, @intFromEnum(br.block_inst));
+ try cg.structuredBreak(next_block);
+ },
+ .unstructured => |cf| {
+ const block = cf.blocks.get(br.block_inst).?;
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
+ const operand_id = try cg.resolve(br.operand);
+ // block_label should not be undefined here, given that there
+ // is a br or br_void in the function's body.
+ try block.incoming_blocks.append(gpa, .{
+ .src_label = cg.block_label,
+ .break_value_id = operand_id,
+ });
+ }
+
+ if (block.label == null) {
+ block.label = cg.module.allocId();
+ }
+
+ try cg.body.emitBranch(cg.module.gpa, block.label.?);
+ },
+ }
+}
+
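+/// Lowers `cond_br`. With structured control flow, an OpSelectionMerge is emitted and both
+/// branches merge into the next block; otherwise a plain OpBranchConditional is emitted and
+/// each body is generated in its own block.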
+fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const cond_br = cg.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[cond_br.end..][0..cond_br.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]);
+ const condition_id = try cg.resolve(pl_op.operand);
+
+ const then_label = cg.module.allocId();
+ const else_label = cg.module.allocId();
+
+ switch (cg.control_flow) {
+ .structured => {
+ const merge_label = cg.module.allocId();
+
+ try cg.body.emit(cg.module.gpa, .OpSelectionMerge, .{
+ .merge_block = merge_label,
+ .selection_control = .{},
+ });
+ try cg.body.emit(cg.module.gpa, .OpBranchConditional, .{
+ .condition = condition_id,
+ .true_label = then_label,
+ .false_label = else_label,
+ });
+
+ try cg.beginSpvBlock(then_label);
+ const then_next = try cg.genStructuredBody(.selection, then_body);
+ const then_incoming: ControlFlow.Structured.Block.Incoming = .{
+ .src_label = cg.block_label,
+ .next_block = then_next,
+ };
+ try cg.body.emitBranch(cg.module.gpa, merge_label);
+
+ try cg.beginSpvBlock(else_label);
+ const else_next = try cg.genStructuredBody(.selection, else_body);
+ const else_incoming: ControlFlow.Structured.Block.Incoming = .{
+ .src_label = cg.block_label,
+ .next_block = else_next,
+ };
+ try cg.body.emitBranch(cg.module.gpa, merge_label);
+
+ try cg.beginSpvBlock(merge_label);
+ const next_block = try cg.structuredNextBlock(&.{ then_incoming, else_incoming });
+
+ try cg.structuredBreak(next_block);
+ },
+ .unstructured => {
+ try cg.body.emit(cg.module.gpa, .OpBranchConditional, .{
+ .condition = condition_id,
+ .true_label = then_label,
+ .false_label = else_label,
+ });
+
+ try cg.beginSpvBlock(then_label);
+ try cg.genBody(then_body);
+ try cg.beginSpvBlock(else_label);
+ try cg.genBody(else_body);
+ },
+ }
+}
+
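+/// Lowers `loop`. With structured control flow this emits a separate header block with
+/// OpLoopMerge and distinct body, continue and merge blocks; otherwise the body simply
+/// branches back to its own label.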
+fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const loop = cg.air.extraData(Air.Block, ty_pl.payload);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[loop.end..][0..loop.data.body_len]);
+
+ const body_label = cg.module.allocId();
+
+ switch (cg.control_flow) {
+ .structured => {
+ const header_label = cg.module.allocId();
+ const merge_label = cg.module.allocId();
+ const continue_label = cg.module.allocId();
+
+ // The back-edge must point to the loop header, so generate a separate block for the
+ // loop header so that we don't accidentally include some instructions from there
+ // in the loop.
+ try cg.body.emitBranch(cg.module.gpa, header_label);
+ try cg.beginSpvBlock(header_label);
+
+ // Emit loop header and jump to loop body
+ try cg.body.emit(cg.module.gpa, .OpLoopMerge, .{
+ .merge_block = merge_label,
+ .continue_target = continue_label,
+ .loop_control = .{},
+ });
+ try cg.body.emitBranch(cg.module.gpa, body_label);
+
+ try cg.beginSpvBlock(body_label);
+
+ const next_block = try cg.genStructuredBody(.{ .loop = .{
+ .merge_label = merge_label,
+ .continue_label = continue_label,
+ } }, body);
+ try cg.structuredBreak(next_block);
+
+ try cg.beginSpvBlock(continue_label);
+ try cg.body.emitBranch(cg.module.gpa, header_label);
+ },
+ .unstructured => {
+ try cg.body.emitBranch(cg.module.gpa, body_label);
+ try cg.beginSpvBlock(body_label);
+ try cg.genBody(body);
+ try cg.body.emitBranch(cg.module.gpa, body_label);
+ },
+ }
+}
+
+fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const ptr_ty = cg.typeOf(ty_op.operand);
+ const elem_ty = cg.typeOfIndex(inst);
+ const operand = try cg.resolve(ty_op.operand);
+ if (!ptr_ty.isVolatilePtr(zcu) and cg.liveness.isUnused(inst)) return null;
+
+ return try cg.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
+}
+
+fn airStore(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const zcu = cg.module.zcu;
+ const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ const ptr_ty = cg.typeOf(bin_op.lhs);
+ const elem_ty = ptr_ty.childType(zcu);
+ const ptr = try cg.resolve(bin_op.lhs);
+ const value = try cg.resolve(bin_op.rhs);
+
+ try cg.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
+}
+
+fn airRet(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const zcu = cg.module.zcu;
+ const operand = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const ret_ty = cg.typeOf(operand);
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
+ if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
+ // Functions with an empty error set are emitted with an error code
+ // return type and return zero, so that pointers to them can be coerced
+ // to pointers to functions that return anyerror.
+ const no_err_id = try cg.constInt(.anyerror, 0);
+ return try cg.body.emit(cg.module.gpa, .OpReturnValue, .{ .value = no_err_id });
+ } else {
+ return try cg.body.emit(cg.module.gpa, .OpReturn, {});
+ }
+ }
+
+ const operand_id = try cg.resolve(operand);
+ try cg.body.emit(cg.module.gpa, .OpReturnValue, .{ .value = operand_id });
+}
+
+fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const zcu = cg.module.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const ptr_ty = cg.typeOf(un_op);
+ const ret_ty = ptr_ty.childType(zcu);
+
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
+ if (Type.fromInterned(fn_info.return_type).isError(zcu)) {
+ // Functions with an empty error set are emitted with an error code
+ // return type and return zero, so that pointers to them can be coerced
+ // to pointers to functions that return anyerror.
+ const no_err_id = try cg.constInt(.anyerror, 0);
+ return try cg.body.emit(cg.module.gpa, .OpReturnValue, .{ .value = no_err_id });
+ } else {
+ return try cg.body.emit(cg.module.gpa, .OpReturn, {});
+ }
+ }
+
+ const ptr = try cg.resolve(un_op);
+ const value = try cg.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
+ try cg.body.emit(cg.module.gpa, .OpReturnValue, .{
+ .value = value,
+ });
+}
+
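+/// Lowers `try`: compares the error union's error code against zero, branches into `body`
+/// when an error is present, and otherwise extracts the payload (if it has any bits).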
+fn airTry(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const err_union_id = try cg.resolve(pl_op.operand);
+ const extra = cg.air.extraData(Air.Try, pl_op.payload);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]);
+
+ const err_union_ty = cg.typeOf(pl_op.operand);
+ const payload_ty = cg.typeOfIndex(inst);
+
+ const bool_ty_id = try cg.resolveType(.bool, .direct);
+
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+
+ if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
+ const err_id = if (eu_layout.payload_has_bits)
+ try cg.extractField(.anyerror, err_union_id, eu_layout.errorFieldIndex())
+ else
+ err_union_id;
+
+ const zero_id = try cg.constInt(.anyerror, 0);
+ const is_err_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpINotEqual, .{
+ .id_result_type = bool_ty_id,
+ .id_result = is_err_id,
+ .operand_1 = err_id,
+ .operand_2 = zero_id,
+ });
+
+ // When there is an error, we must evaluate `body`. Otherwise we must continue
+ // with the current body.
+ // Just generate a new block here, then generate a new block inline for the remainder of the body.
+
+ const err_block = cg.module.allocId();
+ const ok_block = cg.module.allocId();
+
+ switch (cg.control_flow) {
+ .structured => {
+ // According to the AIR documentation, this block is guaranteed
+ // not to break and to end in a return instruction. Thus,
+ // for structured control flow, we can just naively use
+ // the ok block as the merge block here.
+ try cg.body.emit(cg.module.gpa, .OpSelectionMerge, .{
+ .merge_block = ok_block,
+ .selection_control = .{},
+ });
+ },
+ .unstructured => {},
+ }
+
+ try cg.body.emit(cg.module.gpa, .OpBranchConditional, .{
+ .condition = is_err_id,
+ .true_label = err_block,
+ .false_label = ok_block,
+ });
+
+ try cg.beginSpvBlock(err_block);
+ try cg.genBody(body);
+
+ try cg.beginSpvBlock(ok_block);
+ }
+
+ if (!eu_layout.payload_has_bits) {
+ return null;
+ }
+
+ // Now just extract the payload, if required.
+ return try cg.extractField(payload_ty, err_union_id, eu_layout.payloadFieldIndex());
+}
+
+fn airErrUnionErr(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_id = try cg.resolve(ty_op.operand);
+ const err_union_ty = cg.typeOf(ty_op.operand);
+ const err_ty_id = try cg.resolveType(.anyerror, .direct);
+
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
+ // No error possible, so just return undefined.
+ return try cg.module.constUndef(err_ty_id);
+ }
+
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+
+ if (!eu_layout.payload_has_bits) {
+ // If no payload, error union is represented by error set.
+ return operand_id;
+ }
+
+ return try cg.extractField(.anyerror, operand_id, eu_layout.errorFieldIndex());
+}
+
+fn airErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_id = try cg.resolve(ty_op.operand);
+ const payload_ty = cg.typeOfIndex(inst);
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+
+ if (!eu_layout.payload_has_bits) {
+ return null; // No error possible.
+ }
+
+ return try cg.extractField(payload_ty, operand_id, eu_layout.payloadFieldIndex());
+}
+
+fn airWrapErrUnionErr(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const err_union_ty = cg.typeOfIndex(inst);
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ const operand_id = try cg.resolve(ty_op.operand);
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+
+ if (!eu_layout.payload_has_bits) {
+ return operand_id;
+ }
+
+ const payload_ty_id = try cg.resolveType(payload_ty, .indirect);
+
+ var members: [2]Id = undefined;
+ members[eu_layout.errorFieldIndex()] = operand_id;
+ members[eu_layout.payloadFieldIndex()] = try cg.module.constUndef(payload_ty_id);
+
+ var types: [2]Type = undefined;
+ types[eu_layout.errorFieldIndex()] = .anyerror;
+ types[eu_layout.payloadFieldIndex()] = payload_ty;
+
+ const err_union_ty_id = try cg.resolveType(err_union_ty, .direct);
+ return try cg.constructComposite(err_union_ty_id, &members);
+}
+
+fn airWrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const err_union_ty = cg.typeOfIndex(inst);
+ const operand_id = try cg.resolve(ty_op.operand);
+ const payload_ty = cg.typeOf(ty_op.operand);
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+
+ if (!eu_layout.payload_has_bits) {
+ return try cg.constInt(.anyerror, 0);
+ }
+
+ var members: [2]Id = undefined;
+ members[eu_layout.errorFieldIndex()] = try cg.constInt(.anyerror, 0);
+ members[eu_layout.payloadFieldIndex()] = try cg.convertToIndirect(payload_ty, operand_id);
+
+ var types: [2]Type = undefined;
+ types[eu_layout.errorFieldIndex()] = .anyerror;
+ types[eu_layout.payloadFieldIndex()] = payload_ty;
+
+ const err_union_ty_id = try cg.resolveType(err_union_ty, .direct);
+ return try cg.constructComposite(err_union_ty_id, &members);
+}
+
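+/// Lowers `is_null`/`is_non_null` (and their pointer variants). When the optional is
+/// represented by its payload, the payload pointer is compared against null; otherwise the
+/// trailing bool flag is inspected.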
+fn airIsNull(cg: *CodeGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?Id {
+ const zcu = cg.module.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand_id = try cg.resolve(un_op);
+ const operand_ty = cg.typeOf(un_op);
+ const optional_ty = if (is_pointer) operand_ty.childType(zcu) else operand_ty;
+ const payload_ty = optional_ty.optionalChild(zcu);
+
+ const bool_ty_id = try cg.resolveType(.bool, .direct);
+
+ if (optional_ty.optionalReprIsPayload(zcu)) {
+ // Pointer payload represents nullability: pointer or slice.
+ const loaded_id = if (is_pointer)
+ try cg.load(optional_ty, operand_id, .{})
+ else
+ operand_id;
+
+ const ptr_ty = if (payload_ty.isSlice(zcu))
+ payload_ty.slicePtrFieldType(zcu)
+ else
+ payload_ty;
+
+ const ptr_id = if (payload_ty.isSlice(zcu))
+ try cg.extractField(ptr_ty, loaded_id, 0)
+ else
+ loaded_id;
+
+ const ptr_ty_id = try cg.resolveType(ptr_ty, .direct);
+ const null_id = try cg.module.constNull(ptr_ty_id);
+ const null_tmp: Temporary = .init(ptr_ty, null_id);
+ const ptr: Temporary = .init(ptr_ty, ptr_id);
+
+ const op: std.math.CompareOperator = switch (pred) {
+ .is_null => .eq,
+ .is_non_null => .neq,
+ };
+ const result = try cg.cmp(op, ptr, null_tmp);
+ return try result.materialize(cg);
+ }
+
+ const is_non_null_id = blk: {
+ if (is_pointer) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ const storage_class = cg.module.storageClass(operand_ty.ptrAddressSpace(zcu));
+ const bool_indirect_ty_id = try cg.resolveType(.bool, .indirect);
+ const bool_ptr_ty_id = try cg.module.ptrType(bool_indirect_ty_id, storage_class);
+ const tag_ptr_id = try cg.accessChain(bool_ptr_ty_id, operand_id, &.{1});
+ break :blk try cg.load(.bool, tag_ptr_id, .{});
+ }
+
+ break :blk try cg.load(.bool, operand_id, .{});
+ }
+
+ break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu))
+ try cg.extractField(.bool, operand_id, 1)
+ else
+            // Optionals with no payload are represented as a single (indirect) bool
+            // indicating whether the optional is set, so convert it back to the
+            // direct bool here.
+ try cg.convertToDirect(.bool, operand_id);
+ };
+
+ return switch (pred) {
+ .is_null => blk: {
+ // Invert condition
+ const result_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpLogicalNot, .{
+ .id_result_type = bool_ty_id,
+ .id_result = result_id,
+ .operand = is_non_null_id,
+ });
+ break :blk result_id;
+ },
+ .is_non_null => is_non_null_id,
+ };
+}
+
+fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?Id {
+ const zcu = cg.module.zcu;
+ const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+ const operand_id = try cg.resolve(un_op);
+ const err_union_ty = cg.typeOf(un_op);
+
+ if (err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
+ return try cg.constBool(pred == .is_non_err, .direct);
+ }
+
+ const payload_ty = err_union_ty.errorUnionPayload(zcu);
+ const eu_layout = cg.errorUnionLayout(payload_ty);
+ const bool_ty_id = try cg.resolveType(.bool, .direct);
+
+ const error_id = if (!eu_layout.payload_has_bits)
+ operand_id
+ else
+ try cg.extractField(.anyerror, operand_id, eu_layout.errorFieldIndex());
+
+ const result_id = cg.module.allocId();
+ switch (pred) {
+ inline else => |pred_ct| try cg.body.emit(
+ cg.module.gpa,
+ switch (pred_ct) {
+ .is_err => .OpINotEqual,
+ .is_non_err => .OpIEqual,
+ },
+ .{
+ .id_result_type = bool_ty_id,
+ .id_result = result_id,
+ .operand_1 = error_id,
+ .operand_2 = try cg.constInt(.anyerror, 0),
+ },
+ ),
+ }
+ return result_id;
+}
+
+fn airUnwrapOptional(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_id = try cg.resolve(ty_op.operand);
+ const optional_ty = cg.typeOf(ty_op.operand);
+ const payload_ty = cg.typeOfIndex(inst);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
+
+ if (optional_ty.optionalReprIsPayload(zcu)) {
+ return operand_id;
+ }
+
+ return try cg.extractField(payload_ty, operand_id, 0);
+}
+
+fn airUnwrapOptionalPtr(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const operand_id = try cg.resolve(ty_op.operand);
+ const operand_ty = cg.typeOf(ty_op.operand);
+ const optional_ty = operand_ty.childType(zcu);
+ const payload_ty = optional_ty.optionalChild(zcu);
+ const result_ty = cg.typeOfIndex(inst);
+ const result_ty_id = try cg.resolveType(result_ty, .direct);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ // There is no payload, but we still need to return a valid pointer.
+ // We can just return anything here, so just return a pointer to the operand.
+ return try cg.bitCast(result_ty, operand_ty, operand_id);
+ }
+
+ if (optional_ty.optionalReprIsPayload(zcu)) {
+ // They are the same value.
+ return try cg.bitCast(result_ty, operand_ty, operand_id);
+ }
+
+ return try cg.accessChain(result_ty_id, operand_id, &.{0});
+}
+
+fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+ const payload_ty = cg.typeOf(ty_op.operand);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+ return try cg.constBool(true, .indirect);
+ }
+
+ const operand_id = try cg.resolve(ty_op.operand);
+
+ const optional_ty = cg.typeOfIndex(inst);
+ if (optional_ty.optionalReprIsPayload(zcu)) {
+ return operand_id;
+ }
+
+ const payload_id = try cg.convertToIndirect(payload_ty, operand_id);
+ const members = [_]Id{ payload_id, try cg.constBool(true, .indirect) };
+ const optional_ty_id = try cg.resolveType(optional_ty, .direct);
+ return try cg.constructComposite(optional_ty_id, &members);
+}
+
+fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const gpa = cg.module.gpa;
+ const pt = cg.pt;
+ const zcu = cg.module.zcu;
+ const target = cg.module.zcu.getTarget();
+ const switch_br = cg.air.unwrapSwitch(inst);
+ const cond_ty = cg.typeOf(switch_br.operand);
+ const cond = try cg.resolve(switch_br.operand);
+ var cond_indirect = try cg.convertToIndirect(cond_ty, cond);
+
+ const cond_words: u32 = switch (cond_ty.zigTypeTag(zcu)) {
+ .bool, .error_set => 1,
+ .int => blk: {
+ const bits = cond_ty.intInfo(zcu).bits;
+ const backing_bits, const big_int = cg.module.backingIntBits(bits);
+ if (big_int) return cg.todo("implement composite int switch", .{});
+ break :blk if (backing_bits <= 32) 1 else 2;
+ },
+ .@"enum" => blk: {
+ const int_ty = cond_ty.intTagType(zcu);
+ const int_info = int_ty.intInfo(zcu);
+ const backing_bits, const big_int = cg.module.backingIntBits(int_info.bits);
+ if (big_int) return cg.todo("implement composite int switch", .{});
+ break :blk if (backing_bits <= 32) 1 else 2;
+ },
+ .pointer => blk: {
+ cond_indirect = try cg.intFromPtr(cond_indirect);
+ break :blk target.ptrBitWidth() / 32;
+ },
+ // TODO: Figure out which types apply here, and work around them as we can only do integers.
+ else => return cg.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(zcu))}),
+ };
+
+ const num_cases = switch_br.cases_len;
+
+    // Compute the total number of case conditions that we need.
+    // Zig switches are grouped by condition, so we need to loop through all of them.
+ const num_conditions = blk: {
+ var num_conditions: u32 = 0;
+ var it = switch_br.iterateCases();
+ while (it.next()) |case| {
+ if (case.ranges.len > 0) return cg.todo("switch with ranges", .{});
+ num_conditions += @intCast(case.items.len);
+ }
+ break :blk num_conditions;
+ };
+
+ // First, pre-allocate the labels for the cases.
+ const case_labels = cg.module.allocIds(num_cases);
+    // We always need the default case; if Zig has none, we will generate unreachable there.
+ const default = cg.module.allocId();
+
+ const merge_label = switch (cg.control_flow) {
+ .structured => cg.module.allocId(),
+ .unstructured => null,
+ };
+
+ if (cg.control_flow == .structured) {
+ try cg.body.emit(cg.module.gpa, .OpSelectionMerge, .{
+ .merge_block = merge_label.?,
+ .selection_control = .{},
+ });
+ }
+
+ // Emit the instruction before generating the blocks.
+ try cg.body.emitRaw(cg.module.gpa, .OpSwitch, 2 + (cond_words + 1) * num_conditions);
+ cg.body.writeOperand(Id, cond_indirect);
+ cg.body.writeOperand(Id, default);
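+    // The operand count passed to emitRaw above: the selector and the default label
+    // take the first 2 words; each case item then contributes its literal
+    // (`cond_words` words) plus 1 word for its target label.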
+
+ // Emit each of the cases
+ {
+ var it = switch_br.iterateCases();
+ while (it.next()) |case| {
+            // SPIR-V needs a literal here, whose width depends on the case condition.
+ const label = case_labels.at(case.idx);
+
+ for (case.items) |item| {
+ const value = (try cg.air.value(item, pt)) orelse unreachable;
+ const int_val: u64 = switch (cond_ty.zigTypeTag(zcu)) {
+ .bool, .int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
+ .@"enum" => blk: {
+                        // TODO: figure out if cond_ty is correct (something with enum literals)
+ break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(zcu); // TODO: composite integer constants
+ },
+ .error_set => value.getErrorInt(zcu),
+ .pointer => value.toUnsignedInt(zcu),
+ else => unreachable,
+ };
+ const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
+ 1 => .{ .uint32 = @intCast(int_val) },
+ 2 => .{ .uint64 = int_val },
+ else => unreachable,
+ };
+ cg.body.writeOperand(spec.LiteralContextDependentNumber, int_lit);
+ cg.body.writeOperand(Id, label);
+ }
+ }
+ }
+
+ var incoming_structured_blocks: std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming) = .empty;
+ defer incoming_structured_blocks.deinit(gpa);
+
+ if (cg.control_flow == .structured) {
+ try incoming_structured_blocks.ensureUnusedCapacity(gpa, num_cases + 1);
+ }
+
+ // Now, finally, we can start emitting each of the cases.
+ var it = switch_br.iterateCases();
+ while (it.next()) |case| {
+ const label = case_labels.at(case.idx);
+
+ try cg.beginSpvBlock(label);
+
+ switch (cg.control_flow) {
+ .structured => {
+ const next_block = try cg.genStructuredBody(.selection, case.body);
+ incoming_structured_blocks.appendAssumeCapacity(.{
+ .src_label = cg.block_label,
+ .next_block = next_block,
+ });
+ try cg.body.emitBranch(cg.module.gpa, merge_label.?);
+ },
+ .unstructured => {
+ try cg.genBody(case.body);
+ },
+ }
+ }
+
+ const else_body = it.elseBody();
+ try cg.beginSpvBlock(default);
+ if (else_body.len != 0) {
+ switch (cg.control_flow) {
+ .structured => {
+ const next_block = try cg.genStructuredBody(.selection, else_body);
+ incoming_structured_blocks.appendAssumeCapacity(.{
+ .src_label = cg.block_label,
+ .next_block = next_block,
+ });
+ try cg.body.emitBranch(cg.module.gpa, merge_label.?);
+ },
+ .unstructured => {
+ try cg.genBody(else_body);
+ },
+ }
+ } else {
+ try cg.body.emit(cg.module.gpa, .OpUnreachable, {});
+ }
+
+ if (cg.control_flow == .structured) {
+ try cg.beginSpvBlock(merge_label.?);
+ const next_block = try cg.structuredNextBlock(incoming_structured_blocks.items);
+ try cg.structuredBreak(next_block);
+ }
+}
+
+fn airUnreach(cg: *CodeGen) !void {
+ try cg.body.emit(cg.module.gpa, .OpUnreachable, {});
+}
+
+fn airDbgStmt(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const zcu = cg.module.zcu;
+ const dbg_stmt = cg.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
+ const path = zcu.navFileScope(cg.owner_nav).sub_file_path;
+
+ if (zcu.comp.config.root_strip) return;
+
+ try cg.body.emit(cg.module.gpa, .OpLine, .{
+ .file = try cg.module.debugString(path),
+ .line = cg.base_line + dbg_stmt.line + 1,
+ .column = dbg_stmt.column + 1,
+ });
+}
+
+fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const zcu = cg.module.zcu;
+ const inst_datas = cg.air.instructions.items(.data);
+ const extra = cg.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
+ const old_base_line = cg.base_line;
+ defer cg.base_line = old_base_line;
+ cg.base_line = zcu.navSrcLine(zcu.funcInfo(extra.data.func).owner_nav);
+ return cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
+}
+
+fn airDbgVar(cg: *CodeGen, inst: Air.Inst.Index) !void {
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const target_id = try cg.resolve(pl_op.operand);
+ const name: Air.NullTerminatedString = @enumFromInt(pl_op.payload);
+ try cg.module.debugName(target_id, name.toSlice(cg.air));
+}
+
+fn airAssembly(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = cg.air.extraData(Air.Asm, ty_pl.payload);
+
+ const is_volatile = extra.data.flags.is_volatile;
+ const outputs_len = extra.data.flags.outputs_len;
+
+ if (!is_volatile and cg.liveness.isUnused(inst)) return null;
+
+ var extra_i: usize = extra.end;
+ const outputs: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra_i..][0..outputs_len]);
+ extra_i += outputs.len;
+ const inputs: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra_i..][0..extra.data.inputs_len]);
+ extra_i += inputs.len;
+
+ if (outputs.len > 1) {
+ return cg.todo("implement inline asm with more than 1 output", .{});
+ }
+
+ var as: Assembler = .{ .cg = cg };
+ defer as.deinit();
+
+ var output_extra_i = extra_i;
+ for (outputs) |output| {
+ if (output != .none) {
+ return cg.todo("implement inline asm with non-returned output", .{});
+ }
+ const extra_bytes = std.mem.sliceAsBytes(cg.air.extra.items[extra_i..]);
+        const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+ // TODO: Record output and use it somewhere.
+ }
+
+ for (inputs) |input| {
+ const extra_bytes = std.mem.sliceAsBytes(cg.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_i += (constraint.len + name.len + (2 + 3)) / 4;
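+        // For example (illustrative): constraint "c" (len 1) and name "len" (len 3)
+        // occupy 1 + 1 + 3 + 1 = 6 bytes, so (1 + 3 + 5) / 4 = 2 words are skipped.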
+
+ const input_ty = cg.typeOf(input);
+
+ if (std.mem.eql(u8, constraint, "c")) {
+ // constant
+ const val = (try cg.air.value(input, cg.pt)) orelse {
+ return cg.fail("assembly inputs with 'c' constraint have to be compile-time known", .{});
+ };
+
+ // TODO: This entire function should be handled a bit better...
+ const ip = &zcu.intern_pool;
+ switch (ip.indexToKey(val.toIntern())) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef => return cg.fail("assembly input with 'c' constraint cannot be undefined", .{}),
+
+ .int => try as.value_map.put(gpa, name, .{ .constant = @intCast(val.toUnsignedInt(zcu)) }),
+ .enum_literal => |str| try as.value_map.put(gpa, name, .{ .string = str.toSlice(ip) }),
+
+ else => unreachable, // TODO
+ }
+ } else if (std.mem.eql(u8, constraint, "t")) {
+ // type
+ if (input_ty.zigTypeTag(zcu) == .type) {
+ // This assembly input is a type instead of a value.
+ // That's fine for now, just make sure to resolve it as such.
+ const val = (try cg.air.value(input, cg.pt)).?;
+ const ty_id = try cg.resolveType(val.toType(), .direct);
+ try as.value_map.put(gpa, name, .{ .ty = ty_id });
+ } else {
+ const ty_id = try cg.resolveType(input_ty, .direct);
+ try as.value_map.put(gpa, name, .{ .ty = ty_id });
+ }
+ } else {
+ if (input_ty.zigTypeTag(zcu) == .type) {
+ return cg.fail("use the 't' constraint to supply types to SPIR-V inline assembly", .{});
+ }
+
+ const val_id = try cg.resolve(input);
+ try as.value_map.put(gpa, name, .{ .value = val_id });
+ }
+ }
+
+ // TODO: do something with clobbers
+ _ = extra.data.clobbers;
+
+ const asm_source = std.mem.sliceAsBytes(cg.air.extra.items[extra_i..])[0..extra.data.source_len];
+
+ as.assemble(asm_source) catch |err| switch (err) {
+ error.AssembleFail => {
+ // TODO: For now the compiler only supports a single error message per decl,
+ // so to translate the possible multiple errors from the assembler, emit
+ // them as notes here.
+ // TODO: Translate proper error locations.
+ assert(as.errors.items.len != 0);
+ assert(cg.error_msg == null);
+ const src_loc = zcu.navSrcLoc(cg.owner_nav);
+ cg.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
+ const notes = try zcu.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len);
+
+ // Sub-scope to prevent `return error.CodegenFail` from running the errdefers.
+ {
+ errdefer zcu.gpa.free(notes);
+ var i: usize = 0;
+ errdefer for (notes[0..i]) |*note| {
+ note.deinit(zcu.gpa);
+ };
+
+ while (i < as.errors.items.len) : (i += 1) {
+ notes[i] = try Zcu.ErrorMsg.init(zcu.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
+ }
+ }
+ cg.error_msg.?.notes = notes;
+ return error.CodegenFail;
+ },
+ else => |others| return others,
+ };
+
+ for (outputs) |output| {
+ _ = output;
+ const extra_bytes = std.mem.sliceAsBytes(cg.air.extra.items[output_extra_i..]);
+        const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ output_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+        const result = as.value_map.get(name) orelse
+            return cg.fail("invalid asm output '{s}'", .{name});
+
+ switch (result) {
+ .just_declared, .unresolved_forward_reference => unreachable,
+ .ty => return cg.fail("cannot return spir-v type as value from assembly", .{}),
+ .value => |ref| return ref,
+ .constant, .string => return cg.fail("cannot return constant from assembly", .{}),
+ }
+
+ // TODO: Multiple results
+ // TODO: Check that the output type from assembly is the same as the type actually expected by Zig.
+ }
+
+ return null;
+}
+
+fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?Id {
+ _ = modifier;
+
+ const gpa = cg.module.gpa;
+ const zcu = cg.module.zcu;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const extra = cg.air.extraData(Air.Call, pl_op.payload);
+ const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.args_len]);
+ const callee_ty = cg.typeOf(pl_op.operand);
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
+ .@"fn" => callee_ty,
+ .pointer => return cg.fail("cannot call function pointers", .{}),
+ else => unreachable,
+ };
+ const fn_info = zcu.typeToFunc(zig_fn_ty).?;
+ const return_type = fn_info.return_type;
+
+ const result_type_id = try cg.resolveFnReturnType(.fromInterned(return_type));
+ const result_id = cg.module.allocId();
+ const callee_id = try cg.resolve(pl_op.operand);
+
+ comptime assert(zig_call_abi_ver == 3);
+ const params = try gpa.alloc(Id, args.len);
+ defer gpa.free(params);
+ var n_params: usize = 0;
+ for (args) |arg| {
+ // Note: resolve() might emit instructions, so we need to call it
+ // before starting to emit OpFunctionCall instructions. Hence the
+ // temporary params buffer.
+ const arg_ty = cg.typeOf(arg);
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+ const arg_id = try cg.resolve(arg);
+
+ params[n_params] = arg_id;
+ n_params += 1;
+ }
+
+ try cg.body.emit(cg.module.gpa, .OpFunctionCall, .{
+ .id_result_type = result_type_id,
+ .id_result = result_id,
+ .function = callee_id,
+ .id_ref_3 = params[0..n_params],
+ });
+
+ if (cg.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(zcu)) {
+ return null;
+ }
+
+ return result_id;
+}
+
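+/// Loads a 3-component u32 built-in vector (e.g. LocalInvocationId or WorkgroupId)
+/// through an Input pointer and extracts the component selected by `dimension`,
+/// or returns `out_of_range_value` as a constant for dimensions beyond the third.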
+fn builtin3D(
+ cg: *CodeGen,
+ result_ty: Type,
+ builtin: spec.BuiltIn,
+ dimension: u32,
+ out_of_range_value: anytype,
+) !Id {
+ if (dimension >= 3) return try cg.constInt(result_ty, out_of_range_value);
+ const u32_ty_id = try cg.module.intType(.unsigned, 32);
+ const vec_ty_id = try cg.module.vectorType(3, u32_ty_id);
+ const ptr_ty_id = try cg.module.ptrType(vec_ty_id, .input);
+ const spv_decl_index = try cg.module.builtin(ptr_ty_id, builtin, .input);
+ try cg.decl_deps.put(cg.module.gpa, spv_decl_index, {});
+ const ptr_id = cg.module.declPtr(spv_decl_index).result_id;
+ const vec_id = cg.module.allocId();
+ try cg.body.emit(cg.module.gpa, .OpLoad, .{
+ .id_result_type = vec_ty_id,
+ .id_result = vec_id,
+ .pointer = ptr_id,
+ });
+ return try cg.extractVectorComponent(result_ty, vec_id, dimension);
+}
+
+fn airWorkItemId(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ if (cg.liveness.isUnused(inst)) return null;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const dimension = pl_op.payload;
+ return try cg.builtin3D(.u32, .local_invocation_id, dimension, 0);
+}
+
+// TODO: this must be an OpConstant/OpSpecConstant, but even then the driver crashes.
+fn airWorkGroupSize(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ if (cg.liveness.isUnused(inst)) return null;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const dimension = pl_op.payload;
+ return try cg.builtin3D(.u32, .workgroup_id, dimension, 0);
+}
+
+fn airWorkGroupId(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
+ if (cg.liveness.isUnused(inst)) return null;
+ const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const dimension = pl_op.payload;
+ return try cg.builtin3D(.u32, .workgroup_id, dimension, 0);
+}
+
+fn typeOf(cg: *CodeGen, inst: Air.Inst.Ref) Type {
+ const zcu = cg.module.zcu;
+ return cg.air.typeOf(inst, &zcu.intern_pool);
+}
+
+fn typeOfIndex(cg: *CodeGen, inst: Air.Inst.Index) Type {
+ const zcu = cg.module.zcu;
+ return cg.air.typeOfIndex(inst, &zcu.intern_pool);
+}
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
new file mode 100644
index 0000000000..acdbf376da
--- /dev/null
+++ b/src/codegen/spirv/Module.zig
@@ -0,0 +1,955 @@
+//! This structure represents a SPIR-V module being compiled, split into its sections,
+//! and keeps track of all relevant information. That includes the actual instructions,
+//! the current result-id bound, and data structures for querying result-ids of data
+//! that needs to persist across different calls to Decl code generation.
+//!
+//! A SPIR-V binary module supports both little- and big-endian layout; consumers detect
+//! the layout from the magic word in the header. Therefore, we can ignore byte order
+//! throughout the implementation, just use the host byte order, and leave this as a
+//! problem for the consumer.
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+const Zcu = @import("../../Zcu.zig");
+const InternPool = @import("../../InternPool.zig");
+const Section = @import("Section.zig");
+const spec = @import("spec.zig");
+const Word = spec.Word;
+const Id = spec.Id;
+
+const Module = @This();
+
+gpa: Allocator,
+arena: Allocator,
+zcu: *Zcu,
+nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, Decl.Index) = .empty,
+uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, spec.StorageClass }, Decl.Index) = .empty,
+intern_map: std.AutoHashMapUnmanaged(struct { InternPool.Index, Repr }, Id) = .empty,
+decls: std.ArrayListUnmanaged(Decl) = .empty,
+decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
+entry_points: std.AutoArrayHashMapUnmanaged(Id, EntryPoint) = .empty,
+/// This map serves a dual purpose:
+/// - It keeps track of pointers that are currently being emitted, so that we can tell
+/// if they are recursive and need an OpTypeForwardPointer.
+/// - It caches pointers by child-type. This is required because sometimes we rely on
+/// ID-equality for pointers, and pointers constructed via `ptrType()` aren't interned
+/// via the usual `intern_map` mechanism.
+ptr_types: std.AutoHashMapUnmanaged(struct { Id, spec.StorageClass }, Id) = .{},
+/// For test declarations compiled for the Vulkan target, we have to add a buffer.
+/// We only need to generate this once; this holds the link information related to that.
+error_buffer: ?Decl.Index = null,
+/// SPIR-V instructions return result-ids.
+/// This variable holds the module-wide counter for these.
+next_result_id: Word = 1,
+/// Some types shouldn't be emitted more than one time, but cannot be caught by
+/// the `intern_map` during codegen. Sometimes, IDs are compared to check if
+/// types are the same, so we can't delay until the dedup pass. Therefore,
+/// this is an ad-hoc structure to cache types where required.
+/// According to the SPIR-V specification, section 2.8, this includes all non-aggregate
+/// non-pointer types.
+/// Additionally, this is used for other values which can be cached, for example,
+/// built-in variables.
+cache: struct {
+ bool_type: ?Id = null,
+ void_type: ?Id = null,
+ opaque_types: std.StringHashMapUnmanaged(Id) = .empty,
+ int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, Id) = .empty,
+ float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, Id) = .empty,
+ vector_types: std.AutoHashMapUnmanaged(struct { Id, u32 }, Id) = .empty,
+ array_types: std.AutoHashMapUnmanaged(struct { Id, Id }, Id) = .empty,
+ struct_types: std.ArrayHashMapUnmanaged(StructType, Id, StructType.HashContext, true) = .empty,
+ fn_types: std.ArrayHashMapUnmanaged(FnType, Id, FnType.HashContext, true) = .empty,
+
+ capabilities: std.AutoHashMapUnmanaged(spec.Capability, void) = .empty,
+ extensions: std.StringHashMapUnmanaged(void) = .empty,
+ extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, Id) = .empty,
+ decorations: std.AutoHashMapUnmanaged(struct { Id, spec.Decoration }, void) = .empty,
+ builtins: std.AutoHashMapUnmanaged(struct { spec.BuiltIn, spec.StorageClass }, Decl.Index) = .empty,
+ strings: std.StringArrayHashMapUnmanaged(Id) = .empty,
+
+ bool_const: [2]?Id = .{ null, null },
+ constants: std.ArrayHashMapUnmanaged(Constant, Id, Constant.HashContext, true) = .empty,
+} = .{},
+/// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
+sections: struct {
+ capabilities: Section = .{},
+ extensions: Section = .{},
+ extended_instruction_set: Section = .{},
+ memory_model: Section = .{},
+ execution_modes: Section = .{},
+ debug_strings: Section = .{},
+ debug_names: Section = .{},
+ annotations: Section = .{},
+ globals: Section = .{},
+ functions: Section = .{},
+} = .{},
+
+pub const big_int_bits = 32;
+
+/// Data can be lowered in two basic representations: indirect, which is how a value
+/// is stored in memory, and direct, which is how a value is represented when it is
+/// used in operations.
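+///
+/// For example (illustrative): a `bool` is an OpTypeBool in its direct form, but is
+/// presumably stored as a small integer in its indirect form, since OpTypeBool has no
+/// defined physical size; see `convertToDirect`/`convertToIndirect` in CodeGen.zig.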
+pub const Repr = enum {
+ /// A SPIR-V value as it would be used in operations.
+ direct,
+ /// A SPIR-V value as it is stored in memory.
+ indirect,
+};
+
+/// Declarations, both functions and globals, can have dependencies. These are used for 2 things:
+/// - Globals must be declared before they are used, which also holds between globals themselves.
+///   The compiler processes globals in no particular order, so we must use the dependencies here
+///   to figure out how to order the globals in the final module. The Globals structure is also
+///   used for that.
+/// - Entry points must declare the complete list of OpVariable instructions that they access.
+///   For these we use the same dependency structure.
+/// In this mechanism, globals will only depend on other globals, while functions may depend on
+/// globals or other functions.
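+///
+/// For example (illustrative): a function that uses a built-in variable records that
+/// global's `Decl.Index` as a dependency (see `builtin3D` in CodeGen.zig), and
+/// `addEntryPointDeps` later walks the recorded dependencies transitively to build the
+/// OpEntryPoint interface list.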
+pub const Decl = struct {
+ /// Index to refer to a Decl by.
+ pub const Index = enum(u32) { _ };
+
+    /// Useful to tell what kind of decl this is, and which result-id or field index
+    /// is to be used for this decl.
+ pub const Kind = enum {
+ func,
+ global,
+ invocation_global,
+ };
+
+ /// See comment on Kind
+ kind: Kind,
+ /// The result-id associated to this decl. The specific meaning of this depends on `kind`:
+ /// - For `func`, this is the result-id of the associated OpFunction instruction.
+ /// - For `global`, this is the result-id of the associated OpVariable instruction.
+ /// - For `invocation_global`, this is the result-id of the associated InvocationGlobal instruction.
+ result_id: Id,
+ /// The offset of the first dependency of this decl in the `decl_deps` array.
+ begin_dep: u32,
+ /// The past-end offset of the dependencies of this decl in the `decl_deps` array.
+ end_dep: u32,
+};
+
+/// This models a kernel entry point.
+pub const EntryPoint = struct {
+ /// The declaration that should be exported.
+ decl_index: Decl.Index,
+ /// The name of the kernel to be exported.
+ name: []const u8,
+    /// Calling convention, i.e. the SPIR-V execution model.
+ exec_model: spec.ExecutionModel,
+ exec_mode: ?spec.ExecutionMode = null,
+};
+
+const StructType = struct {
+ fields: []const Id,
+ ip_index: InternPool.Index,
+
+ const HashContext = struct {
+ pub fn hash(_: @This(), ty: StructType) u32 {
+ var hasher = std.hash.Wyhash.init(0);
+ hasher.update(std.mem.sliceAsBytes(ty.fields));
+ hasher.update(std.mem.asBytes(&ty.ip_index));
+ return @truncate(hasher.final());
+ }
+
+ pub fn eql(_: @This(), a: StructType, b: StructType, _: usize) bool {
+ return a.ip_index == b.ip_index and std.mem.eql(Id, a.fields, b.fields);
+ }
+ };
+};
+
+const FnType = struct {
+ return_ty: Id,
+ params: []const Id,
+
+ const HashContext = struct {
+ pub fn hash(_: @This(), ty: FnType) u32 {
+ var hasher = std.hash.Wyhash.init(0);
+ hasher.update(std.mem.asBytes(&ty.return_ty));
+ hasher.update(std.mem.sliceAsBytes(ty.params));
+ return @truncate(hasher.final());
+ }
+
+ pub fn eql(_: @This(), a: FnType, b: FnType, _: usize) bool {
+ return a.return_ty == b.return_ty and
+ std.mem.eql(Id, a.params, b.params);
+ }
+ };
+};
+
+const Constant = struct {
+ ty: Id,
+ value: spec.LiteralContextDependentNumber,
+
+ const HashContext = struct {
+ pub fn hash(_: @This(), value: Constant) u32 {
+ const Tag = @typeInfo(spec.LiteralContextDependentNumber).@"union".tag_type.?;
+ var hasher = std.hash.Wyhash.init(0);
+ hasher.update(std.mem.asBytes(&value.ty));
+ hasher.update(std.mem.asBytes(&@as(Tag, value.value)));
+ switch (value.value) {
+ inline else => |v| hasher.update(std.mem.asBytes(&v)),
+ }
+ return @truncate(hasher.final());
+ }
+
+ pub fn eql(_: @This(), a: Constant, b: Constant, _: usize) bool {
+ if (a.ty != b.ty) return false;
+ const Tag = @typeInfo(spec.LiteralContextDependentNumber).@"union".tag_type.?;
+ if (@as(Tag, a.value) != @as(Tag, b.value)) return false;
+ return switch (a.value) {
+ inline else => |v, tag| v == @field(b.value, @tagName(tag)),
+ };
+ }
+ };
+};
+
+pub fn deinit(module: *Module) void {
+ module.nav_link.deinit(module.gpa);
+ module.uav_link.deinit(module.gpa);
+ module.intern_map.deinit(module.gpa);
+ module.ptr_types.deinit(module.gpa);
+
+ module.sections.capabilities.deinit(module.gpa);
+ module.sections.extensions.deinit(module.gpa);
+ module.sections.extended_instruction_set.deinit(module.gpa);
+ module.sections.memory_model.deinit(module.gpa);
+ module.sections.execution_modes.deinit(module.gpa);
+ module.sections.debug_strings.deinit(module.gpa);
+ module.sections.debug_names.deinit(module.gpa);
+ module.sections.annotations.deinit(module.gpa);
+ module.sections.globals.deinit(module.gpa);
+ module.sections.functions.deinit(module.gpa);
+
+ module.cache.opaque_types.deinit(module.gpa);
+ module.cache.int_types.deinit(module.gpa);
+ module.cache.float_types.deinit(module.gpa);
+ module.cache.vector_types.deinit(module.gpa);
+ module.cache.array_types.deinit(module.gpa);
+ module.cache.struct_types.deinit(module.gpa);
+ module.cache.fn_types.deinit(module.gpa);
+ module.cache.capabilities.deinit(module.gpa);
+ module.cache.extensions.deinit(module.gpa);
+ module.cache.extended_instruction_set.deinit(module.gpa);
+ module.cache.decorations.deinit(module.gpa);
+ module.cache.builtins.deinit(module.gpa);
+ module.cache.strings.deinit(module.gpa);
+
+ module.cache.constants.deinit(module.gpa);
+
+ module.decls.deinit(module.gpa);
+ module.decl_deps.deinit(module.gpa);
+ module.entry_points.deinit(module.gpa);
+
+ module.* = undefined;
+}
+
+/// Fetch or allocate a result id for a nav index. This function also marks the nav as alive.
+/// Note: this does not actually generate the nav; it just allocates an index.
+pub fn resolveNav(module: *Module, ip: *InternPool, nav_index: InternPool.Nav.Index) !Decl.Index {
+ const entry = try module.nav_link.getOrPut(module.gpa, nav_index);
+ if (!entry.found_existing) {
+ const nav = ip.getNav(nav_index);
+ // TODO: Extern fn?
+ const kind: Decl.Kind = if (ip.isFunctionType(nav.typeOf(ip)))
+ .func
+ else switch (nav.getAddrspace()) {
+ .generic => .invocation_global,
+ else => .global,
+ };
+
+ entry.value_ptr.* = try module.allocDecl(kind);
+ }
+
+ return entry.value_ptr.*;
+}
+
+pub fn allocIds(module: *Module, n: u32) spec.IdRange {
+ defer module.next_result_id += n;
+ return .{ .base = module.next_result_id, .len = n };
+}
+
+pub fn allocId(module: *Module) Id {
+ return module.allocIds(1).at(0);
+}
+
+pub fn idBound(module: Module) Word {
+ return module.next_result_id;
+}
+
+pub fn addEntryPointDeps(
+ module: *Module,
+ decl_index: Decl.Index,
+ seen: *std.DynamicBitSetUnmanaged,
+ interface: *std.ArrayList(Id),
+) !void {
+ const decl = module.declPtr(decl_index);
+ const deps = module.decl_deps.items[decl.begin_dep..decl.end_dep];
+
+ if (seen.isSet(@intFromEnum(decl_index))) {
+ return;
+ }
+
+ seen.set(@intFromEnum(decl_index));
+
+ if (decl.kind == .global) {
+ try interface.append(decl.result_id);
+ }
+
+ for (deps) |dep| {
+ try module.addEntryPointDeps(dep, seen, interface);
+ }
+}
+
+fn entryPoints(module: *Module) !Section {
+ const target = module.zcu.getTarget();
+
+ var entry_points = Section{};
+ errdefer entry_points.deinit(module.gpa);
+
+ var interface = std.ArrayList(Id).init(module.gpa);
+ defer interface.deinit();
+
+ var seen = try std.DynamicBitSetUnmanaged.initEmpty(module.gpa, module.decls.items.len);
+ defer seen.deinit(module.gpa);
+
+ for (module.entry_points.keys(), module.entry_points.values()) |entry_point_id, entry_point| {
+ interface.items.len = 0;
+ seen.setRangeValue(.{ .start = 0, .end = module.decls.items.len }, false);
+
+ try module.addEntryPointDeps(entry_point.decl_index, &seen, &interface);
+ try entry_points.emit(module.gpa, .OpEntryPoint, .{
+ .execution_model = entry_point.exec_model,
+ .entry_point = entry_point_id,
+ .name = entry_point.name,
+ .interface = interface.items,
+ });
+
+ if (entry_point.exec_mode == null and entry_point.exec_model == .fragment) {
+ switch (target.os.tag) {
+ .vulkan, .opengl => |tag| {
+ try module.sections.execution_modes.emit(module.gpa, .OpExecutionMode, .{
+ .entry_point = entry_point_id,
+ .mode = if (tag == .vulkan) .origin_upper_left else .origin_lower_left,
+ });
+ },
+ .opencl => {},
+ else => unreachable,
+ }
+ }
+ }
+
+ return entry_points;
+}
+
+pub fn finalize(module: *Module, gpa: Allocator) ![]Word {
+ const target = module.zcu.getTarget();
+
+ // Emit capabilities and extensions
+ switch (target.os.tag) {
+ .opengl => {
+ try module.addCapability(.shader);
+ try module.addCapability(.matrix);
+ },
+ .vulkan => {
+ try module.addCapability(.shader);
+ try module.addCapability(.matrix);
+ if (target.cpu.arch == .spirv64) {
+ try module.addExtension("SPV_KHR_physical_storage_buffer");
+ try module.addCapability(.physical_storage_buffer_addresses);
+ }
+ },
+ .opencl, .amdhsa => {
+ try module.addCapability(.kernel);
+ try module.addCapability(.addresses);
+ },
+ else => unreachable,
+ }
+ if (target.cpu.arch == .spirv64) try module.addCapability(.int64);
+ if (target.cpu.has(.spirv, .int64)) try module.addCapability(.int64);
+ if (target.cpu.has(.spirv, .float16)) {
+ if (target.os.tag == .opencl) try module.addExtension("cl_khr_fp16");
+ try module.addCapability(.float16);
+ }
+ if (target.cpu.has(.spirv, .float64)) try module.addCapability(.float64);
+ if (target.cpu.has(.spirv, .generic_pointer)) try module.addCapability(.generic_pointer);
+ if (target.cpu.has(.spirv, .vector16)) try module.addCapability(.vector16);
+ if (target.cpu.has(.spirv, .storage_push_constant16)) {
+ try module.addExtension("SPV_KHR_16bit_storage");
+ try module.addCapability(.storage_push_constant16);
+ }
+ if (target.cpu.has(.spirv, .arbitrary_precision_integers)) {
+ try module.addExtension("SPV_INTEL_arbitrary_precision_integers");
+ try module.addCapability(.arbitrary_precision_integers_intel);
+ }
+ if (target.cpu.has(.spirv, .variable_pointers)) {
+ try module.addExtension("SPV_KHR_variable_pointers");
+ try module.addCapability(.variable_pointers_storage_buffer);
+ try module.addCapability(.variable_pointers);
+ }
+ // These are well supported
+ try module.addCapability(.int8);
+ try module.addCapability(.int16);
+
+ // Emit memory model
+ const addressing_model: spec.AddressingModel = switch (target.os.tag) {
+ .opengl => .logical,
+ .vulkan => if (target.cpu.arch == .spirv32) .logical else .physical_storage_buffer64,
+ .opencl => if (target.cpu.arch == .spirv32) .physical32 else .physical64,
+ .amdhsa => .physical64,
+ else => unreachable,
+ };
+ try module.sections.memory_model.emit(module.gpa, .OpMemoryModel, .{
+ .addressing_model = addressing_model,
+ .memory_model = switch (target.os.tag) {
+ .opencl => .open_cl,
+ .vulkan, .opengl => .glsl450,
+ else => unreachable,
+ },
+ });
+
+ var entry_points = try module.entryPoints();
+ defer entry_points.deinit(module.gpa);
+
+ const version: spec.Version = .{
+ .major = 1,
+ .minor = blk: {
+ // Prefer higher versions
+ if (target.cpu.has(.spirv, .v1_6)) break :blk 6;
+ if (target.cpu.has(.spirv, .v1_5)) break :blk 5;
+ if (target.cpu.has(.spirv, .v1_4)) break :blk 4;
+ if (target.cpu.has(.spirv, .v1_3)) break :blk 3;
+ if (target.cpu.has(.spirv, .v1_2)) break :blk 2;
+ if (target.cpu.has(.spirv, .v1_1)) break :blk 1;
+ break :blk 0;
+ },
+ };
+
+ const header = [_]Word{
+ spec.magic_number,
+ version.toWord(),
+ spec.zig_generator_id,
+ module.idBound(),
+ 0, // Schema (currently reserved for future use)
+ };
+
+ var source = Section{};
+ defer source.deinit(module.gpa);
+ try module.sections.debug_strings.emit(module.gpa, .OpSource, .{
+ .source_language = .zig,
+ .version = 0,
+ // We cannot emit these because the Khronos translator does not parse this instruction
+ // correctly.
+ // See https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/2188
+ .file = null,
+ .source = null,
+ });
+
+ // Note: needs to be kept in order according to section 2.3!
+ const buffers = &[_][]const Word{
+ &header,
+ module.sections.capabilities.toWords(),
+ module.sections.extensions.toWords(),
+ module.sections.extended_instruction_set.toWords(),
+ module.sections.memory_model.toWords(),
+ entry_points.toWords(),
+ module.sections.execution_modes.toWords(),
+ source.toWords(),
+ module.sections.debug_strings.toWords(),
+ module.sections.debug_names.toWords(),
+ module.sections.annotations.toWords(),
+ module.sections.globals.toWords(),
+ module.sections.functions.toWords(),
+ };
+
+ var total_result_size: usize = 0;
+ for (buffers) |buffer| {
+ total_result_size += buffer.len;
+ }
+ const result = try gpa.alloc(Word, total_result_size);
+ errdefer comptime unreachable;
+
+ var offset: usize = 0;
+ for (buffers) |buffer| {
+ @memcpy(result[offset..][0..buffer.len], buffer);
+ offset += buffer.len;
+ }
+
+ return result;
+}
+
+pub fn addCapability(module: *Module, cap: spec.Capability) !void {
+ const entry = try module.cache.capabilities.getOrPut(module.gpa, cap);
+ if (entry.found_existing) return;
+ try module.sections.capabilities.emit(module.gpa, .OpCapability, .{ .capability = cap });
+}
+
+pub fn addExtension(module: *Module, ext: []const u8) !void {
+ const entry = try module.cache.extensions.getOrPut(module.gpa, ext);
+ if (entry.found_existing) return;
+ try module.sections.extensions.emit(module.gpa, .OpExtension, .{ .name = ext });
+}
+
+/// Imports or returns the existing id of an extended instruction set
+pub fn importInstructionSet(module: *Module, set: spec.InstructionSet) !Id {
+ assert(set != .core);
+
+ const gop = try module.cache.extended_instruction_set.getOrPut(module.gpa, set);
+ if (gop.found_existing) return gop.value_ptr.*;
+
+ const result_id = module.allocId();
+ try module.sections.extended_instruction_set.emit(module.gpa, .OpExtInstImport, .{
+ .id_result = result_id,
+ .name = @tagName(set),
+ });
+ gop.value_ptr.* = result_id;
+
+ return result_id;
+}
+
+pub fn boolType(module: *Module) !Id {
+ if (module.cache.bool_type) |id| return id;
+
+ const result_id = module.allocId();
+ try module.sections.globals.emit(module.gpa, .OpTypeBool, .{
+ .id_result = result_id,
+ });
+ module.cache.bool_type = result_id;
+ return result_id;
+}
+
+pub fn voidType(module: *Module) !Id {
+ if (module.cache.void_type) |id| return id;
+
+ const result_id = module.allocId();
+ try module.sections.globals.emit(module.gpa, .OpTypeVoid, .{
+ .id_result = result_id,
+ });
+ module.cache.void_type = result_id;
+ try module.debugName(result_id, "void");
+ return result_id;
+}
+
+pub fn opaqueType(module: *Module, name: []const u8) !Id {
+ if (module.cache.opaque_types.get(name)) |id| return id;
+ const result_id = module.allocId();
+ const name_dup = try module.arena.dupe(u8, name);
+ try module.sections.globals.emit(module.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = name_dup,
+ });
+ try module.debugName(result_id, name_dup);
+ try module.cache.opaque_types.put(module.gpa, name_dup, result_id);
+ return result_id;
+}
+
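+/// Returns the number of bits used to back an integer of `bits` bits, and whether it
+/// has to be lowered as a composite ("big") integer. For example (illustrative, absent
+/// the arbitrary-precision-integers feature): 12 bits are backed by 16 bits, and 48 bits
+/// are backed by 64 bits when the `int64` feature (or a 64-bit SPIR-V architecture) is
+/// available; otherwise the width is rounded up to a multiple of 32 and flagged as a
+/// big integer.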
+pub fn backingIntBits(module: *Module, bits: u16) struct { u16, bool } {
+ assert(bits != 0);
+ const target = module.zcu.getTarget();
+
+ if (target.cpu.has(.spirv, .arbitrary_precision_integers) and bits <= 32) {
+ return .{ bits, false };
+ }
+
+    // We require the Int8 and Int16 capabilities and benefit from Int64 when available.
+ // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
+ const ints = [_]struct { bits: u16, enabled: bool }{
+ .{ .bits = 8, .enabled = true },
+ .{ .bits = 16, .enabled = true },
+ .{ .bits = 32, .enabled = true },
+ .{
+ .bits = 64,
+ .enabled = target.cpu.has(.spirv, .int64) or target.cpu.arch == .spirv64,
+ },
+ };
+
+ for (ints) |int| {
+ if (bits <= int.bits and int.enabled) return .{ int.bits, false };
+ }
+
+ // Big int
+ return .{ std.mem.alignForward(u16, bits, big_int_bits), true };
+}
+
+pub fn intType(module: *Module, signedness: std.builtin.Signedness, bits: u16) !Id {
+ assert(bits > 0);
+
+ const target = module.zcu.getTarget();
+ const actual_signedness = switch (target.os.tag) {
+ // Kernel only supports unsigned ints.
+ .opencl, .amdhsa => .unsigned,
+ else => signedness,
+ };
+ const backing_bits, const big_int = module.backingIntBits(bits);
+ if (big_int) {
+ // TODO: support composite integers larger than 64 bit
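+        // For example (illustrative): a 48-bit integer without the `int64` feature is
+        // backed by 64 bits and lowered as an array of two u32 elements.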
+ assert(backing_bits <= 64);
+ const u32_ty = try module.intType(.unsigned, 32);
+ const len_id = try module.constant(u32_ty, .{ .uint32 = backing_bits / big_int_bits });
+ return module.arrayType(len_id, u32_ty);
+ }
+
+ const entry = try module.cache.int_types.getOrPut(module.gpa, .{ .signedness = actual_signedness, .bits = backing_bits });
+ if (!entry.found_existing) {
+ const result_id = module.allocId();
+ entry.value_ptr.* = result_id;
+ try module.sections.globals.emit(module.gpa, .OpTypeInt, .{
+ .id_result = result_id,
+ .width = backing_bits,
+ .signedness = switch (actual_signedness) {
+ .signed => 1,
+ .unsigned => 0,
+ },
+ });
+
+ switch (actual_signedness) {
+ .signed => try module.debugNameFmt(result_id, "i{}", .{backing_bits}),
+ .unsigned => try module.debugNameFmt(result_id, "u{}", .{backing_bits}),
+ }
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn floatType(module: *Module, bits: u16) !Id {
+ assert(bits > 0);
+ const entry = try module.cache.float_types.getOrPut(module.gpa, .{ .bits = bits });
+ if (!entry.found_existing) {
+ const result_id = module.allocId();
+ entry.value_ptr.* = result_id;
+ try module.sections.globals.emit(module.gpa, .OpTypeFloat, .{
+ .id_result = result_id,
+ .width = bits,
+ });
+ try module.debugNameFmt(result_id, "f{}", .{bits});
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn vectorType(module: *Module, len: u32, child_ty_id: Id) !Id {
+ const entry = try module.cache.vector_types.getOrPut(module.gpa, .{ child_ty_id, len });
+ if (!entry.found_existing) {
+ const result_id = module.allocId();
+ entry.value_ptr.* = result_id;
+ try module.sections.globals.emit(module.gpa, .OpTypeVector, .{
+ .id_result = result_id,
+ .component_type = child_ty_id,
+ .component_count = len,
+ });
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn arrayType(module: *Module, len_id: Id, child_ty_id: Id) !Id {
+ const entry = try module.cache.array_types.getOrPut(module.gpa, .{ child_ty_id, len_id });
+ if (!entry.found_existing) {
+ const result_id = module.allocId();
+ entry.value_ptr.* = result_id;
+ try module.sections.globals.emit(module.gpa, .OpTypeArray, .{
+ .id_result = result_id,
+ .element_type = child_ty_id,
+ .length = len_id,
+ });
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn ptrType(module: *Module, child_ty_id: Id, storage_class: spec.StorageClass) !Id {
+ const key = .{ child_ty_id, storage_class };
+ const gop = try module.ptr_types.getOrPut(module.gpa, key);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = module.allocId();
+ try module.sections.globals.emit(module.gpa, .OpTypePointer, .{
+ .id_result = gop.value_ptr.*,
+ .storage_class = storage_class,
+ .type = child_ty_id,
+ });
+ return gop.value_ptr.*;
+ }
+ return gop.value_ptr.*;
+}
+
+pub fn structType(
+ module: *Module,
+ types: []const Id,
+ maybe_names: ?[]const []const u8,
+ maybe_offsets: ?[]const u32,
+ ip_index: InternPool.Index,
+) !Id {
+ const target = module.zcu.getTarget();
+
+ if (module.cache.struct_types.get(.{ .fields = types, .ip_index = ip_index })) |id| return id;
+ const result_id = module.allocId();
+ const types_dup = try module.arena.dupe(Id, types);
+ try module.sections.globals.emit(module.gpa, .OpTypeStruct, .{
+ .id_result = result_id,
+ .id_ref = types_dup,
+ });
+
+ if (maybe_names) |names| {
+ assert(names.len == types.len);
+ for (names, 0..) |name, i| {
+ try module.memberDebugName(result_id, @intCast(i), name);
+ }
+ }
+
+ switch (target.os.tag) {
+ .vulkan, .opengl => {
+ if (maybe_offsets) |offsets| {
+ assert(offsets.len == types.len);
+ for (offsets, 0..) |offset, i| {
+ try module.decorateMember(
+ result_id,
+ @intCast(i),
+ .{ .offset = .{ .byte_offset = offset } },
+ );
+ }
+ }
+ },
+ else => {},
+ }
+
+ try module.cache.struct_types.put(
+ module.gpa,
+ .{
+ .fields = types_dup,
+ .ip_index = if (module.zcu.comp.config.root_strip) .none else ip_index,
+ },
+ result_id,
+ );
+ return result_id;
+}
+
+pub fn functionType(module: *Module, return_ty_id: Id, param_type_ids: []const Id) !Id {
+ if (module.cache.fn_types.get(.{
+ .return_ty = return_ty_id,
+ .params = param_type_ids,
+ })) |id| return id;
+ const result_id = module.allocId();
+ const params_dup = try module.arena.dupe(Id, param_type_ids);
+ try module.sections.globals.emit(module.gpa, .OpTypeFunction, .{
+ .id_result = result_id,
+ .return_type = return_ty_id,
+ .id_ref_2 = params_dup,
+ });
+ try module.cache.fn_types.put(module.gpa, .{
+ .return_ty = return_ty_id,
+ .params = params_dup,
+ }, result_id);
+ return result_id;
+}
+
+pub fn constant(module: *Module, ty_id: Id, value: spec.LiteralContextDependentNumber) !Id {
+ const gop = try module.cache.constants.getOrPut(module.gpa, .{ .ty = ty_id, .value = value });
+ if (!gop.found_existing) {
+ gop.value_ptr.* = module.allocId();
+ try module.sections.globals.emit(module.gpa, .OpConstant, .{
+ .id_result_type = ty_id,
+ .id_result = gop.value_ptr.*,
+ .value = value,
+ });
+ }
+ return gop.value_ptr.*;
+}
+
+pub fn constBool(module: *Module, value: bool) !Id {
+ if (module.cache.bool_const[@intFromBool(value)]) |b| return b;
+
+ const result_ty_id = try module.boolType();
+ const result_id = module.allocId();
+ module.cache.bool_const[@intFromBool(value)] = result_id;
+
+ switch (value) {
+ inline else => |value_ct| try module.sections.globals.emit(
+ module.gpa,
+ if (value_ct) .OpConstantTrue else .OpConstantFalse,
+ .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ },
+ ),
+ }
+
+ return result_id;
+}
+
+pub fn builtin(
+ module: *Module,
+ result_ty_id: Id,
+ spirv_builtin: spec.BuiltIn,
+ storage_class: spec.StorageClass,
+) !Decl.Index {
+ const gop = try module.cache.builtins.getOrPut(module.gpa, .{ spirv_builtin, storage_class });
+ if (!gop.found_existing) {
+ const decl_index = try module.allocDecl(.global);
+ const result_id = module.declPtr(decl_index).result_id;
+ gop.value_ptr.* = decl_index;
+ try module.sections.globals.emit(module.gpa, .OpVariable, .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ .storage_class = storage_class,
+ });
+ try module.decorate(result_id, .{ .built_in = .{ .built_in = spirv_builtin } });
+ try module.declareDeclDeps(decl_index, &.{});
+ }
+ return gop.value_ptr.*;
+}
+
+pub fn constUndef(module: *Module, ty_id: Id) !Id {
+ const result_id = module.allocId();
+ try module.sections.globals.emit(module.gpa, .OpUndef, .{
+ .id_result_type = ty_id,
+ .id_result = result_id,
+ });
+ return result_id;
+}
+
+pub fn constNull(module: *Module, ty_id: Id) !Id {
+ const result_id = module.allocId();
+ try module.sections.globals.emit(module.gpa, .OpConstantNull, .{
+ .id_result_type = ty_id,
+ .id_result = result_id,
+ });
+ return result_id;
+}
+
+/// Decorate a result-id.
+pub fn decorate(
+ module: *Module,
+ target: Id,
+ decoration: spec.Decoration.Extended,
+) !void {
+ const gop = try module.cache.decorations.getOrPut(module.gpa, .{ target, decoration });
+ if (!gop.found_existing) {
+ try module.sections.annotations.emit(module.gpa, .OpDecorate, .{
+ .target = target,
+ .decoration = decoration,
+ });
+ }
+}
+
+/// Decorate a result-id which is a member of some struct.
+/// Unlike `decorate`, there is no need to cache this.
+pub fn decorateMember(
+ module: *Module,
+ structure_type: Id,
+ member: u32,
+ decoration: spec.Decoration.Extended,
+) !void {
+ try module.sections.annotations.emit(module.gpa, .OpMemberDecorate, .{
+ .structure_type = structure_type,
+ .member = member,
+ .decoration = decoration,
+ });
+}
+
+pub fn allocDecl(module: *Module, kind: Decl.Kind) !Decl.Index {
+ try module.decls.append(module.gpa, .{
+ .kind = kind,
+ .result_id = module.allocId(),
+ .begin_dep = undefined,
+ .end_dep = undefined,
+ });
+
+ return @as(Decl.Index, @enumFromInt(@as(u32, @intCast(module.decls.items.len - 1))));
+}
+
+pub fn declPtr(module: *Module, index: Decl.Index) *Decl {
+ return &module.decls.items[@intFromEnum(index)];
+}
+
+/// Declare ALL dependencies for a decl.
+pub fn declareDeclDeps(module: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void {
+ const begin_dep: u32 = @intCast(module.decl_deps.items.len);
+ try module.decl_deps.appendSlice(module.gpa, deps);
+ const end_dep: u32 = @intCast(module.decl_deps.items.len);
+
+ const decl = module.declPtr(decl_index);
+ decl.begin_dep = begin_dep;
+ decl.end_dep = end_dep;
+}
+
+/// Declare a SPIR-V function as an entry point. This causes an extra wrapper
+/// function to be generated, which is then exported as the real entry point. The purpose of this
+/// wrapper is to allocate and initialize the structure holding the instance globals.
+pub fn declareEntryPoint(
+ module: *Module,
+ decl_index: Decl.Index,
+ name: []const u8,
+ exec_model: spec.ExecutionModel,
+ exec_mode: ?spec.ExecutionMode,
+) !void {
+ const gop = try module.entry_points.getOrPut(module.gpa, module.declPtr(decl_index).result_id);
+ gop.value_ptr.decl_index = decl_index;
+ gop.value_ptr.name = name;
+ gop.value_ptr.exec_model = exec_model;
+ // Might've been set by assembler
+ if (!gop.found_existing) gop.value_ptr.exec_mode = exec_mode;
+}
+
+pub fn debugName(module: *Module, target: Id, name: []const u8) !void {
+ try module.sections.debug_names.emit(module.gpa, .OpName, .{
+ .target = target,
+ .name = name,
+ });
+}
+
+pub fn debugNameFmt(module: *Module, target: Id, comptime fmt: []const u8, args: anytype) !void {
+ const name = try std.fmt.allocPrint(module.gpa, fmt, args);
+ defer module.gpa.free(name);
+ try module.debugName(target, name);
+}
+
+pub fn memberDebugName(module: *Module, target: Id, member: u32, name: []const u8) !void {
+ try module.sections.debug_names.emit(module.gpa, .OpMemberName, .{
+ .type = target,
+ .member = member,
+ .name = name,
+ });
+}
+
+pub fn debugString(module: *Module, string: []const u8) !Id {
+ const entry = try module.cache.strings.getOrPut(module.gpa, string);
+ if (!entry.found_existing) {
+ entry.value_ptr.* = module.allocId();
+ try module.sections.debug_strings.emit(module.gpa, .OpString, .{
+ .id_result = entry.value_ptr.*,
+ .string = string,
+ });
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn storageClass(module: *Module, as: std.builtin.AddressSpace) spec.StorageClass {
+ const target = module.zcu.getTarget();
+ return switch (as) {
+ .generic => .function,
+ .global => switch (target.os.tag) {
+ .opencl, .amdhsa => .cross_workgroup,
+ else => .storage_buffer,
+ },
+ .push_constant => .push_constant,
+ .output => .output,
+ .uniform => .uniform,
+ .storage_buffer => .storage_buffer,
+ .physical_storage_buffer => .physical_storage_buffer,
+ .constant => .uniform_constant,
+ .shared => .workgroup,
+ .local => .function,
+ .input => .input,
+ .gs,
+ .fs,
+ .ss,
+ .param,
+ .flash,
+ .flash1,
+ .flash2,
+ .flash3,
+ .flash4,
+ .flash5,
+ .cog,
+ .lut,
+ .hub,
+ => unreachable,
+ };
+}
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
new file mode 100644
index 0000000000..b5851c3b7c
--- /dev/null
+++ b/src/codegen/spirv/Section.zig
@@ -0,0 +1,282 @@
+//! Represents a section or subsection of instructions in a SPIR-V binary. Instructions can be
+//! appended to separate sections, which can then later be merged into the final binary.
+const Section = @This();
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const testing = std.testing;
+
+const spec = @import("spec.zig");
+const Word = spec.Word;
+const DoubleWord = std.meta.Int(.unsigned, @bitSizeOf(Word) * 2);
+const Log2Word = std.math.Log2Int(Word);
+
+const Opcode = spec.Opcode;
+
+instructions: std.ArrayListUnmanaged(Word) = .empty,
+
+pub fn deinit(section: *Section, allocator: Allocator) void {
+ section.instructions.deinit(allocator);
+ section.* = undefined;
+}
+
+pub fn reset(section: *Section) void {
+ section.instructions.items.len = 0;
+}
+
+pub fn toWords(section: Section) []Word {
+ return section.instructions.items;
+}
+
+/// Append the instructions from another section into this section.
+pub fn append(section: *Section, allocator: Allocator, other_section: Section) !void {
+ try section.instructions.appendSlice(allocator, other_section.instructions.items);
+}
+
+pub fn ensureUnusedCapacity(
+ section: *Section,
+ allocator: Allocator,
+ words: usize,
+) !void {
+ try section.instructions.ensureUnusedCapacity(allocator, words);
+}
+
+/// Write an instruction's opcode and total word count; the operands are to be inserted manually.
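+/// For example (illustrative): `section.emitRaw(gpa, .OpUndef, 2)` reserves 3 words and
+/// writes the header word `(3 << 16) | @intFromEnum(Opcode.OpUndef)`; the caller then
+/// supplies the result-type and result ids via `writeOperand`.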
+pub fn emitRaw(
+ section: *Section,
+ allocator: Allocator,
+ opcode: Opcode,
+ operand_words: usize,
+) !void {
+ const word_count = 1 + operand_words;
+ try section.instructions.ensureUnusedCapacity(allocator, word_count);
+ section.writeWord((@as(Word, @intCast(word_count << 16))) | @intFromEnum(opcode));
+}
+
+/// Write an entire instruction, including all operands
+pub fn emitRawInstruction(
+ section: *Section,
+ allocator: Allocator,
+ opcode: Opcode,
+ operands: []const Word,
+) !void {
+ try section.emitRaw(allocator, opcode, operands.len);
+ section.writeWords(operands);
+}
+
+pub fn emitAssumeCapacity(
+ section: *Section,
+ comptime opcode: spec.Opcode,
+ operands: opcode.Operands(),
+) !void {
+ const word_count = instructionSize(opcode, operands);
+ section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode));
+ section.writeOperands(opcode.Operands(), operands);
+}
+
+pub fn emit(
+ section: *Section,
+ allocator: Allocator,
+ comptime opcode: spec.Opcode,
+ operands: opcode.Operands(),
+) !void {
+ const word_count = instructionSize(opcode, operands);
+ try section.instructions.ensureUnusedCapacity(allocator, word_count);
+ section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode));
+ section.writeOperands(opcode.Operands(), operands);
+}
+
+pub fn emitBranch(
+ section: *Section,
+ allocator: Allocator,
+ target_label: spec.Id,
+) !void {
+ try section.emit(allocator, .OpBranch, .{
+ .target_label = target_label,
+ });
+}
+
+pub fn writeWord(section: *Section, word: Word) void {
+ section.instructions.appendAssumeCapacity(word);
+}
+
+pub fn writeWords(section: *Section, words: []const Word) void {
+ section.instructions.appendSliceAssumeCapacity(words);
+}
+
+pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
+ section.writeWords(&.{
+ @truncate(dword),
+ @truncate(dword >> @bitSizeOf(Word)),
+ });
+}
+
+fn writeOperands(section: *Section, comptime Operands: type, operands: Operands) void {
+ const fields = switch (@typeInfo(Operands)) {
+ .@"struct" => |info| info.fields,
+ .void => return,
+ else => unreachable,
+ };
+ inline for (fields) |field| {
+ section.writeOperand(field.type, @field(operands, field.name));
+ }
+}
+
+pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) void {
+ switch (Operand) {
+ spec.LiteralSpecConstantOpInteger => unreachable,
+ spec.Id => section.writeWord(@intFromEnum(operand)),
+ spec.LiteralInteger => section.writeWord(operand),
+ spec.LiteralString => section.writeString(operand),
+ spec.LiteralContextDependentNumber => section.writeContextDependentNumber(operand),
+ spec.LiteralExtInstInteger => section.writeWord(operand.inst),
+        spec.PairLiteralIntegerIdRef => section.writeWords(&.{ operand.value, @intFromEnum(operand.label) }),
+ spec.PairIdRefLiteralInteger => section.writeWords(&.{ @intFromEnum(operand.target), operand.member }),
+ spec.PairIdRefIdRef => section.writeWords(&.{ @intFromEnum(operand[0]), @intFromEnum(operand[1]) }),
+ else => switch (@typeInfo(Operand)) {
+ .@"enum" => section.writeWord(@intFromEnum(operand)),
+ .optional => |info| if (operand) |child| section.writeOperand(info.child, child),
+ .pointer => |info| {
+ std.debug.assert(info.size == .slice); // Should be no other pointer types in the spec.
+ for (operand) |item| {
+ section.writeOperand(info.child, item);
+ }
+ },
+ .@"struct" => |info| {
+ if (info.layout == .@"packed") {
+ section.writeWord(@as(Word, @bitCast(operand)));
+ } else {
+ section.writeExtendedMask(Operand, operand);
+ }
+ },
+ .@"union" => section.writeExtendedUnion(Operand, operand),
+ else => unreachable,
+ },
+ }
+}
+
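+/// Write a string literal: the bytes plus a terminating zero are packed little-endian into words, and any unused bytes of the final word stay zero.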
+fn writeString(section: *Section, str: []const u8) void {
+ const zero_terminated_len = str.len + 1;
+ var i: usize = 0;
+ while (i < zero_terminated_len) : (i += @sizeOf(Word)) {
+ var word: Word = 0;
+ var j: usize = 0;
+ while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
+ word |= @as(Word, str[i + j]) << @as(Log2Word, @intCast(j * @bitSizeOf(u8)));
+ }
+ section.instructions.appendAssumeCapacity(word);
+ }
+}
+
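+/// Write a context-dependent numeric literal: 32-bit values take one word, 64-bit values take two (low-order word first).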
+fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
+ switch (operand) {
+ .int32 => |int| section.writeWord(@bitCast(int)),
+ .uint32 => |int| section.writeWord(@bitCast(int)),
+ .int64 => |int| section.writeDoubleWord(@bitCast(int)),
+ .uint64 => |int| section.writeDoubleWord(@bitCast(int)),
+ .float32 => |float| section.writeWord(@bitCast(float)),
+ .float64 => |float| section.writeDoubleWord(@bitCast(float)),
+ }
+}
+
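+/// Write a bit-enum operand that is not a simple packed mask: one word holding the combined flag bits, followed by the parameters of each set flag in field order.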
+fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
+ var mask: Word = 0;
+ inline for (@typeInfo(Operand).@"struct".fields, 0..) |field, bit| {
+ switch (@typeInfo(field.type)) {
+ .optional => if (@field(operand, field.name) != null) {
+ mask |= 1 << @as(u5, @intCast(bit));
+ },
+ .bool => if (@field(operand, field.name)) {
+ mask |= 1 << @as(u5, @intCast(bit));
+ },
+ else => unreachable,
+ }
+ }
+
+ section.writeWord(mask);
+
+ inline for (@typeInfo(Operand).@"struct".fields) |field| {
+ switch (@typeInfo(field.type)) {
+ .optional => |info| if (@field(operand, field.name)) |child| {
+ section.writeOperands(info.child, child);
+ },
+ .bool => {},
+ else => unreachable,
+ }
+ }
+}
+
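+/// Write a value-enum operand that carries parameters: the enumerant's value word followed by the operands of the active variant.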
+fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operand) void {
+ return switch (operand) {
+ inline else => |op, tag| {
+ section.writeWord(@intFromEnum(tag));
+ section.writeOperands(
+ @FieldType(Operand, @tagName(tag)),
+ op,
+ );
+ },
+ };
+}
+
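+/// Size of the full instruction in words: all operands plus one word for the opcode and word count.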
+fn instructionSize(comptime opcode: spec.Opcode, operands: opcode.Operands()) usize {
+ return operandsSize(opcode.Operands(), operands) + 1;
+}
+
+fn operandsSize(comptime Operands: type, operands: Operands) usize {
+ const fields = switch (@typeInfo(Operands)) {
+ .@"struct" => |info| info.fields,
+ .void => return 0,
+ else => unreachable,
+ };
+
+ var total: usize = 0;
+ inline for (fields) |field| {
+ total += operandSize(field.type, @field(operands, field.name));
+ }
+
+ return total;
+}
+
+fn operandSize(comptime Operand: type, operand: Operand) usize {
+ return switch (Operand) {
+ spec.LiteralSpecConstantOpInteger => unreachable,
+ spec.Id, spec.LiteralInteger, spec.LiteralExtInstInteger => 1,
+ spec.LiteralString => std.math.divCeil(usize, operand.len + 1, @sizeOf(Word)) catch unreachable,
+ spec.LiteralContextDependentNumber => switch (operand) {
+ .int32, .uint32, .float32 => 1,
+ .int64, .uint64, .float64 => 2,
+ },
+ spec.PairLiteralIntegerIdRef, spec.PairIdRefLiteralInteger, spec.PairIdRefIdRef => 2,
+ else => switch (@typeInfo(Operand)) {
+ .@"enum" => 1,
+ .optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
+ .pointer => |info| blk: {
+ std.debug.assert(info.size == .slice); // Should be no other pointer types in the spec.
+ var total: usize = 0;
+ for (operand) |item| {
+ total += operandSize(info.child, item);
+ }
+ break :blk total;
+ },
+ .@"struct" => |struct_info| {
+ if (struct_info.layout == .@"packed") return 1;
+
+ var total: usize = 0;
+ inline for (@typeInfo(Operand).@"struct".fields) |field| {
+ switch (@typeInfo(field.type)) {
+ .optional => |info| if (@field(operand, field.name)) |child| {
+ total += operandsSize(info.child, child);
+ },
+ .bool => {},
+ else => unreachable,
+ }
+ }
+ return total + 1; // Add one for the mask itself.
+ },
+ .@"union" => switch (operand) {
+ inline else => |op, tag| operandsSize(@FieldType(Operand, @tagName(tag)), op) + 1,
+ },
+ else => unreachable,
+ },
+ };
+}
diff --git a/src/codegen/spirv/extinst.zig.grammar.json b/src/codegen/spirv/extinst.zig.grammar.json
new file mode 100644
index 0000000000..ea8c5f7729
--- /dev/null
+++ b/src/codegen/spirv/extinst.zig.grammar.json
@@ -0,0 +1,11 @@
+{
+ "version": 0,
+ "revision": 0,
+ "instructions": [
+ {
+ "opname": "InvocationGlobal",
+ "opcode": 0,
+ "operands": [{ "kind": "IdRef", "name": "initializer function" }]
+ }
+ ]
+}
diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig
new file mode 100644
index 0000000000..92f37d33c5
--- /dev/null
+++ b/src/codegen/spirv/spec.zig
@@ -0,0 +1,18428 @@
+//! This file is auto-generated by tools/gen_spirv_spec.zig.
+
+const std = @import("std");
+
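+/// Version word of a SPIR-V module header: the low and high bytes are reserved (zero), the minor version sits in bits 8..15 and the major version in bits 16..23.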
+pub const Version = packed struct(Word) {
+ padding: u8 = 0,
+ minor: u8,
+ major: u8,
+ padding0: u8 = 0,
+
+ pub fn toWord(self: @This()) Word {
+ return @bitCast(self);
+ }
+};
+
+pub const Word = u32;
+pub const Id = enum(Word) {
+ none,
+ _,
+
+ pub fn format(self: Id, writer: *std.io.Writer) std.io.Writer.Error!void {
+ switch (self) {
+ .none => try writer.writeAll("(none)"),
+ else => try writer.print("%{d}", .{@intFromEnum(self)}),
+ }
+ }
+};
+
+pub const IdRange = struct {
+ base: u32,
+ len: u32,
+
+ pub fn at(range: IdRange, i: usize) Id {
+ std.debug.assert(i < range.len);
+ return @enumFromInt(range.base + i);
+ }
+};
+
+pub const LiteralInteger = Word;
+pub const LiteralFloat = Word;
+pub const LiteralString = []const u8;
+pub const LiteralContextDependentNumber = union(enum) {
+ int32: i32,
+ uint32: u32,
+ int64: i64,
+ uint64: u64,
+ float32: f32,
+ float64: f64,
+};
+pub const LiteralExtInstInteger = struct { inst: Word };
+pub const LiteralSpecConstantOpInteger = struct { opcode: Opcode };
+pub const PairLiteralIntegerIdRef = struct { value: LiteralInteger, label: Id };
+pub const PairIdRefLiteralInteger = struct { target: Id, member: LiteralInteger };
+pub const PairIdRefIdRef = [2]Id;
+
+pub const Quantifier = enum {
+ required,
+ optional,
+ variadic,
+};
+
+pub const Operand = struct {
+ kind: OperandKind,
+ quantifier: Quantifier,
+};
+
+pub const OperandCategory = enum {
+ bit_enum,
+ value_enum,
+ id,
+ literal,
+ composite,
+};
+
+pub const Enumerant = struct {
+ name: []const u8,
+ value: Word,
+ parameters: []const OperandKind,
+};
+
+pub const Instruction = struct {
+ name: []const u8,
+ opcode: Word,
+ operands: []const Operand,
+};
+
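+/// Generator magic number for the Zig compiler, as registered in the Khronos SPIR-V XML registry; intended for the generator field of the module header.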
+pub const zig_generator_id: Word = 41;
+pub const version: Version = .{ .major = 1, .minor = 6 };
+pub const magic_number: Word = 0x07230203;
+
+pub const Class = enum {
+ miscellaneous,
+ debug,
+ extension,
+ mode_setting,
+ type_declaration,
+ constant_creation,
+ function,
+ memory,
+ annotation,
+ composite,
+ image,
+ conversion,
+ arithmetic,
+ relational_and_logical,
+ bit,
+ derivative,
+ primitive,
+ barrier,
+ atomic,
+ control_flow,
+ group,
+ pipe,
+ device_side_enqueue,
+ non_uniform,
+ tensor,
+ graph,
+ reserved,
+};
+
+pub const OperandKind = enum {
+ opcode,
+ image_operands,
+ fp_fast_math_mode,
+ selection_control,
+ loop_control,
+ function_control,
+ memory_semantics,
+ memory_access,
+ kernel_profiling_info,
+ ray_flags,
+ fragment_shading_rate,
+ raw_access_chain_operands,
+ source_language,
+ execution_model,
+ addressing_model,
+ memory_model,
+ execution_mode,
+ storage_class,
+ dim,
+ sampler_addressing_mode,
+ sampler_filter_mode,
+ image_format,
+ image_channel_order,
+ image_channel_data_type,
+ fp_rounding_mode,
+ fp_denorm_mode,
+ quantization_modes,
+ fp_operation_mode,
+ overflow_modes,
+ linkage_type,
+ access_qualifier,
+ host_access_qualifier,
+ function_parameter_attribute,
+ decoration,
+ built_in,
+ scope,
+ group_operation,
+ kernel_enqueue_flags,
+ capability,
+ ray_query_intersection,
+ ray_query_committed_intersection_type,
+ ray_query_candidate_intersection_type,
+ packed_vector_format,
+ cooperative_matrix_operands,
+ cooperative_matrix_layout,
+ cooperative_matrix_use,
+ cooperative_matrix_reduce,
+ tensor_clamp_mode,
+ tensor_addressing_operands,
+ initialization_mode_qualifier,
+ load_cache_control,
+ store_cache_control,
+ named_maximum_number_of_registers,
+ matrix_multiply_accumulate_operands,
+ fp_encoding,
+ cooperative_vector_matrix_layout,
+ component_type,
+ id_result_type,
+ id_result,
+ id_memory_semantics,
+ id_scope,
+ id_ref,
+ literal_integer,
+ literal_string,
+ literal_float,
+ literal_context_dependent_number,
+ literal_ext_inst_integer,
+ literal_spec_constant_op_integer,
+ pair_literal_integer_id_ref,
+ pair_id_ref_literal_integer,
+ pair_id_ref_id_ref,
+ tensor_operands,
+ debug_info_debug_info_flags,
+ debug_info_debug_base_type_attribute_encoding,
+ debug_info_debug_composite_type,
+ debug_info_debug_type_qualifier,
+ debug_info_debug_operation,
+ open_cl_debug_info_100_debug_info_flags,
+ open_cl_debug_info_100_debug_base_type_attribute_encoding,
+ open_cl_debug_info_100_debug_composite_type,
+ open_cl_debug_info_100_debug_type_qualifier,
+ open_cl_debug_info_100_debug_operation,
+ open_cl_debug_info_100_debug_imported_entity,
+ non_semantic_clspv_reflection_6_kernel_property_flags,
+ non_semantic_shader_debug_info_100_debug_info_flags,
+ non_semantic_shader_debug_info_100_build_identifier_flags,
+ non_semantic_shader_debug_info_100_debug_base_type_attribute_encoding,
+ non_semantic_shader_debug_info_100_debug_composite_type,
+ non_semantic_shader_debug_info_100_debug_type_qualifier,
+ non_semantic_shader_debug_info_100_debug_operation,
+ non_semantic_shader_debug_info_100_debug_imported_entity,
+
+ pub fn category(self: OperandKind) OperandCategory {
+ return switch (self) {
+ .opcode => .literal,
+ .image_operands => .bit_enum,
+ .fp_fast_math_mode => .bit_enum,
+ .selection_control => .bit_enum,
+ .loop_control => .bit_enum,
+ .function_control => .bit_enum,
+ .memory_semantics => .bit_enum,
+ .memory_access => .bit_enum,
+ .kernel_profiling_info => .bit_enum,
+ .ray_flags => .bit_enum,
+ .fragment_shading_rate => .bit_enum,
+ .raw_access_chain_operands => .bit_enum,
+ .source_language => .value_enum,
+ .execution_model => .value_enum,
+ .addressing_model => .value_enum,
+ .memory_model => .value_enum,
+ .execution_mode => .value_enum,
+ .storage_class => .value_enum,
+ .dim => .value_enum,
+ .sampler_addressing_mode => .value_enum,
+ .sampler_filter_mode => .value_enum,
+ .image_format => .value_enum,
+ .image_channel_order => .value_enum,
+ .image_channel_data_type => .value_enum,
+ .fp_rounding_mode => .value_enum,
+ .fp_denorm_mode => .value_enum,
+ .quantization_modes => .value_enum,
+ .fp_operation_mode => .value_enum,
+ .overflow_modes => .value_enum,
+ .linkage_type => .value_enum,
+ .access_qualifier => .value_enum,
+ .host_access_qualifier => .value_enum,
+ .function_parameter_attribute => .value_enum,
+ .decoration => .value_enum,
+ .built_in => .value_enum,
+ .scope => .value_enum,
+ .group_operation => .value_enum,
+ .kernel_enqueue_flags => .value_enum,
+ .capability => .value_enum,
+ .ray_query_intersection => .value_enum,
+ .ray_query_committed_intersection_type => .value_enum,
+ .ray_query_candidate_intersection_type => .value_enum,
+ .packed_vector_format => .value_enum,
+ .cooperative_matrix_operands => .bit_enum,
+ .cooperative_matrix_layout => .value_enum,
+ .cooperative_matrix_use => .value_enum,
+ .cooperative_matrix_reduce => .bit_enum,
+ .tensor_clamp_mode => .value_enum,
+ .tensor_addressing_operands => .bit_enum,
+ .initialization_mode_qualifier => .value_enum,
+ .load_cache_control => .value_enum,
+ .store_cache_control => .value_enum,
+ .named_maximum_number_of_registers => .value_enum,
+ .matrix_multiply_accumulate_operands => .bit_enum,
+ .fp_encoding => .value_enum,
+ .cooperative_vector_matrix_layout => .value_enum,
+ .component_type => .value_enum,
+ .id_result_type => .id,
+ .id_result => .id,
+ .id_memory_semantics => .id,
+ .id_scope => .id,
+ .id_ref => .id,
+ .literal_integer => .literal,
+ .literal_string => .literal,
+ .literal_float => .literal,
+ .literal_context_dependent_number => .literal,
+ .literal_ext_inst_integer => .literal,
+ .literal_spec_constant_op_integer => .literal,
+ .pair_literal_integer_id_ref => .composite,
+ .pair_id_ref_literal_integer => .composite,
+ .pair_id_ref_id_ref => .composite,
+ .tensor_operands => .bit_enum,
+ .debug_info_debug_info_flags => .bit_enum,
+ .debug_info_debug_base_type_attribute_encoding => .value_enum,
+ .debug_info_debug_composite_type => .value_enum,
+ .debug_info_debug_type_qualifier => .value_enum,
+ .debug_info_debug_operation => .value_enum,
+ .open_cl_debug_info_100_debug_info_flags => .bit_enum,
+ .open_cl_debug_info_100_debug_base_type_attribute_encoding => .value_enum,
+ .open_cl_debug_info_100_debug_composite_type => .value_enum,
+ .open_cl_debug_info_100_debug_type_qualifier => .value_enum,
+ .open_cl_debug_info_100_debug_operation => .value_enum,
+ .open_cl_debug_info_100_debug_imported_entity => .value_enum,
+ .non_semantic_clspv_reflection_6_kernel_property_flags => .bit_enum,
+ .non_semantic_shader_debug_info_100_debug_info_flags => .bit_enum,
+ .non_semantic_shader_debug_info_100_build_identifier_flags => .bit_enum,
+ .non_semantic_shader_debug_info_100_debug_base_type_attribute_encoding => .value_enum,
+ .non_semantic_shader_debug_info_100_debug_composite_type => .value_enum,
+ .non_semantic_shader_debug_info_100_debug_type_qualifier => .value_enum,
+ .non_semantic_shader_debug_info_100_debug_operation => .value_enum,
+ .non_semantic_shader_debug_info_100_debug_imported_entity => .value_enum,
+ };
+ }
+ pub fn enumerants(self: OperandKind) []const Enumerant {
+ return switch (self) {
+ .opcode => unreachable,
+ .image_operands => &.{
+ .{ .name = "Bias", .value = 0x0001, .parameters = &.{.id_ref} },
+ .{ .name = "Lod", .value = 0x0002, .parameters = &.{.id_ref} },
+ .{ .name = "Grad", .value = 0x0004, .parameters = &.{ .id_ref, .id_ref } },
+ .{ .name = "ConstOffset", .value = 0x0008, .parameters = &.{.id_ref} },
+ .{ .name = "Offset", .value = 0x0010, .parameters = &.{.id_ref} },
+ .{ .name = "ConstOffsets", .value = 0x0020, .parameters = &.{.id_ref} },
+ .{ .name = "Sample", .value = 0x0040, .parameters = &.{.id_ref} },
+ .{ .name = "MinLod", .value = 0x0080, .parameters = &.{.id_ref} },
+ .{ .name = "MakeTexelAvailable", .value = 0x0100, .parameters = &.{.id_scope} },
+ .{ .name = "MakeTexelVisible", .value = 0x0200, .parameters = &.{.id_scope} },
+ .{ .name = "NonPrivateTexel", .value = 0x0400, .parameters = &.{} },
+ .{ .name = "VolatileTexel", .value = 0x0800, .parameters = &.{} },
+ .{ .name = "SignExtend", .value = 0x1000, .parameters = &.{} },
+ .{ .name = "ZeroExtend", .value = 0x2000, .parameters = &.{} },
+ .{ .name = "Nontemporal", .value = 0x4000, .parameters = &.{} },
+ .{ .name = "Offsets", .value = 0x10000, .parameters = &.{.id_ref} },
+ },
+ .fp_fast_math_mode => &.{
+ .{ .name = "NotNaN", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "NotInf", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "NSZ", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "AllowRecip", .value = 0x0008, .parameters = &.{} },
+ .{ .name = "Fast", .value = 0x0010, .parameters = &.{} },
+ .{ .name = "AllowContract", .value = 0x10000, .parameters = &.{} },
+ .{ .name = "AllowReassoc", .value = 0x20000, .parameters = &.{} },
+ .{ .name = "AllowTransform", .value = 0x40000, .parameters = &.{} },
+ },
+ .selection_control => &.{
+ .{ .name = "Flatten", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "DontFlatten", .value = 0x0002, .parameters = &.{} },
+ },
+ .loop_control => &.{
+ .{ .name = "Unroll", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "DontUnroll", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "DependencyInfinite", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "DependencyLength", .value = 0x0008, .parameters = &.{.literal_integer} },
+ .{ .name = "MinIterations", .value = 0x0010, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxIterations", .value = 0x0020, .parameters = &.{.literal_integer} },
+ .{ .name = "IterationMultiple", .value = 0x0040, .parameters = &.{.literal_integer} },
+ .{ .name = "PeelCount", .value = 0x0080, .parameters = &.{.literal_integer} },
+ .{ .name = "PartialCount", .value = 0x0100, .parameters = &.{.literal_integer} },
+ .{ .name = "InitiationIntervalINTEL", .value = 0x10000, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxConcurrencyINTEL", .value = 0x20000, .parameters = &.{.literal_integer} },
+ .{ .name = "DependencyArrayINTEL", .value = 0x40000, .parameters = &.{.literal_integer} },
+ .{ .name = "PipelineEnableINTEL", .value = 0x80000, .parameters = &.{.literal_integer} },
+ .{ .name = "LoopCoalesceINTEL", .value = 0x100000, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxInterleavingINTEL", .value = 0x200000, .parameters = &.{.literal_integer} },
+ .{ .name = "SpeculatedIterationsINTEL", .value = 0x400000, .parameters = &.{.literal_integer} },
+ .{ .name = "NoFusionINTEL", .value = 0x800000, .parameters = &.{} },
+ .{ .name = "LoopCountINTEL", .value = 0x1000000, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxReinvocationDelayINTEL", .value = 0x2000000, .parameters = &.{.literal_integer} },
+ },
+ .function_control => &.{
+ .{ .name = "Inline", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "DontInline", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "Pure", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "Const", .value = 0x0008, .parameters = &.{} },
+ .{ .name = "OptNoneEXT", .value = 0x10000, .parameters = &.{} },
+ },
+ .memory_semantics => &.{
+ .{ .name = "Relaxed", .value = 0x0000, .parameters = &.{} },
+ .{ .name = "Acquire", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "Release", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "AcquireRelease", .value = 0x0008, .parameters = &.{} },
+ .{ .name = "SequentiallyConsistent", .value = 0x0010, .parameters = &.{} },
+ .{ .name = "UniformMemory", .value = 0x0040, .parameters = &.{} },
+ .{ .name = "SubgroupMemory", .value = 0x0080, .parameters = &.{} },
+ .{ .name = "WorkgroupMemory", .value = 0x0100, .parameters = &.{} },
+ .{ .name = "CrossWorkgroupMemory", .value = 0x0200, .parameters = &.{} },
+ .{ .name = "AtomicCounterMemory", .value = 0x0400, .parameters = &.{} },
+ .{ .name = "ImageMemory", .value = 0x0800, .parameters = &.{} },
+ .{ .name = "OutputMemory", .value = 0x1000, .parameters = &.{} },
+ .{ .name = "MakeAvailable", .value = 0x2000, .parameters = &.{} },
+ .{ .name = "MakeVisible", .value = 0x4000, .parameters = &.{} },
+ .{ .name = "Volatile", .value = 0x8000, .parameters = &.{} },
+ },
+ .memory_access => &.{
+ .{ .name = "Volatile", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "Aligned", .value = 0x0002, .parameters = &.{.literal_integer} },
+ .{ .name = "Nontemporal", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "MakePointerAvailable", .value = 0x0008, .parameters = &.{.id_scope} },
+ .{ .name = "MakePointerVisible", .value = 0x0010, .parameters = &.{.id_scope} },
+ .{ .name = "NonPrivatePointer", .value = 0x0020, .parameters = &.{} },
+ .{ .name = "AliasScopeINTELMask", .value = 0x10000, .parameters = &.{.id_ref} },
+ .{ .name = "NoAliasINTELMask", .value = 0x20000, .parameters = &.{.id_ref} },
+ },
+ .kernel_profiling_info => &.{
+ .{ .name = "CmdExecTime", .value = 0x0001, .parameters = &.{} },
+ },
+ .ray_flags => &.{
+ .{ .name = "NoneKHR", .value = 0x0000, .parameters = &.{} },
+ .{ .name = "OpaqueKHR", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "NoOpaqueKHR", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "TerminateOnFirstHitKHR", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "SkipClosestHitShaderKHR", .value = 0x0008, .parameters = &.{} },
+ .{ .name = "CullBackFacingTrianglesKHR", .value = 0x0010, .parameters = &.{} },
+ .{ .name = "CullFrontFacingTrianglesKHR", .value = 0x0020, .parameters = &.{} },
+ .{ .name = "CullOpaqueKHR", .value = 0x0040, .parameters = &.{} },
+ .{ .name = "CullNoOpaqueKHR", .value = 0x0080, .parameters = &.{} },
+ .{ .name = "SkipTrianglesKHR", .value = 0x0100, .parameters = &.{} },
+ .{ .name = "SkipAABBsKHR", .value = 0x0200, .parameters = &.{} },
+ .{ .name = "ForceOpacityMicromap2StateEXT", .value = 0x0400, .parameters = &.{} },
+ },
+ .fragment_shading_rate => &.{
+ .{ .name = "Vertical2Pixels", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "Vertical4Pixels", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "Horizontal2Pixels", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "Horizontal4Pixels", .value = 0x0008, .parameters = &.{} },
+ },
+ .raw_access_chain_operands => &.{
+ .{ .name = "RobustnessPerComponentNV", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "RobustnessPerElementNV", .value = 0x0002, .parameters = &.{} },
+ },
+ .source_language => &.{
+ .{ .name = "Unknown", .value = 0, .parameters = &.{} },
+ .{ .name = "ESSL", .value = 1, .parameters = &.{} },
+ .{ .name = "GLSL", .value = 2, .parameters = &.{} },
+ .{ .name = "OpenCL_C", .value = 3, .parameters = &.{} },
+ .{ .name = "OpenCL_CPP", .value = 4, .parameters = &.{} },
+ .{ .name = "HLSL", .value = 5, .parameters = &.{} },
+ .{ .name = "CPP_for_OpenCL", .value = 6, .parameters = &.{} },
+ .{ .name = "SYCL", .value = 7, .parameters = &.{} },
+ .{ .name = "HERO_C", .value = 8, .parameters = &.{} },
+ .{ .name = "NZSL", .value = 9, .parameters = &.{} },
+ .{ .name = "WGSL", .value = 10, .parameters = &.{} },
+ .{ .name = "Slang", .value = 11, .parameters = &.{} },
+ .{ .name = "Zig", .value = 12, .parameters = &.{} },
+ .{ .name = "Rust", .value = 13, .parameters = &.{} },
+ },
+ .execution_model => &.{
+ .{ .name = "Vertex", .value = 0, .parameters = &.{} },
+ .{ .name = "TessellationControl", .value = 1, .parameters = &.{} },
+ .{ .name = "TessellationEvaluation", .value = 2, .parameters = &.{} },
+ .{ .name = "Geometry", .value = 3, .parameters = &.{} },
+ .{ .name = "Fragment", .value = 4, .parameters = &.{} },
+ .{ .name = "GLCompute", .value = 5, .parameters = &.{} },
+ .{ .name = "Kernel", .value = 6, .parameters = &.{} },
+ .{ .name = "TaskNV", .value = 5267, .parameters = &.{} },
+ .{ .name = "MeshNV", .value = 5268, .parameters = &.{} },
+ .{ .name = "RayGenerationKHR", .value = 5313, .parameters = &.{} },
+ .{ .name = "IntersectionKHR", .value = 5314, .parameters = &.{} },
+ .{ .name = "AnyHitKHR", .value = 5315, .parameters = &.{} },
+ .{ .name = "ClosestHitKHR", .value = 5316, .parameters = &.{} },
+ .{ .name = "MissKHR", .value = 5317, .parameters = &.{} },
+ .{ .name = "CallableKHR", .value = 5318, .parameters = &.{} },
+ .{ .name = "TaskEXT", .value = 5364, .parameters = &.{} },
+ .{ .name = "MeshEXT", .value = 5365, .parameters = &.{} },
+ },
+ .addressing_model => &.{
+ .{ .name = "Logical", .value = 0, .parameters = &.{} },
+ .{ .name = "Physical32", .value = 1, .parameters = &.{} },
+ .{ .name = "Physical64", .value = 2, .parameters = &.{} },
+ .{ .name = "PhysicalStorageBuffer64", .value = 5348, .parameters = &.{} },
+ },
+ .memory_model => &.{
+ .{ .name = "Simple", .value = 0, .parameters = &.{} },
+ .{ .name = "GLSL450", .value = 1, .parameters = &.{} },
+ .{ .name = "OpenCL", .value = 2, .parameters = &.{} },
+ .{ .name = "Vulkan", .value = 3, .parameters = &.{} },
+ },
+ .execution_mode => &.{
+ .{ .name = "Invocations", .value = 0, .parameters = &.{.literal_integer} },
+ .{ .name = "SpacingEqual", .value = 1, .parameters = &.{} },
+ .{ .name = "SpacingFractionalEven", .value = 2, .parameters = &.{} },
+ .{ .name = "SpacingFractionalOdd", .value = 3, .parameters = &.{} },
+ .{ .name = "VertexOrderCw", .value = 4, .parameters = &.{} },
+ .{ .name = "VertexOrderCcw", .value = 5, .parameters = &.{} },
+ .{ .name = "PixelCenterInteger", .value = 6, .parameters = &.{} },
+ .{ .name = "OriginUpperLeft", .value = 7, .parameters = &.{} },
+ .{ .name = "OriginLowerLeft", .value = 8, .parameters = &.{} },
+ .{ .name = "EarlyFragmentTests", .value = 9, .parameters = &.{} },
+ .{ .name = "PointMode", .value = 10, .parameters = &.{} },
+ .{ .name = "Xfb", .value = 11, .parameters = &.{} },
+ .{ .name = "DepthReplacing", .value = 12, .parameters = &.{} },
+ .{ .name = "DepthGreater", .value = 14, .parameters = &.{} },
+ .{ .name = "DepthLess", .value = 15, .parameters = &.{} },
+ .{ .name = "DepthUnchanged", .value = 16, .parameters = &.{} },
+ .{ .name = "LocalSize", .value = 17, .parameters = &.{ .literal_integer, .literal_integer, .literal_integer } },
+ .{ .name = "LocalSizeHint", .value = 18, .parameters = &.{ .literal_integer, .literal_integer, .literal_integer } },
+ .{ .name = "InputPoints", .value = 19, .parameters = &.{} },
+ .{ .name = "InputLines", .value = 20, .parameters = &.{} },
+ .{ .name = "InputLinesAdjacency", .value = 21, .parameters = &.{} },
+ .{ .name = "Triangles", .value = 22, .parameters = &.{} },
+ .{ .name = "InputTrianglesAdjacency", .value = 23, .parameters = &.{} },
+ .{ .name = "Quads", .value = 24, .parameters = &.{} },
+ .{ .name = "Isolines", .value = 25, .parameters = &.{} },
+ .{ .name = "OutputVertices", .value = 26, .parameters = &.{.literal_integer} },
+ .{ .name = "OutputPoints", .value = 27, .parameters = &.{} },
+ .{ .name = "OutputLineStrip", .value = 28, .parameters = &.{} },
+ .{ .name = "OutputTriangleStrip", .value = 29, .parameters = &.{} },
+ .{ .name = "VecTypeHint", .value = 30, .parameters = &.{.literal_integer} },
+ .{ .name = "ContractionOff", .value = 31, .parameters = &.{} },
+ .{ .name = "Initializer", .value = 33, .parameters = &.{} },
+ .{ .name = "Finalizer", .value = 34, .parameters = &.{} },
+ .{ .name = "SubgroupSize", .value = 35, .parameters = &.{.literal_integer} },
+ .{ .name = "SubgroupsPerWorkgroup", .value = 36, .parameters = &.{.literal_integer} },
+ .{ .name = "SubgroupsPerWorkgroupId", .value = 37, .parameters = &.{.id_ref} },
+ .{ .name = "LocalSizeId", .value = 38, .parameters = &.{ .id_ref, .id_ref, .id_ref } },
+ .{ .name = "LocalSizeHintId", .value = 39, .parameters = &.{ .id_ref, .id_ref, .id_ref } },
+ .{ .name = "NonCoherentColorAttachmentReadEXT", .value = 4169, .parameters = &.{} },
+ .{ .name = "NonCoherentDepthAttachmentReadEXT", .value = 4170, .parameters = &.{} },
+ .{ .name = "NonCoherentStencilAttachmentReadEXT", .value = 4171, .parameters = &.{} },
+ .{ .name = "SubgroupUniformControlFlowKHR", .value = 4421, .parameters = &.{} },
+ .{ .name = "PostDepthCoverage", .value = 4446, .parameters = &.{} },
+ .{ .name = "DenormPreserve", .value = 4459, .parameters = &.{.literal_integer} },
+ .{ .name = "DenormFlushToZero", .value = 4460, .parameters = &.{.literal_integer} },
+ .{ .name = "SignedZeroInfNanPreserve", .value = 4461, .parameters = &.{.literal_integer} },
+ .{ .name = "RoundingModeRTE", .value = 4462, .parameters = &.{.literal_integer} },
+ .{ .name = "RoundingModeRTZ", .value = 4463, .parameters = &.{.literal_integer} },
+ .{ .name = "NonCoherentTileAttachmentReadQCOM", .value = 4489, .parameters = &.{} },
+ .{ .name = "TileShadingRateQCOM", .value = 4490, .parameters = &.{ .literal_integer, .literal_integer, .literal_integer } },
+ .{ .name = "EarlyAndLateFragmentTestsAMD", .value = 5017, .parameters = &.{} },
+ .{ .name = "StencilRefReplacingEXT", .value = 5027, .parameters = &.{} },
+ .{ .name = "CoalescingAMDX", .value = 5069, .parameters = &.{} },
+ .{ .name = "IsApiEntryAMDX", .value = 5070, .parameters = &.{.id_ref} },
+ .{ .name = "MaxNodeRecursionAMDX", .value = 5071, .parameters = &.{.id_ref} },
+ .{ .name = "StaticNumWorkgroupsAMDX", .value = 5072, .parameters = &.{ .id_ref, .id_ref, .id_ref } },
+ .{ .name = "ShaderIndexAMDX", .value = 5073, .parameters = &.{.id_ref} },
+ .{ .name = "MaxNumWorkgroupsAMDX", .value = 5077, .parameters = &.{ .id_ref, .id_ref, .id_ref } },
+ .{ .name = "StencilRefUnchangedFrontAMD", .value = 5079, .parameters = &.{} },
+ .{ .name = "StencilRefGreaterFrontAMD", .value = 5080, .parameters = &.{} },
+ .{ .name = "StencilRefLessFrontAMD", .value = 5081, .parameters = &.{} },
+ .{ .name = "StencilRefUnchangedBackAMD", .value = 5082, .parameters = &.{} },
+ .{ .name = "StencilRefGreaterBackAMD", .value = 5083, .parameters = &.{} },
+ .{ .name = "StencilRefLessBackAMD", .value = 5084, .parameters = &.{} },
+ .{ .name = "QuadDerivativesKHR", .value = 5088, .parameters = &.{} },
+ .{ .name = "RequireFullQuadsKHR", .value = 5089, .parameters = &.{} },
+ .{ .name = "SharesInputWithAMDX", .value = 5102, .parameters = &.{ .id_ref, .id_ref } },
+ .{ .name = "OutputLinesEXT", .value = 5269, .parameters = &.{} },
+ .{ .name = "OutputPrimitivesEXT", .value = 5270, .parameters = &.{.literal_integer} },
+ .{ .name = "DerivativeGroupQuadsKHR", .value = 5289, .parameters = &.{} },
+ .{ .name = "DerivativeGroupLinearKHR", .value = 5290, .parameters = &.{} },
+ .{ .name = "OutputTrianglesEXT", .value = 5298, .parameters = &.{} },
+ .{ .name = "PixelInterlockOrderedEXT", .value = 5366, .parameters = &.{} },
+ .{ .name = "PixelInterlockUnorderedEXT", .value = 5367, .parameters = &.{} },
+ .{ .name = "SampleInterlockOrderedEXT", .value = 5368, .parameters = &.{} },
+ .{ .name = "SampleInterlockUnorderedEXT", .value = 5369, .parameters = &.{} },
+ .{ .name = "ShadingRateInterlockOrderedEXT", .value = 5370, .parameters = &.{} },
+ .{ .name = "ShadingRateInterlockUnorderedEXT", .value = 5371, .parameters = &.{} },
+ .{ .name = "SharedLocalMemorySizeINTEL", .value = 5618, .parameters = &.{.literal_integer} },
+ .{ .name = "RoundingModeRTPINTEL", .value = 5620, .parameters = &.{.literal_integer} },
+ .{ .name = "RoundingModeRTNINTEL", .value = 5621, .parameters = &.{.literal_integer} },
+ .{ .name = "FloatingPointModeALTINTEL", .value = 5622, .parameters = &.{.literal_integer} },
+ .{ .name = "FloatingPointModeIEEEINTEL", .value = 5623, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxWorkgroupSizeINTEL", .value = 5893, .parameters = &.{ .literal_integer, .literal_integer, .literal_integer } },
+ .{ .name = "MaxWorkDimINTEL", .value = 5894, .parameters = &.{.literal_integer} },
+ .{ .name = "NoGlobalOffsetINTEL", .value = 5895, .parameters = &.{} },
+ .{ .name = "NumSIMDWorkitemsINTEL", .value = 5896, .parameters = &.{.literal_integer} },
+ .{ .name = "SchedulerTargetFmaxMhzINTEL", .value = 5903, .parameters = &.{.literal_integer} },
+ .{ .name = "MaximallyReconvergesKHR", .value = 6023, .parameters = &.{} },
+ .{ .name = "FPFastMathDefault", .value = 6028, .parameters = &.{ .id_ref, .id_ref } },
+ .{ .name = "StreamingInterfaceINTEL", .value = 6154, .parameters = &.{.literal_integer} },
+ .{ .name = "RegisterMapInterfaceINTEL", .value = 6160, .parameters = &.{.literal_integer} },
+ .{ .name = "NamedBarrierCountINTEL", .value = 6417, .parameters = &.{.literal_integer} },
+ .{ .name = "MaximumRegistersINTEL", .value = 6461, .parameters = &.{.literal_integer} },
+ .{ .name = "MaximumRegistersIdINTEL", .value = 6462, .parameters = &.{.id_ref} },
+ .{ .name = "NamedMaximumRegistersINTEL", .value = 6463, .parameters = &.{.named_maximum_number_of_registers} },
+ },
+ .storage_class => &.{
+ .{ .name = "UniformConstant", .value = 0, .parameters = &.{} },
+ .{ .name = "Input", .value = 1, .parameters = &.{} },
+ .{ .name = "Uniform", .value = 2, .parameters = &.{} },
+ .{ .name = "Output", .value = 3, .parameters = &.{} },
+ .{ .name = "Workgroup", .value = 4, .parameters = &.{} },
+ .{ .name = "CrossWorkgroup", .value = 5, .parameters = &.{} },
+ .{ .name = "Private", .value = 6, .parameters = &.{} },
+ .{ .name = "Function", .value = 7, .parameters = &.{} },
+ .{ .name = "Generic", .value = 8, .parameters = &.{} },
+ .{ .name = "PushConstant", .value = 9, .parameters = &.{} },
+ .{ .name = "AtomicCounter", .value = 10, .parameters = &.{} },
+ .{ .name = "Image", .value = 11, .parameters = &.{} },
+ .{ .name = "StorageBuffer", .value = 12, .parameters = &.{} },
+ .{ .name = "TileImageEXT", .value = 4172, .parameters = &.{} },
+ .{ .name = "TileAttachmentQCOM", .value = 4491, .parameters = &.{} },
+ .{ .name = "NodePayloadAMDX", .value = 5068, .parameters = &.{} },
+ .{ .name = "CallableDataKHR", .value = 5328, .parameters = &.{} },
+ .{ .name = "IncomingCallableDataKHR", .value = 5329, .parameters = &.{} },
+ .{ .name = "RayPayloadKHR", .value = 5338, .parameters = &.{} },
+ .{ .name = "HitAttributeKHR", .value = 5339, .parameters = &.{} },
+ .{ .name = "IncomingRayPayloadKHR", .value = 5342, .parameters = &.{} },
+ .{ .name = "ShaderRecordBufferKHR", .value = 5343, .parameters = &.{} },
+ .{ .name = "PhysicalStorageBuffer", .value = 5349, .parameters = &.{} },
+ .{ .name = "HitObjectAttributeNV", .value = 5385, .parameters = &.{} },
+ .{ .name = "TaskPayloadWorkgroupEXT", .value = 5402, .parameters = &.{} },
+ .{ .name = "CodeSectionINTEL", .value = 5605, .parameters = &.{} },
+ .{ .name = "DeviceOnlyINTEL", .value = 5936, .parameters = &.{} },
+ .{ .name = "HostOnlyINTEL", .value = 5937, .parameters = &.{} },
+ },
+ .dim => &.{
+ .{ .name = "1D", .value = 0, .parameters = &.{} },
+ .{ .name = "2D", .value = 1, .parameters = &.{} },
+ .{ .name = "3D", .value = 2, .parameters = &.{} },
+ .{ .name = "Cube", .value = 3, .parameters = &.{} },
+ .{ .name = "Rect", .value = 4, .parameters = &.{} },
+ .{ .name = "Buffer", .value = 5, .parameters = &.{} },
+ .{ .name = "SubpassData", .value = 6, .parameters = &.{} },
+ .{ .name = "TileImageDataEXT", .value = 4173, .parameters = &.{} },
+ },
+ .sampler_addressing_mode => &.{
+ .{ .name = "None", .value = 0, .parameters = &.{} },
+ .{ .name = "ClampToEdge", .value = 1, .parameters = &.{} },
+ .{ .name = "Clamp", .value = 2, .parameters = &.{} },
+ .{ .name = "Repeat", .value = 3, .parameters = &.{} },
+ .{ .name = "RepeatMirrored", .value = 4, .parameters = &.{} },
+ },
+ .sampler_filter_mode => &.{
+ .{ .name = "Nearest", .value = 0, .parameters = &.{} },
+ .{ .name = "Linear", .value = 1, .parameters = &.{} },
+ },
+ .image_format => &.{
+ .{ .name = "Unknown", .value = 0, .parameters = &.{} },
+ .{ .name = "Rgba32f", .value = 1, .parameters = &.{} },
+ .{ .name = "Rgba16f", .value = 2, .parameters = &.{} },
+ .{ .name = "R32f", .value = 3, .parameters = &.{} },
+ .{ .name = "Rgba8", .value = 4, .parameters = &.{} },
+ .{ .name = "Rgba8Snorm", .value = 5, .parameters = &.{} },
+ .{ .name = "Rg32f", .value = 6, .parameters = &.{} },
+ .{ .name = "Rg16f", .value = 7, .parameters = &.{} },
+ .{ .name = "R11fG11fB10f", .value = 8, .parameters = &.{} },
+ .{ .name = "R16f", .value = 9, .parameters = &.{} },
+ .{ .name = "Rgba16", .value = 10, .parameters = &.{} },
+ .{ .name = "Rgb10A2", .value = 11, .parameters = &.{} },
+ .{ .name = "Rg16", .value = 12, .parameters = &.{} },
+ .{ .name = "Rg8", .value = 13, .parameters = &.{} },
+ .{ .name = "R16", .value = 14, .parameters = &.{} },
+ .{ .name = "R8", .value = 15, .parameters = &.{} },
+ .{ .name = "Rgba16Snorm", .value = 16, .parameters = &.{} },
+ .{ .name = "Rg16Snorm", .value = 17, .parameters = &.{} },
+ .{ .name = "Rg8Snorm", .value = 18, .parameters = &.{} },
+ .{ .name = "R16Snorm", .value = 19, .parameters = &.{} },
+ .{ .name = "R8Snorm", .value = 20, .parameters = &.{} },
+ .{ .name = "Rgba32i", .value = 21, .parameters = &.{} },
+ .{ .name = "Rgba16i", .value = 22, .parameters = &.{} },
+ .{ .name = "Rgba8i", .value = 23, .parameters = &.{} },
+ .{ .name = "R32i", .value = 24, .parameters = &.{} },
+ .{ .name = "Rg32i", .value = 25, .parameters = &.{} },
+ .{ .name = "Rg16i", .value = 26, .parameters = &.{} },
+ .{ .name = "Rg8i", .value = 27, .parameters = &.{} },
+ .{ .name = "R16i", .value = 28, .parameters = &.{} },
+ .{ .name = "R8i", .value = 29, .parameters = &.{} },
+ .{ .name = "Rgba32ui", .value = 30, .parameters = &.{} },
+ .{ .name = "Rgba16ui", .value = 31, .parameters = &.{} },
+ .{ .name = "Rgba8ui", .value = 32, .parameters = &.{} },
+ .{ .name = "R32ui", .value = 33, .parameters = &.{} },
+ .{ .name = "Rgb10a2ui", .value = 34, .parameters = &.{} },
+ .{ .name = "Rg32ui", .value = 35, .parameters = &.{} },
+ .{ .name = "Rg16ui", .value = 36, .parameters = &.{} },
+ .{ .name = "Rg8ui", .value = 37, .parameters = &.{} },
+ .{ .name = "R16ui", .value = 38, .parameters = &.{} },
+ .{ .name = "R8ui", .value = 39, .parameters = &.{} },
+ .{ .name = "R64ui", .value = 40, .parameters = &.{} },
+ .{ .name = "R64i", .value = 41, .parameters = &.{} },
+ },
+ .image_channel_order => &.{
+ .{ .name = "R", .value = 0, .parameters = &.{} },
+ .{ .name = "A", .value = 1, .parameters = &.{} },
+ .{ .name = "RG", .value = 2, .parameters = &.{} },
+ .{ .name = "RA", .value = 3, .parameters = &.{} },
+ .{ .name = "RGB", .value = 4, .parameters = &.{} },
+ .{ .name = "RGBA", .value = 5, .parameters = &.{} },
+ .{ .name = "BGRA", .value = 6, .parameters = &.{} },
+ .{ .name = "ARGB", .value = 7, .parameters = &.{} },
+ .{ .name = "Intensity", .value = 8, .parameters = &.{} },
+ .{ .name = "Luminance", .value = 9, .parameters = &.{} },
+ .{ .name = "Rx", .value = 10, .parameters = &.{} },
+ .{ .name = "RGx", .value = 11, .parameters = &.{} },
+ .{ .name = "RGBx", .value = 12, .parameters = &.{} },
+ .{ .name = "Depth", .value = 13, .parameters = &.{} },
+ .{ .name = "DepthStencil", .value = 14, .parameters = &.{} },
+ .{ .name = "sRGB", .value = 15, .parameters = &.{} },
+ .{ .name = "sRGBx", .value = 16, .parameters = &.{} },
+ .{ .name = "sRGBA", .value = 17, .parameters = &.{} },
+ .{ .name = "sBGRA", .value = 18, .parameters = &.{} },
+ .{ .name = "ABGR", .value = 19, .parameters = &.{} },
+ },
+ .image_channel_data_type => &.{
+ .{ .name = "SnormInt8", .value = 0, .parameters = &.{} },
+ .{ .name = "SnormInt16", .value = 1, .parameters = &.{} },
+ .{ .name = "UnormInt8", .value = 2, .parameters = &.{} },
+ .{ .name = "UnormInt16", .value = 3, .parameters = &.{} },
+ .{ .name = "UnormShort565", .value = 4, .parameters = &.{} },
+ .{ .name = "UnormShort555", .value = 5, .parameters = &.{} },
+ .{ .name = "UnormInt101010", .value = 6, .parameters = &.{} },
+ .{ .name = "SignedInt8", .value = 7, .parameters = &.{} },
+ .{ .name = "SignedInt16", .value = 8, .parameters = &.{} },
+ .{ .name = "SignedInt32", .value = 9, .parameters = &.{} },
+ .{ .name = "UnsignedInt8", .value = 10, .parameters = &.{} },
+ .{ .name = "UnsignedInt16", .value = 11, .parameters = &.{} },
+ .{ .name = "UnsignedInt32", .value = 12, .parameters = &.{} },
+ .{ .name = "HalfFloat", .value = 13, .parameters = &.{} },
+ .{ .name = "Float", .value = 14, .parameters = &.{} },
+ .{ .name = "UnormInt24", .value = 15, .parameters = &.{} },
+ .{ .name = "UnormInt101010_2", .value = 16, .parameters = &.{} },
+ .{ .name = "UnormInt10X6EXT", .value = 17, .parameters = &.{} },
+ .{ .name = "UnsignedIntRaw10EXT", .value = 19, .parameters = &.{} },
+ .{ .name = "UnsignedIntRaw12EXT", .value = 20, .parameters = &.{} },
+ .{ .name = "UnormInt2_101010EXT", .value = 21, .parameters = &.{} },
+ .{ .name = "UnsignedInt10X6EXT", .value = 22, .parameters = &.{} },
+ .{ .name = "UnsignedInt12X4EXT", .value = 23, .parameters = &.{} },
+ .{ .name = "UnsignedInt14X2EXT", .value = 24, .parameters = &.{} },
+ .{ .name = "UnormInt12X4EXT", .value = 25, .parameters = &.{} },
+ .{ .name = "UnormInt14X2EXT", .value = 26, .parameters = &.{} },
+ },
+ .fp_rounding_mode => &.{
+ .{ .name = "RTE", .value = 0, .parameters = &.{} },
+ .{ .name = "RTZ", .value = 1, .parameters = &.{} },
+ .{ .name = "RTP", .value = 2, .parameters = &.{} },
+ .{ .name = "RTN", .value = 3, .parameters = &.{} },
+ },
+ .fp_denorm_mode => &.{
+ .{ .name = "Preserve", .value = 0, .parameters = &.{} },
+ .{ .name = "FlushToZero", .value = 1, .parameters = &.{} },
+ },
+ .quantization_modes => &.{
+ .{ .name = "TRN", .value = 0, .parameters = &.{} },
+ .{ .name = "TRN_ZERO", .value = 1, .parameters = &.{} },
+ .{ .name = "RND", .value = 2, .parameters = &.{} },
+ .{ .name = "RND_ZERO", .value = 3, .parameters = &.{} },
+ .{ .name = "RND_INF", .value = 4, .parameters = &.{} },
+ .{ .name = "RND_MIN_INF", .value = 5, .parameters = &.{} },
+ .{ .name = "RND_CONV", .value = 6, .parameters = &.{} },
+ .{ .name = "RND_CONV_ODD", .value = 7, .parameters = &.{} },
+ },
+ .fp_operation_mode => &.{
+ .{ .name = "IEEE", .value = 0, .parameters = &.{} },
+ .{ .name = "ALT", .value = 1, .parameters = &.{} },
+ },
+ .overflow_modes => &.{
+ .{ .name = "WRAP", .value = 0, .parameters = &.{} },
+ .{ .name = "SAT", .value = 1, .parameters = &.{} },
+ .{ .name = "SAT_ZERO", .value = 2, .parameters = &.{} },
+ .{ .name = "SAT_SYM", .value = 3, .parameters = &.{} },
+ },
+ .linkage_type => &.{
+ .{ .name = "Export", .value = 0, .parameters = &.{} },
+ .{ .name = "Import", .value = 1, .parameters = &.{} },
+ .{ .name = "LinkOnceODR", .value = 2, .parameters = &.{} },
+ },
+ .access_qualifier => &.{
+ .{ .name = "ReadOnly", .value = 0, .parameters = &.{} },
+ .{ .name = "WriteOnly", .value = 1, .parameters = &.{} },
+ .{ .name = "ReadWrite", .value = 2, .parameters = &.{} },
+ },
+ .host_access_qualifier => &.{
+ .{ .name = "NoneINTEL", .value = 0, .parameters = &.{} },
+ .{ .name = "ReadINTEL", .value = 1, .parameters = &.{} },
+ .{ .name = "WriteINTEL", .value = 2, .parameters = &.{} },
+ .{ .name = "ReadWriteINTEL", .value = 3, .parameters = &.{} },
+ },
+ .function_parameter_attribute => &.{
+ .{ .name = "Zext", .value = 0, .parameters = &.{} },
+ .{ .name = "Sext", .value = 1, .parameters = &.{} },
+ .{ .name = "ByVal", .value = 2, .parameters = &.{} },
+ .{ .name = "Sret", .value = 3, .parameters = &.{} },
+ .{ .name = "NoAlias", .value = 4, .parameters = &.{} },
+ .{ .name = "NoCapture", .value = 5, .parameters = &.{} },
+ .{ .name = "NoWrite", .value = 6, .parameters = &.{} },
+ .{ .name = "NoReadWrite", .value = 7, .parameters = &.{} },
+ .{ .name = "RuntimeAlignedINTEL", .value = 5940, .parameters = &.{} },
+ },
+ .decoration => &.{
+ .{ .name = "RelaxedPrecision", .value = 0, .parameters = &.{} },
+ .{ .name = "SpecId", .value = 1, .parameters = &.{.literal_integer} },
+ .{ .name = "Block", .value = 2, .parameters = &.{} },
+ .{ .name = "BufferBlock", .value = 3, .parameters = &.{} },
+ .{ .name = "RowMajor", .value = 4, .parameters = &.{} },
+ .{ .name = "ColMajor", .value = 5, .parameters = &.{} },
+ .{ .name = "ArrayStride", .value = 6, .parameters = &.{.literal_integer} },
+ .{ .name = "MatrixStride", .value = 7, .parameters = &.{.literal_integer} },
+ .{ .name = "GLSLShared", .value = 8, .parameters = &.{} },
+ .{ .name = "GLSLPacked", .value = 9, .parameters = &.{} },
+ .{ .name = "CPacked", .value = 10, .parameters = &.{} },
+ .{ .name = "BuiltIn", .value = 11, .parameters = &.{.built_in} },
+ .{ .name = "NoPerspective", .value = 13, .parameters = &.{} },
+ .{ .name = "Flat", .value = 14, .parameters = &.{} },
+ .{ .name = "Patch", .value = 15, .parameters = &.{} },
+ .{ .name = "Centroid", .value = 16, .parameters = &.{} },
+ .{ .name = "Sample", .value = 17, .parameters = &.{} },
+ .{ .name = "Invariant", .value = 18, .parameters = &.{} },
+ .{ .name = "Restrict", .value = 19, .parameters = &.{} },
+ .{ .name = "Aliased", .value = 20, .parameters = &.{} },
+ .{ .name = "Volatile", .value = 21, .parameters = &.{} },
+ .{ .name = "Constant", .value = 22, .parameters = &.{} },
+ .{ .name = "Coherent", .value = 23, .parameters = &.{} },
+ .{ .name = "NonWritable", .value = 24, .parameters = &.{} },
+ .{ .name = "NonReadable", .value = 25, .parameters = &.{} },
+ .{ .name = "Uniform", .value = 26, .parameters = &.{} },
+ .{ .name = "UniformId", .value = 27, .parameters = &.{.id_scope} },
+ .{ .name = "SaturatedConversion", .value = 28, .parameters = &.{} },
+ .{ .name = "Stream", .value = 29, .parameters = &.{.literal_integer} },
+ .{ .name = "Location", .value = 30, .parameters = &.{.literal_integer} },
+ .{ .name = "Component", .value = 31, .parameters = &.{.literal_integer} },
+ .{ .name = "Index", .value = 32, .parameters = &.{.literal_integer} },
+ .{ .name = "Binding", .value = 33, .parameters = &.{.literal_integer} },
+ .{ .name = "DescriptorSet", .value = 34, .parameters = &.{.literal_integer} },
+ .{ .name = "Offset", .value = 35, .parameters = &.{.literal_integer} },
+ .{ .name = "XfbBuffer", .value = 36, .parameters = &.{.literal_integer} },
+ .{ .name = "XfbStride", .value = 37, .parameters = &.{.literal_integer} },
+ .{ .name = "FuncParamAttr", .value = 38, .parameters = &.{.function_parameter_attribute} },
+ .{ .name = "FPRoundingMode", .value = 39, .parameters = &.{.fp_rounding_mode} },
+ .{ .name = "FPFastMathMode", .value = 40, .parameters = &.{.fp_fast_math_mode} },
+ .{ .name = "LinkageAttributes", .value = 41, .parameters = &.{ .literal_string, .linkage_type } },
+ .{ .name = "NoContraction", .value = 42, .parameters = &.{} },
+ .{ .name = "InputAttachmentIndex", .value = 43, .parameters = &.{.literal_integer} },
+ .{ .name = "Alignment", .value = 44, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxByteOffset", .value = 45, .parameters = &.{.literal_integer} },
+ .{ .name = "AlignmentId", .value = 46, .parameters = &.{.id_ref} },
+ .{ .name = "MaxByteOffsetId", .value = 47, .parameters = &.{.id_ref} },
+ .{ .name = "SaturatedToLargestFloat8NormalConversionEXT", .value = 4216, .parameters = &.{} },
+ .{ .name = "NoSignedWrap", .value = 4469, .parameters = &.{} },
+ .{ .name = "NoUnsignedWrap", .value = 4470, .parameters = &.{} },
+ .{ .name = "WeightTextureQCOM", .value = 4487, .parameters = &.{} },
+ .{ .name = "BlockMatchTextureQCOM", .value = 4488, .parameters = &.{} },
+ .{ .name = "BlockMatchSamplerQCOM", .value = 4499, .parameters = &.{} },
+ .{ .name = "ExplicitInterpAMD", .value = 4999, .parameters = &.{} },
+ .{ .name = "NodeSharesPayloadLimitsWithAMDX", .value = 5019, .parameters = &.{.id_ref} },
+ .{ .name = "NodeMaxPayloadsAMDX", .value = 5020, .parameters = &.{.id_ref} },
+ .{ .name = "TrackFinishWritingAMDX", .value = 5078, .parameters = &.{} },
+ .{ .name = "PayloadNodeNameAMDX", .value = 5091, .parameters = &.{.id_ref} },
+ .{ .name = "PayloadNodeBaseIndexAMDX", .value = 5098, .parameters = &.{.id_ref} },
+ .{ .name = "PayloadNodeSparseArrayAMDX", .value = 5099, .parameters = &.{} },
+ .{ .name = "PayloadNodeArraySizeAMDX", .value = 5100, .parameters = &.{.id_ref} },
+ .{ .name = "PayloadDispatchIndirectAMDX", .value = 5105, .parameters = &.{} },
+ .{ .name = "OverrideCoverageNV", .value = 5248, .parameters = &.{} },
+ .{ .name = "PassthroughNV", .value = 5250, .parameters = &.{} },
+ .{ .name = "ViewportRelativeNV", .value = 5252, .parameters = &.{} },
+ .{ .name = "SecondaryViewportRelativeNV", .value = 5256, .parameters = &.{.literal_integer} },
+ .{ .name = "PerPrimitiveEXT", .value = 5271, .parameters = &.{} },
+ .{ .name = "PerViewNV", .value = 5272, .parameters = &.{} },
+ .{ .name = "PerTaskNV", .value = 5273, .parameters = &.{} },
+ .{ .name = "PerVertexKHR", .value = 5285, .parameters = &.{} },
+ .{ .name = "NonUniform", .value = 5300, .parameters = &.{} },
+ .{ .name = "RestrictPointer", .value = 5355, .parameters = &.{} },
+ .{ .name = "AliasedPointer", .value = 5356, .parameters = &.{} },
+ .{ .name = "HitObjectShaderRecordBufferNV", .value = 5386, .parameters = &.{} },
+ .{ .name = "BindlessSamplerNV", .value = 5398, .parameters = &.{} },
+ .{ .name = "BindlessImageNV", .value = 5399, .parameters = &.{} },
+ .{ .name = "BoundSamplerNV", .value = 5400, .parameters = &.{} },
+ .{ .name = "BoundImageNV", .value = 5401, .parameters = &.{} },
+ .{ .name = "SIMTCallINTEL", .value = 5599, .parameters = &.{.literal_integer} },
+ .{ .name = "ReferencedIndirectlyINTEL", .value = 5602, .parameters = &.{} },
+ .{ .name = "ClobberINTEL", .value = 5607, .parameters = &.{.literal_string} },
+ .{ .name = "SideEffectsINTEL", .value = 5608, .parameters = &.{} },
+ .{ .name = "VectorComputeVariableINTEL", .value = 5624, .parameters = &.{} },
+ .{ .name = "FuncParamIOKindINTEL", .value = 5625, .parameters = &.{.literal_integer} },
+ .{ .name = "VectorComputeFunctionINTEL", .value = 5626, .parameters = &.{} },
+ .{ .name = "StackCallINTEL", .value = 5627, .parameters = &.{} },
+ .{ .name = "GlobalVariableOffsetINTEL", .value = 5628, .parameters = &.{.literal_integer} },
+ .{ .name = "CounterBuffer", .value = 5634, .parameters = &.{.id_ref} },
+ .{ .name = "UserSemantic", .value = 5635, .parameters = &.{.literal_string} },
+ .{ .name = "UserTypeGOOGLE", .value = 5636, .parameters = &.{.literal_string} },
+ .{ .name = "FunctionRoundingModeINTEL", .value = 5822, .parameters = &.{ .literal_integer, .fp_rounding_mode } },
+ .{ .name = "FunctionDenormModeINTEL", .value = 5823, .parameters = &.{ .literal_integer, .fp_denorm_mode } },
+ .{ .name = "RegisterINTEL", .value = 5825, .parameters = &.{} },
+ .{ .name = "MemoryINTEL", .value = 5826, .parameters = &.{.literal_string} },
+ .{ .name = "NumbanksINTEL", .value = 5827, .parameters = &.{.literal_integer} },
+ .{ .name = "BankwidthINTEL", .value = 5828, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxPrivateCopiesINTEL", .value = 5829, .parameters = &.{.literal_integer} },
+ .{ .name = "SinglepumpINTEL", .value = 5830, .parameters = &.{} },
+ .{ .name = "DoublepumpINTEL", .value = 5831, .parameters = &.{} },
+ .{ .name = "MaxReplicatesINTEL", .value = 5832, .parameters = &.{.literal_integer} },
+ .{ .name = "SimpleDualPortINTEL", .value = 5833, .parameters = &.{} },
+ .{ .name = "MergeINTEL", .value = 5834, .parameters = &.{ .literal_string, .literal_string } },
+ .{ .name = "BankBitsINTEL", .value = 5835, .parameters = &.{.literal_integer} },
+ .{ .name = "ForcePow2DepthINTEL", .value = 5836, .parameters = &.{.literal_integer} },
+ .{ .name = "StridesizeINTEL", .value = 5883, .parameters = &.{.literal_integer} },
+ .{ .name = "WordsizeINTEL", .value = 5884, .parameters = &.{.literal_integer} },
+ .{ .name = "TrueDualPortINTEL", .value = 5885, .parameters = &.{} },
+ .{ .name = "BurstCoalesceINTEL", .value = 5899, .parameters = &.{} },
+ .{ .name = "CacheSizeINTEL", .value = 5900, .parameters = &.{.literal_integer} },
+ .{ .name = "DontStaticallyCoalesceINTEL", .value = 5901, .parameters = &.{} },
+ .{ .name = "PrefetchINTEL", .value = 5902, .parameters = &.{.literal_integer} },
+ .{ .name = "StallEnableINTEL", .value = 5905, .parameters = &.{} },
+ .{ .name = "FuseLoopsInFunctionINTEL", .value = 5907, .parameters = &.{} },
+ .{ .name = "MathOpDSPModeINTEL", .value = 5909, .parameters = &.{ .literal_integer, .literal_integer } },
+ .{ .name = "AliasScopeINTEL", .value = 5914, .parameters = &.{.id_ref} },
+ .{ .name = "NoAliasINTEL", .value = 5915, .parameters = &.{.id_ref} },
+ .{ .name = "InitiationIntervalINTEL", .value = 5917, .parameters = &.{.literal_integer} },
+ .{ .name = "MaxConcurrencyINTEL", .value = 5918, .parameters = &.{.literal_integer} },
+ .{ .name = "PipelineEnableINTEL", .value = 5919, .parameters = &.{.literal_integer} },
+ .{ .name = "BufferLocationINTEL", .value = 5921, .parameters = &.{.literal_integer} },
+ .{ .name = "IOPipeStorageINTEL", .value = 5944, .parameters = &.{.literal_integer} },
+ .{ .name = "FunctionFloatingPointModeINTEL", .value = 6080, .parameters = &.{ .literal_integer, .fp_operation_mode } },
+ .{ .name = "SingleElementVectorINTEL", .value = 6085, .parameters = &.{} },
+ .{ .name = "VectorComputeCallableFunctionINTEL", .value = 6087, .parameters = &.{} },
+ .{ .name = "MediaBlockIOINTEL", .value = 6140, .parameters = &.{} },
+ .{ .name = "StallFreeINTEL", .value = 6151, .parameters = &.{} },
+ .{ .name = "FPMaxErrorDecorationINTEL", .value = 6170, .parameters = &.{.literal_float} },
+ .{ .name = "LatencyControlLabelINTEL", .value = 6172, .parameters = &.{.literal_integer} },
+ .{ .name = "LatencyControlConstraintINTEL", .value = 6173, .parameters = &.{ .literal_integer, .literal_integer, .literal_integer } },
+ .{ .name = "ConduitKernelArgumentINTEL", .value = 6175, .parameters = &.{} },
+ .{ .name = "RegisterMapKernelArgumentINTEL", .value = 6176, .parameters = &.{} },
+ .{ .name = "MMHostInterfaceAddressWidthINTEL", .value = 6177, .parameters = &.{.literal_integer} },
+ .{ .name = "MMHostInterfaceDataWidthINTEL", .value = 6178, .parameters = &.{.literal_integer} },
+ .{ .name = "MMHostInterfaceLatencyINTEL", .value = 6179, .parameters = &.{.literal_integer} },
+ .{ .name = "MMHostInterfaceReadWriteModeINTEL", .value = 6180, .parameters = &.{.access_qualifier} },
+ .{ .name = "MMHostInterfaceMaxBurstINTEL", .value = 6181, .parameters = &.{.literal_integer} },
+ .{ .name = "MMHostInterfaceWaitRequestINTEL", .value = 6182, .parameters = &.{.literal_integer} },
+ .{ .name = "StableKernelArgumentINTEL", .value = 6183, .parameters = &.{} },
+ .{ .name = "HostAccessINTEL", .value = 6188, .parameters = &.{ .host_access_qualifier, .literal_string } },
+ .{ .name = "InitModeINTEL", .value = 6190, .parameters = &.{.initialization_mode_qualifier} },
+ .{ .name = "ImplementInRegisterMapINTEL", .value = 6191, .parameters = &.{.literal_integer} },
+ .{ .name = "CacheControlLoadINTEL", .value = 6442, .parameters = &.{ .literal_integer, .load_cache_control } },
+ .{ .name = "CacheControlStoreINTEL", .value = 6443, .parameters = &.{ .literal_integer, .store_cache_control } },
+ },
+ .built_in => &.{
+ .{ .name = "Position", .value = 0, .parameters = &.{} },
+ .{ .name = "PointSize", .value = 1, .parameters = &.{} },
+ .{ .name = "ClipDistance", .value = 3, .parameters = &.{} },
+ .{ .name = "CullDistance", .value = 4, .parameters = &.{} },
+ .{ .name = "VertexId", .value = 5, .parameters = &.{} },
+ .{ .name = "InstanceId", .value = 6, .parameters = &.{} },
+ .{ .name = "PrimitiveId", .value = 7, .parameters = &.{} },
+ .{ .name = "InvocationId", .value = 8, .parameters = &.{} },
+ .{ .name = "Layer", .value = 9, .parameters = &.{} },
+ .{ .name = "ViewportIndex", .value = 10, .parameters = &.{} },
+ .{ .name = "TessLevelOuter", .value = 11, .parameters = &.{} },
+ .{ .name = "TessLevelInner", .value = 12, .parameters = &.{} },
+ .{ .name = "TessCoord", .value = 13, .parameters = &.{} },
+ .{ .name = "PatchVertices", .value = 14, .parameters = &.{} },
+ .{ .name = "FragCoord", .value = 15, .parameters = &.{} },
+ .{ .name = "PointCoord", .value = 16, .parameters = &.{} },
+ .{ .name = "FrontFacing", .value = 17, .parameters = &.{} },
+ .{ .name = "SampleId", .value = 18, .parameters = &.{} },
+ .{ .name = "SamplePosition", .value = 19, .parameters = &.{} },
+ .{ .name = "SampleMask", .value = 20, .parameters = &.{} },
+ .{ .name = "FragDepth", .value = 22, .parameters = &.{} },
+ .{ .name = "HelperInvocation", .value = 23, .parameters = &.{} },
+ .{ .name = "NumWorkgroups", .value = 24, .parameters = &.{} },
+ .{ .name = "WorkgroupSize", .value = 25, .parameters = &.{} },
+ .{ .name = "WorkgroupId", .value = 26, .parameters = &.{} },
+ .{ .name = "LocalInvocationId", .value = 27, .parameters = &.{} },
+ .{ .name = "GlobalInvocationId", .value = 28, .parameters = &.{} },
+ .{ .name = "LocalInvocationIndex", .value = 29, .parameters = &.{} },
+ .{ .name = "WorkDim", .value = 30, .parameters = &.{} },
+ .{ .name = "GlobalSize", .value = 31, .parameters = &.{} },
+ .{ .name = "EnqueuedWorkgroupSize", .value = 32, .parameters = &.{} },
+ .{ .name = "GlobalOffset", .value = 33, .parameters = &.{} },
+ .{ .name = "GlobalLinearId", .value = 34, .parameters = &.{} },
+ .{ .name = "SubgroupSize", .value = 36, .parameters = &.{} },
+ .{ .name = "SubgroupMaxSize", .value = 37, .parameters = &.{} },
+ .{ .name = "NumSubgroups", .value = 38, .parameters = &.{} },
+ .{ .name = "NumEnqueuedSubgroups", .value = 39, .parameters = &.{} },
+ .{ .name = "SubgroupId", .value = 40, .parameters = &.{} },
+ .{ .name = "SubgroupLocalInvocationId", .value = 41, .parameters = &.{} },
+ .{ .name = "VertexIndex", .value = 42, .parameters = &.{} },
+ .{ .name = "InstanceIndex", .value = 43, .parameters = &.{} },
+ .{ .name = "CoreIDARM", .value = 4160, .parameters = &.{} },
+ .{ .name = "CoreCountARM", .value = 4161, .parameters = &.{} },
+ .{ .name = "CoreMaxIDARM", .value = 4162, .parameters = &.{} },
+ .{ .name = "WarpIDARM", .value = 4163, .parameters = &.{} },
+ .{ .name = "WarpMaxIDARM", .value = 4164, .parameters = &.{} },
+ .{ .name = "SubgroupEqMask", .value = 4416, .parameters = &.{} },
+ .{ .name = "SubgroupGeMask", .value = 4417, .parameters = &.{} },
+ .{ .name = "SubgroupGtMask", .value = 4418, .parameters = &.{} },
+ .{ .name = "SubgroupLeMask", .value = 4419, .parameters = &.{} },
+ .{ .name = "SubgroupLtMask", .value = 4420, .parameters = &.{} },
+ .{ .name = "BaseVertex", .value = 4424, .parameters = &.{} },
+ .{ .name = "BaseInstance", .value = 4425, .parameters = &.{} },
+ .{ .name = "DrawIndex", .value = 4426, .parameters = &.{} },
+ .{ .name = "PrimitiveShadingRateKHR", .value = 4432, .parameters = &.{} },
+ .{ .name = "DeviceIndex", .value = 4438, .parameters = &.{} },
+ .{ .name = "ViewIndex", .value = 4440, .parameters = &.{} },
+ .{ .name = "ShadingRateKHR", .value = 4444, .parameters = &.{} },
+ .{ .name = "TileOffsetQCOM", .value = 4492, .parameters = &.{} },
+ .{ .name = "TileDimensionQCOM", .value = 4493, .parameters = &.{} },
+ .{ .name = "TileApronSizeQCOM", .value = 4494, .parameters = &.{} },
+ .{ .name = "BaryCoordNoPerspAMD", .value = 4992, .parameters = &.{} },
+ .{ .name = "BaryCoordNoPerspCentroidAMD", .value = 4993, .parameters = &.{} },
+ .{ .name = "BaryCoordNoPerspSampleAMD", .value = 4994, .parameters = &.{} },
+ .{ .name = "BaryCoordSmoothAMD", .value = 4995, .parameters = &.{} },
+ .{ .name = "BaryCoordSmoothCentroidAMD", .value = 4996, .parameters = &.{} },
+ .{ .name = "BaryCoordSmoothSampleAMD", .value = 4997, .parameters = &.{} },
+ .{ .name = "BaryCoordPullModelAMD", .value = 4998, .parameters = &.{} },
+ .{ .name = "FragStencilRefEXT", .value = 5014, .parameters = &.{} },
+ .{ .name = "RemainingRecursionLevelsAMDX", .value = 5021, .parameters = &.{} },
+ .{ .name = "ShaderIndexAMDX", .value = 5073, .parameters = &.{} },
+ .{ .name = "ViewportMaskNV", .value = 5253, .parameters = &.{} },
+ .{ .name = "SecondaryPositionNV", .value = 5257, .parameters = &.{} },
+ .{ .name = "SecondaryViewportMaskNV", .value = 5258, .parameters = &.{} },
+ .{ .name = "PositionPerViewNV", .value = 5261, .parameters = &.{} },
+ .{ .name = "ViewportMaskPerViewNV", .value = 5262, .parameters = &.{} },
+ .{ .name = "FullyCoveredEXT", .value = 5264, .parameters = &.{} },
+ .{ .name = "TaskCountNV", .value = 5274, .parameters = &.{} },
+ .{ .name = "PrimitiveCountNV", .value = 5275, .parameters = &.{} },
+ .{ .name = "PrimitiveIndicesNV", .value = 5276, .parameters = &.{} },
+ .{ .name = "ClipDistancePerViewNV", .value = 5277, .parameters = &.{} },
+ .{ .name = "CullDistancePerViewNV", .value = 5278, .parameters = &.{} },
+ .{ .name = "LayerPerViewNV", .value = 5279, .parameters = &.{} },
+ .{ .name = "MeshViewCountNV", .value = 5280, .parameters = &.{} },
+ .{ .name = "MeshViewIndicesNV", .value = 5281, .parameters = &.{} },
+ .{ .name = "BaryCoordKHR", .value = 5286, .parameters = &.{} },
+ .{ .name = "BaryCoordNoPerspKHR", .value = 5287, .parameters = &.{} },
+ .{ .name = "FragSizeEXT", .value = 5292, .parameters = &.{} },
+ .{ .name = "FragInvocationCountEXT", .value = 5293, .parameters = &.{} },
+ .{ .name = "PrimitivePointIndicesEXT", .value = 5294, .parameters = &.{} },
+ .{ .name = "PrimitiveLineIndicesEXT", .value = 5295, .parameters = &.{} },
+ .{ .name = "PrimitiveTriangleIndicesEXT", .value = 5296, .parameters = &.{} },
+ .{ .name = "CullPrimitiveEXT", .value = 5299, .parameters = &.{} },
+ .{ .name = "LaunchIdKHR", .value = 5319, .parameters = &.{} },
+ .{ .name = "LaunchSizeKHR", .value = 5320, .parameters = &.{} },
+ .{ .name = "WorldRayOriginKHR", .value = 5321, .parameters = &.{} },
+ .{ .name = "WorldRayDirectionKHR", .value = 5322, .parameters = &.{} },
+ .{ .name = "ObjectRayOriginKHR", .value = 5323, .parameters = &.{} },
+ .{ .name = "ObjectRayDirectionKHR", .value = 5324, .parameters = &.{} },
+ .{ .name = "RayTminKHR", .value = 5325, .parameters = &.{} },
+ .{ .name = "RayTmaxKHR", .value = 5326, .parameters = &.{} },
+ .{ .name = "InstanceCustomIndexKHR", .value = 5327, .parameters = &.{} },
+ .{ .name = "ObjectToWorldKHR", .value = 5330, .parameters = &.{} },
+ .{ .name = "WorldToObjectKHR", .value = 5331, .parameters = &.{} },
+ .{ .name = "HitTNV", .value = 5332, .parameters = &.{} },
+ .{ .name = "HitKindKHR", .value = 5333, .parameters = &.{} },
+ .{ .name = "CurrentRayTimeNV", .value = 5334, .parameters = &.{} },
+ .{ .name = "HitTriangleVertexPositionsKHR", .value = 5335, .parameters = &.{} },
+ .{ .name = "HitMicroTriangleVertexPositionsNV", .value = 5337, .parameters = &.{} },
+ .{ .name = "HitMicroTriangleVertexBarycentricsNV", .value = 5344, .parameters = &.{} },
+ .{ .name = "IncomingRayFlagsKHR", .value = 5351, .parameters = &.{} },
+ .{ .name = "RayGeometryIndexKHR", .value = 5352, .parameters = &.{} },
+ .{ .name = "HitIsSphereNV", .value = 5359, .parameters = &.{} },
+ .{ .name = "HitIsLSSNV", .value = 5360, .parameters = &.{} },
+ .{ .name = "HitSpherePositionNV", .value = 5361, .parameters = &.{} },
+ .{ .name = "WarpsPerSMNV", .value = 5374, .parameters = &.{} },
+ .{ .name = "SMCountNV", .value = 5375, .parameters = &.{} },
+ .{ .name = "WarpIDNV", .value = 5376, .parameters = &.{} },
+ .{ .name = "SMIDNV", .value = 5377, .parameters = &.{} },
+ .{ .name = "HitLSSPositionsNV", .value = 5396, .parameters = &.{} },
+ .{ .name = "HitKindFrontFacingMicroTriangleNV", .value = 5405, .parameters = &.{} },
+ .{ .name = "HitKindBackFacingMicroTriangleNV", .value = 5406, .parameters = &.{} },
+ .{ .name = "HitSphereRadiusNV", .value = 5420, .parameters = &.{} },
+ .{ .name = "HitLSSRadiiNV", .value = 5421, .parameters = &.{} },
+ .{ .name = "ClusterIDNV", .value = 5436, .parameters = &.{} },
+ .{ .name = "CullMaskKHR", .value = 6021, .parameters = &.{} },
+ },
+ .scope => &.{
+ .{ .name = "CrossDevice", .value = 0, .parameters = &.{} },
+ .{ .name = "Device", .value = 1, .parameters = &.{} },
+ .{ .name = "Workgroup", .value = 2, .parameters = &.{} },
+ .{ .name = "Subgroup", .value = 3, .parameters = &.{} },
+ .{ .name = "Invocation", .value = 4, .parameters = &.{} },
+ .{ .name = "QueueFamily", .value = 5, .parameters = &.{} },
+ .{ .name = "ShaderCallKHR", .value = 6, .parameters = &.{} },
+ },
+ .group_operation => &.{
+ .{ .name = "Reduce", .value = 0, .parameters = &.{} },
+ .{ .name = "InclusiveScan", .value = 1, .parameters = &.{} },
+ .{ .name = "ExclusiveScan", .value = 2, .parameters = &.{} },
+ .{ .name = "ClusteredReduce", .value = 3, .parameters = &.{} },
+ .{ .name = "PartitionedReduceNV", .value = 6, .parameters = &.{} },
+ .{ .name = "PartitionedInclusiveScanNV", .value = 7, .parameters = &.{} },
+ .{ .name = "PartitionedExclusiveScanNV", .value = 8, .parameters = &.{} },
+ },
+ .kernel_enqueue_flags => &.{
+ .{ .name = "NoWait", .value = 0, .parameters = &.{} },
+ .{ .name = "WaitKernel", .value = 1, .parameters = &.{} },
+ .{ .name = "WaitWorkGroup", .value = 2, .parameters = &.{} },
+ },
+ .capability => &.{
+ .{ .name = "Matrix", .value = 0, .parameters = &.{} },
+ .{ .name = "Shader", .value = 1, .parameters = &.{} },
+ .{ .name = "Geometry", .value = 2, .parameters = &.{} },
+ .{ .name = "Tessellation", .value = 3, .parameters = &.{} },
+ .{ .name = "Addresses", .value = 4, .parameters = &.{} },
+ .{ .name = "Linkage", .value = 5, .parameters = &.{} },
+ .{ .name = "Kernel", .value = 6, .parameters = &.{} },
+ .{ .name = "Vector16", .value = 7, .parameters = &.{} },
+ .{ .name = "Float16Buffer", .value = 8, .parameters = &.{} },
+ .{ .name = "Float16", .value = 9, .parameters = &.{} },
+ .{ .name = "Float64", .value = 10, .parameters = &.{} },
+ .{ .name = "Int64", .value = 11, .parameters = &.{} },
+ .{ .name = "Int64Atomics", .value = 12, .parameters = &.{} },
+ .{ .name = "ImageBasic", .value = 13, .parameters = &.{} },
+ .{ .name = "ImageReadWrite", .value = 14, .parameters = &.{} },
+ .{ .name = "ImageMipmap", .value = 15, .parameters = &.{} },
+ .{ .name = "Pipes", .value = 17, .parameters = &.{} },
+ .{ .name = "Groups", .value = 18, .parameters = &.{} },
+ .{ .name = "DeviceEnqueue", .value = 19, .parameters = &.{} },
+ .{ .name = "LiteralSampler", .value = 20, .parameters = &.{} },
+ .{ .name = "AtomicStorage", .value = 21, .parameters = &.{} },
+ .{ .name = "Int16", .value = 22, .parameters = &.{} },
+ .{ .name = "TessellationPointSize", .value = 23, .parameters = &.{} },
+ .{ .name = "GeometryPointSize", .value = 24, .parameters = &.{} },
+ .{ .name = "ImageGatherExtended", .value = 25, .parameters = &.{} },
+ .{ .name = "StorageImageMultisample", .value = 27, .parameters = &.{} },
+ .{ .name = "UniformBufferArrayDynamicIndexing", .value = 28, .parameters = &.{} },
+ .{ .name = "SampledImageArrayDynamicIndexing", .value = 29, .parameters = &.{} },
+ .{ .name = "StorageBufferArrayDynamicIndexing", .value = 30, .parameters = &.{} },
+ .{ .name = "StorageImageArrayDynamicIndexing", .value = 31, .parameters = &.{} },
+ .{ .name = "ClipDistance", .value = 32, .parameters = &.{} },
+ .{ .name = "CullDistance", .value = 33, .parameters = &.{} },
+ .{ .name = "ImageCubeArray", .value = 34, .parameters = &.{} },
+ .{ .name = "SampleRateShading", .value = 35, .parameters = &.{} },
+ .{ .name = "ImageRect", .value = 36, .parameters = &.{} },
+ .{ .name = "SampledRect", .value = 37, .parameters = &.{} },
+ .{ .name = "GenericPointer", .value = 38, .parameters = &.{} },
+ .{ .name = "Int8", .value = 39, .parameters = &.{} },
+ .{ .name = "InputAttachment", .value = 40, .parameters = &.{} },
+ .{ .name = "SparseResidency", .value = 41, .parameters = &.{} },
+ .{ .name = "MinLod", .value = 42, .parameters = &.{} },
+ .{ .name = "Sampled1D", .value = 43, .parameters = &.{} },
+ .{ .name = "Image1D", .value = 44, .parameters = &.{} },
+ .{ .name = "SampledCubeArray", .value = 45, .parameters = &.{} },
+ .{ .name = "SampledBuffer", .value = 46, .parameters = &.{} },
+ .{ .name = "ImageBuffer", .value = 47, .parameters = &.{} },
+ .{ .name = "ImageMSArray", .value = 48, .parameters = &.{} },
+ .{ .name = "StorageImageExtendedFormats", .value = 49, .parameters = &.{} },
+ .{ .name = "ImageQuery", .value = 50, .parameters = &.{} },
+ .{ .name = "DerivativeControl", .value = 51, .parameters = &.{} },
+ .{ .name = "InterpolationFunction", .value = 52, .parameters = &.{} },
+ .{ .name = "TransformFeedback", .value = 53, .parameters = &.{} },
+ .{ .name = "GeometryStreams", .value = 54, .parameters = &.{} },
+ .{ .name = "StorageImageReadWithoutFormat", .value = 55, .parameters = &.{} },
+ .{ .name = "StorageImageWriteWithoutFormat", .value = 56, .parameters = &.{} },
+ .{ .name = "MultiViewport", .value = 57, .parameters = &.{} },
+ .{ .name = "SubgroupDispatch", .value = 58, .parameters = &.{} },
+ .{ .name = "NamedBarrier", .value = 59, .parameters = &.{} },
+ .{ .name = "PipeStorage", .value = 60, .parameters = &.{} },
+ .{ .name = "GroupNonUniform", .value = 61, .parameters = &.{} },
+ .{ .name = "GroupNonUniformVote", .value = 62, .parameters = &.{} },
+ .{ .name = "GroupNonUniformArithmetic", .value = 63, .parameters = &.{} },
+ .{ .name = "GroupNonUniformBallot", .value = 64, .parameters = &.{} },
+ .{ .name = "GroupNonUniformShuffle", .value = 65, .parameters = &.{} },
+ .{ .name = "GroupNonUniformShuffleRelative", .value = 66, .parameters = &.{} },
+ .{ .name = "GroupNonUniformClustered", .value = 67, .parameters = &.{} },
+ .{ .name = "GroupNonUniformQuad", .value = 68, .parameters = &.{} },
+ .{ .name = "ShaderLayer", .value = 69, .parameters = &.{} },
+ .{ .name = "ShaderViewportIndex", .value = 70, .parameters = &.{} },
+ .{ .name = "UniformDecoration", .value = 71, .parameters = &.{} },
+ .{ .name = "CoreBuiltinsARM", .value = 4165, .parameters = &.{} },
+ .{ .name = "TileImageColorReadAccessEXT", .value = 4166, .parameters = &.{} },
+ .{ .name = "TileImageDepthReadAccessEXT", .value = 4167, .parameters = &.{} },
+ .{ .name = "TileImageStencilReadAccessEXT", .value = 4168, .parameters = &.{} },
+ .{ .name = "TensorsARM", .value = 4174, .parameters = &.{} },
+ .{ .name = "StorageTensorArrayDynamicIndexingARM", .value = 4175, .parameters = &.{} },
+ .{ .name = "StorageTensorArrayNonUniformIndexingARM", .value = 4176, .parameters = &.{} },
+ .{ .name = "GraphARM", .value = 4191, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixLayoutsARM", .value = 4201, .parameters = &.{} },
+ .{ .name = "Float8EXT", .value = 4212, .parameters = &.{} },
+ .{ .name = "Float8CooperativeMatrixEXT", .value = 4213, .parameters = &.{} },
+ .{ .name = "FragmentShadingRateKHR", .value = 4422, .parameters = &.{} },
+ .{ .name = "SubgroupBallotKHR", .value = 4423, .parameters = &.{} },
+ .{ .name = "DrawParameters", .value = 4427, .parameters = &.{} },
+ .{ .name = "WorkgroupMemoryExplicitLayoutKHR", .value = 4428, .parameters = &.{} },
+ .{ .name = "WorkgroupMemoryExplicitLayout8BitAccessKHR", .value = 4429, .parameters = &.{} },
+ .{ .name = "WorkgroupMemoryExplicitLayout16BitAccessKHR", .value = 4430, .parameters = &.{} },
+ .{ .name = "SubgroupVoteKHR", .value = 4431, .parameters = &.{} },
+ .{ .name = "StorageBuffer16BitAccess", .value = 4433, .parameters = &.{} },
+ .{ .name = "UniformAndStorageBuffer16BitAccess", .value = 4434, .parameters = &.{} },
+ .{ .name = "StoragePushConstant16", .value = 4435, .parameters = &.{} },
+ .{ .name = "StorageInputOutput16", .value = 4436, .parameters = &.{} },
+ .{ .name = "DeviceGroup", .value = 4437, .parameters = &.{} },
+ .{ .name = "MultiView", .value = 4439, .parameters = &.{} },
+ .{ .name = "VariablePointersStorageBuffer", .value = 4441, .parameters = &.{} },
+ .{ .name = "VariablePointers", .value = 4442, .parameters = &.{} },
+ .{ .name = "AtomicStorageOps", .value = 4445, .parameters = &.{} },
+ .{ .name = "SampleMaskPostDepthCoverage", .value = 4447, .parameters = &.{} },
+ .{ .name = "StorageBuffer8BitAccess", .value = 4448, .parameters = &.{} },
+ .{ .name = "UniformAndStorageBuffer8BitAccess", .value = 4449, .parameters = &.{} },
+ .{ .name = "StoragePushConstant8", .value = 4450, .parameters = &.{} },
+ .{ .name = "DenormPreserve", .value = 4464, .parameters = &.{} },
+ .{ .name = "DenormFlushToZero", .value = 4465, .parameters = &.{} },
+ .{ .name = "SignedZeroInfNanPreserve", .value = 4466, .parameters = &.{} },
+ .{ .name = "RoundingModeRTE", .value = 4467, .parameters = &.{} },
+ .{ .name = "RoundingModeRTZ", .value = 4468, .parameters = &.{} },
+ .{ .name = "RayQueryProvisionalKHR", .value = 4471, .parameters = &.{} },
+ .{ .name = "RayQueryKHR", .value = 4472, .parameters = &.{} },
+ .{ .name = "UntypedPointersKHR", .value = 4473, .parameters = &.{} },
+ .{ .name = "RayTraversalPrimitiveCullingKHR", .value = 4478, .parameters = &.{} },
+ .{ .name = "RayTracingKHR", .value = 4479, .parameters = &.{} },
+ .{ .name = "TextureSampleWeightedQCOM", .value = 4484, .parameters = &.{} },
+ .{ .name = "TextureBoxFilterQCOM", .value = 4485, .parameters = &.{} },
+ .{ .name = "TextureBlockMatchQCOM", .value = 4486, .parameters = &.{} },
+ .{ .name = "TileShadingQCOM", .value = 4495, .parameters = &.{} },
+ .{ .name = "TextureBlockMatch2QCOM", .value = 4498, .parameters = &.{} },
+ .{ .name = "Float16ImageAMD", .value = 5008, .parameters = &.{} },
+ .{ .name = "ImageGatherBiasLodAMD", .value = 5009, .parameters = &.{} },
+ .{ .name = "FragmentMaskAMD", .value = 5010, .parameters = &.{} },
+ .{ .name = "StencilExportEXT", .value = 5013, .parameters = &.{} },
+ .{ .name = "ImageReadWriteLodAMD", .value = 5015, .parameters = &.{} },
+ .{ .name = "Int64ImageEXT", .value = 5016, .parameters = &.{} },
+ .{ .name = "ShaderClockKHR", .value = 5055, .parameters = &.{} },
+ .{ .name = "ShaderEnqueueAMDX", .value = 5067, .parameters = &.{} },
+ .{ .name = "QuadControlKHR", .value = 5087, .parameters = &.{} },
+ .{ .name = "Int4TypeINTEL", .value = 5112, .parameters = &.{} },
+ .{ .name = "Int4CooperativeMatrixINTEL", .value = 5114, .parameters = &.{} },
+ .{ .name = "BFloat16TypeKHR", .value = 5116, .parameters = &.{} },
+ .{ .name = "BFloat16DotProductKHR", .value = 5117, .parameters = &.{} },
+ .{ .name = "BFloat16CooperativeMatrixKHR", .value = 5118, .parameters = &.{} },
+ .{ .name = "SampleMaskOverrideCoverageNV", .value = 5249, .parameters = &.{} },
+ .{ .name = "GeometryShaderPassthroughNV", .value = 5251, .parameters = &.{} },
+ .{ .name = "ShaderViewportIndexLayerEXT", .value = 5254, .parameters = &.{} },
+ .{ .name = "ShaderViewportMaskNV", .value = 5255, .parameters = &.{} },
+ .{ .name = "ShaderStereoViewNV", .value = 5259, .parameters = &.{} },
+ .{ .name = "PerViewAttributesNV", .value = 5260, .parameters = &.{} },
+ .{ .name = "FragmentFullyCoveredEXT", .value = 5265, .parameters = &.{} },
+ .{ .name = "MeshShadingNV", .value = 5266, .parameters = &.{} },
+ .{ .name = "ImageFootprintNV", .value = 5282, .parameters = &.{} },
+ .{ .name = "MeshShadingEXT", .value = 5283, .parameters = &.{} },
+ .{ .name = "FragmentBarycentricKHR", .value = 5284, .parameters = &.{} },
+ .{ .name = "ComputeDerivativeGroupQuadsKHR", .value = 5288, .parameters = &.{} },
+ .{ .name = "FragmentDensityEXT", .value = 5291, .parameters = &.{} },
+ .{ .name = "GroupNonUniformPartitionedNV", .value = 5297, .parameters = &.{} },
+ .{ .name = "ShaderNonUniform", .value = 5301, .parameters = &.{} },
+ .{ .name = "RuntimeDescriptorArray", .value = 5302, .parameters = &.{} },
+ .{ .name = "InputAttachmentArrayDynamicIndexing", .value = 5303, .parameters = &.{} },
+ .{ .name = "UniformTexelBufferArrayDynamicIndexing", .value = 5304, .parameters = &.{} },
+ .{ .name = "StorageTexelBufferArrayDynamicIndexing", .value = 5305, .parameters = &.{} },
+ .{ .name = "UniformBufferArrayNonUniformIndexing", .value = 5306, .parameters = &.{} },
+ .{ .name = "SampledImageArrayNonUniformIndexing", .value = 5307, .parameters = &.{} },
+ .{ .name = "StorageBufferArrayNonUniformIndexing", .value = 5308, .parameters = &.{} },
+ .{ .name = "StorageImageArrayNonUniformIndexing", .value = 5309, .parameters = &.{} },
+ .{ .name = "InputAttachmentArrayNonUniformIndexing", .value = 5310, .parameters = &.{} },
+ .{ .name = "UniformTexelBufferArrayNonUniformIndexing", .value = 5311, .parameters = &.{} },
+ .{ .name = "StorageTexelBufferArrayNonUniformIndexing", .value = 5312, .parameters = &.{} },
+ .{ .name = "RayTracingPositionFetchKHR", .value = 5336, .parameters = &.{} },
+ .{ .name = "RayTracingNV", .value = 5340, .parameters = &.{} },
+ .{ .name = "RayTracingMotionBlurNV", .value = 5341, .parameters = &.{} },
+ .{ .name = "VulkanMemoryModel", .value = 5345, .parameters = &.{} },
+ .{ .name = "VulkanMemoryModelDeviceScope", .value = 5346, .parameters = &.{} },
+ .{ .name = "PhysicalStorageBufferAddresses", .value = 5347, .parameters = &.{} },
+ .{ .name = "ComputeDerivativeGroupLinearKHR", .value = 5350, .parameters = &.{} },
+ .{ .name = "RayTracingProvisionalKHR", .value = 5353, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixNV", .value = 5357, .parameters = &.{} },
+ .{ .name = "FragmentShaderSampleInterlockEXT", .value = 5363, .parameters = &.{} },
+ .{ .name = "FragmentShaderShadingRateInterlockEXT", .value = 5372, .parameters = &.{} },
+ .{ .name = "ShaderSMBuiltinsNV", .value = 5373, .parameters = &.{} },
+ .{ .name = "FragmentShaderPixelInterlockEXT", .value = 5378, .parameters = &.{} },
+ .{ .name = "DemoteToHelperInvocation", .value = 5379, .parameters = &.{} },
+ .{ .name = "DisplacementMicromapNV", .value = 5380, .parameters = &.{} },
+ .{ .name = "RayTracingOpacityMicromapEXT", .value = 5381, .parameters = &.{} },
+ .{ .name = "ShaderInvocationReorderNV", .value = 5383, .parameters = &.{} },
+ .{ .name = "BindlessTextureNV", .value = 5390, .parameters = &.{} },
+ .{ .name = "RayQueryPositionFetchKHR", .value = 5391, .parameters = &.{} },
+ .{ .name = "CooperativeVectorNV", .value = 5394, .parameters = &.{} },
+ .{ .name = "AtomicFloat16VectorNV", .value = 5404, .parameters = &.{} },
+ .{ .name = "RayTracingDisplacementMicromapNV", .value = 5409, .parameters = &.{} },
+ .{ .name = "RawAccessChainsNV", .value = 5414, .parameters = &.{} },
+ .{ .name = "RayTracingSpheresGeometryNV", .value = 5418, .parameters = &.{} },
+ .{ .name = "RayTracingLinearSweptSpheresGeometryNV", .value = 5419, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixReductionsNV", .value = 5430, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixConversionsNV", .value = 5431, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixPerElementOperationsNV", .value = 5432, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixTensorAddressingNV", .value = 5433, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixBlockLoadsNV", .value = 5434, .parameters = &.{} },
+ .{ .name = "CooperativeVectorTrainingNV", .value = 5435, .parameters = &.{} },
+ .{ .name = "RayTracingClusterAccelerationStructureNV", .value = 5437, .parameters = &.{} },
+ .{ .name = "TensorAddressingNV", .value = 5439, .parameters = &.{} },
+ .{ .name = "SubgroupShuffleINTEL", .value = 5568, .parameters = &.{} },
+ .{ .name = "SubgroupBufferBlockIOINTEL", .value = 5569, .parameters = &.{} },
+ .{ .name = "SubgroupImageBlockIOINTEL", .value = 5570, .parameters = &.{} },
+ .{ .name = "SubgroupImageMediaBlockIOINTEL", .value = 5579, .parameters = &.{} },
+ .{ .name = "RoundToInfinityINTEL", .value = 5582, .parameters = &.{} },
+ .{ .name = "FloatingPointModeINTEL", .value = 5583, .parameters = &.{} },
+ .{ .name = "IntegerFunctions2INTEL", .value = 5584, .parameters = &.{} },
+ .{ .name = "FunctionPointersINTEL", .value = 5603, .parameters = &.{} },
+ .{ .name = "IndirectReferencesINTEL", .value = 5604, .parameters = &.{} },
+ .{ .name = "AsmINTEL", .value = 5606, .parameters = &.{} },
+ .{ .name = "AtomicFloat32MinMaxEXT", .value = 5612, .parameters = &.{} },
+ .{ .name = "AtomicFloat64MinMaxEXT", .value = 5613, .parameters = &.{} },
+ .{ .name = "AtomicFloat16MinMaxEXT", .value = 5616, .parameters = &.{} },
+ .{ .name = "VectorComputeINTEL", .value = 5617, .parameters = &.{} },
+ .{ .name = "VectorAnyINTEL", .value = 5619, .parameters = &.{} },
+ .{ .name = "ExpectAssumeKHR", .value = 5629, .parameters = &.{} },
+ .{ .name = "SubgroupAvcMotionEstimationINTEL", .value = 5696, .parameters = &.{} },
+ .{ .name = "SubgroupAvcMotionEstimationIntraINTEL", .value = 5697, .parameters = &.{} },
+ .{ .name = "SubgroupAvcMotionEstimationChromaINTEL", .value = 5698, .parameters = &.{} },
+ .{ .name = "VariableLengthArrayINTEL", .value = 5817, .parameters = &.{} },
+ .{ .name = "FunctionFloatControlINTEL", .value = 5821, .parameters = &.{} },
+ .{ .name = "FPGAMemoryAttributesINTEL", .value = 5824, .parameters = &.{} },
+ .{ .name = "FPFastMathModeINTEL", .value = 5837, .parameters = &.{} },
+ .{ .name = "ArbitraryPrecisionIntegersINTEL", .value = 5844, .parameters = &.{} },
+ .{ .name = "ArbitraryPrecisionFloatingPointINTEL", .value = 5845, .parameters = &.{} },
+ .{ .name = "UnstructuredLoopControlsINTEL", .value = 5886, .parameters = &.{} },
+ .{ .name = "FPGALoopControlsINTEL", .value = 5888, .parameters = &.{} },
+ .{ .name = "KernelAttributesINTEL", .value = 5892, .parameters = &.{} },
+ .{ .name = "FPGAKernelAttributesINTEL", .value = 5897, .parameters = &.{} },
+ .{ .name = "FPGAMemoryAccessesINTEL", .value = 5898, .parameters = &.{} },
+ .{ .name = "FPGAClusterAttributesINTEL", .value = 5904, .parameters = &.{} },
+ .{ .name = "LoopFuseINTEL", .value = 5906, .parameters = &.{} },
+ .{ .name = "FPGADSPControlINTEL", .value = 5908, .parameters = &.{} },
+ .{ .name = "MemoryAccessAliasingINTEL", .value = 5910, .parameters = &.{} },
+ .{ .name = "FPGAInvocationPipeliningAttributesINTEL", .value = 5916, .parameters = &.{} },
+ .{ .name = "FPGABufferLocationINTEL", .value = 5920, .parameters = &.{} },
+ .{ .name = "ArbitraryPrecisionFixedPointINTEL", .value = 5922, .parameters = &.{} },
+ .{ .name = "USMStorageClassesINTEL", .value = 5935, .parameters = &.{} },
+ .{ .name = "RuntimeAlignedAttributeINTEL", .value = 5939, .parameters = &.{} },
+ .{ .name = "IOPipesINTEL", .value = 5943, .parameters = &.{} },
+ .{ .name = "BlockingPipesINTEL", .value = 5945, .parameters = &.{} },
+ .{ .name = "FPGARegINTEL", .value = 5948, .parameters = &.{} },
+ .{ .name = "DotProductInputAll", .value = 6016, .parameters = &.{} },
+ .{ .name = "DotProductInput4x8Bit", .value = 6017, .parameters = &.{} },
+ .{ .name = "DotProductInput4x8BitPacked", .value = 6018, .parameters = &.{} },
+ .{ .name = "DotProduct", .value = 6019, .parameters = &.{} },
+ .{ .name = "RayCullMaskKHR", .value = 6020, .parameters = &.{} },
+ .{ .name = "CooperativeMatrixKHR", .value = 6022, .parameters = &.{} },
+ .{ .name = "ReplicatedCompositesEXT", .value = 6024, .parameters = &.{} },
+ .{ .name = "BitInstructions", .value = 6025, .parameters = &.{} },
+ .{ .name = "GroupNonUniformRotateKHR", .value = 6026, .parameters = &.{} },
+ .{ .name = "FloatControls2", .value = 6029, .parameters = &.{} },
+ .{ .name = "AtomicFloat32AddEXT", .value = 6033, .parameters = &.{} },
+ .{ .name = "AtomicFloat64AddEXT", .value = 6034, .parameters = &.{} },
+ .{ .name = "LongCompositesINTEL", .value = 6089, .parameters = &.{} },
+ .{ .name = "OptNoneEXT", .value = 6094, .parameters = &.{} },
+ .{ .name = "AtomicFloat16AddEXT", .value = 6095, .parameters = &.{} },
+ .{ .name = "DebugInfoModuleINTEL", .value = 6114, .parameters = &.{} },
+ .{ .name = "BFloat16ConversionINTEL", .value = 6115, .parameters = &.{} },
+ .{ .name = "SplitBarrierINTEL", .value = 6141, .parameters = &.{} },
+ .{ .name = "ArithmeticFenceEXT", .value = 6144, .parameters = &.{} },
+ .{ .name = "FPGAClusterAttributesV2INTEL", .value = 6150, .parameters = &.{} },
+ .{ .name = "FPGAKernelAttributesv2INTEL", .value = 6161, .parameters = &.{} },
+ .{ .name = "TaskSequenceINTEL", .value = 6162, .parameters = &.{} },
+ .{ .name = "FPMaxErrorINTEL", .value = 6169, .parameters = &.{} },
+ .{ .name = "FPGALatencyControlINTEL", .value = 6171, .parameters = &.{} },
+ .{ .name = "FPGAArgumentInterfacesINTEL", .value = 6174, .parameters = &.{} },
+ .{ .name = "GlobalVariableHostAccessINTEL", .value = 6187, .parameters = &.{} },
+ .{ .name = "GlobalVariableFPGADecorationsINTEL", .value = 6189, .parameters = &.{} },
+ .{ .name = "SubgroupBufferPrefetchINTEL", .value = 6220, .parameters = &.{} },
+ .{ .name = "Subgroup2DBlockIOINTEL", .value = 6228, .parameters = &.{} },
+ .{ .name = "Subgroup2DBlockTransformINTEL", .value = 6229, .parameters = &.{} },
+ .{ .name = "Subgroup2DBlockTransposeINTEL", .value = 6230, .parameters = &.{} },
+ .{ .name = "SubgroupMatrixMultiplyAccumulateINTEL", .value = 6236, .parameters = &.{} },
+ .{ .name = "TernaryBitwiseFunctionINTEL", .value = 6241, .parameters = &.{} },
+ .{ .name = "GroupUniformArithmeticKHR", .value = 6400, .parameters = &.{} },
+ .{ .name = "TensorFloat32RoundingINTEL", .value = 6425, .parameters = &.{} },
+ .{ .name = "MaskedGatherScatterINTEL", .value = 6427, .parameters = &.{} },
+ .{ .name = "CacheControlsINTEL", .value = 6441, .parameters = &.{} },
+ .{ .name = "RegisterLimitsINTEL", .value = 6460, .parameters = &.{} },
+ .{ .name = "BindlessImagesINTEL", .value = 6528, .parameters = &.{} },
+ },
+ .ray_query_intersection => &.{
+ .{ .name = "RayQueryCandidateIntersectionKHR", .value = 0, .parameters = &.{} },
+ .{ .name = "RayQueryCommittedIntersectionKHR", .value = 1, .parameters = &.{} },
+ },
+ .ray_query_committed_intersection_type => &.{
+ .{ .name = "RayQueryCommittedIntersectionNoneKHR", .value = 0, .parameters = &.{} },
+ .{ .name = "RayQueryCommittedIntersectionTriangleKHR", .value = 1, .parameters = &.{} },
+ .{ .name = "RayQueryCommittedIntersectionGeneratedKHR", .value = 2, .parameters = &.{} },
+ },
+ .ray_query_candidate_intersection_type => &.{
+ .{ .name = "RayQueryCandidateIntersectionTriangleKHR", .value = 0, .parameters = &.{} },
+ .{ .name = "RayQueryCandidateIntersectionAABBKHR", .value = 1, .parameters = &.{} },
+ },
+ .packed_vector_format => &.{
+ .{ .name = "PackedVectorFormat4x8Bit", .value = 0, .parameters = &.{} },
+ },
+ .cooperative_matrix_operands => &.{
+ .{ .name = "NoneKHR", .value = 0x0000, .parameters = &.{} },
+ .{ .name = "MatrixASignedComponentsKHR", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "MatrixBSignedComponentsKHR", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "MatrixCSignedComponentsKHR", .value = 0x0004, .parameters = &.{} },
+ .{ .name = "MatrixResultSignedComponentsKHR", .value = 0x0008, .parameters = &.{} },
+ .{ .name = "SaturatingAccumulationKHR", .value = 0x0010, .parameters = &.{} },
+ },
+ .cooperative_matrix_layout => &.{
+ .{ .name = "RowMajorKHR", .value = 0, .parameters = &.{} },
+ .{ .name = "ColumnMajorKHR", .value = 1, .parameters = &.{} },
+ .{ .name = "RowBlockedInterleavedARM", .value = 4202, .parameters = &.{} },
+ .{ .name = "ColumnBlockedInterleavedARM", .value = 4203, .parameters = &.{} },
+ },
+ .cooperative_matrix_use => &.{
+ .{ .name = "MatrixAKHR", .value = 0, .parameters = &.{} },
+ .{ .name = "MatrixBKHR", .value = 1, .parameters = &.{} },
+ .{ .name = "MatrixAccumulatorKHR", .value = 2, .parameters = &.{} },
+ },
+ .cooperative_matrix_reduce => &.{
+ .{ .name = "Row", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "Column", .value = 0x0002, .parameters = &.{} },
+ .{ .name = "2x2", .value = 0x0004, .parameters = &.{} },
+ },
+ .tensor_clamp_mode => &.{
+ .{ .name = "Undefined", .value = 0, .parameters = &.{} },
+ .{ .name = "Constant", .value = 1, .parameters = &.{} },
+ .{ .name = "ClampToEdge", .value = 2, .parameters = &.{} },
+ .{ .name = "Repeat", .value = 3, .parameters = &.{} },
+ .{ .name = "RepeatMirrored", .value = 4, .parameters = &.{} },
+ },
+ .tensor_addressing_operands => &.{
+ .{ .name = "TensorView", .value = 0x0001, .parameters = &.{.id_ref} },
+ .{ .name = "DecodeFunc", .value = 0x0002, .parameters = &.{.id_ref} },
+ },
+ .initialization_mode_qualifier => &.{
+ .{ .name = "InitOnDeviceReprogramINTEL", .value = 0, .parameters = &.{} },
+ .{ .name = "InitOnDeviceResetINTEL", .value = 1, .parameters = &.{} },
+ },
+ .load_cache_control => &.{
+ .{ .name = "UncachedINTEL", .value = 0, .parameters = &.{} },
+ .{ .name = "CachedINTEL", .value = 1, .parameters = &.{} },
+ .{ .name = "StreamingINTEL", .value = 2, .parameters = &.{} },
+ .{ .name = "InvalidateAfterReadINTEL", .value = 3, .parameters = &.{} },
+ .{ .name = "ConstCachedINTEL", .value = 4, .parameters = &.{} },
+ },
+ .store_cache_control => &.{
+ .{ .name = "UncachedINTEL", .value = 0, .parameters = &.{} },
+ .{ .name = "WriteThroughINTEL", .value = 1, .parameters = &.{} },
+ .{ .name = "WriteBackINTEL", .value = 2, .parameters = &.{} },
+ .{ .name = "StreamingINTEL", .value = 3, .parameters = &.{} },
+ },
+ .named_maximum_number_of_registers => &.{
+ .{ .name = "AutoINTEL", .value = 0, .parameters = &.{} },
+ },
+ .matrix_multiply_accumulate_operands => &.{
+ .{ .name = "MatrixASignedComponentsINTEL", .value = 0x1, .parameters = &.{} },
+ .{ .name = "MatrixBSignedComponentsINTEL", .value = 0x2, .parameters = &.{} },
+ .{ .name = "MatrixCBFloat16INTEL", .value = 0x4, .parameters = &.{} },
+ .{ .name = "MatrixResultBFloat16INTEL", .value = 0x8, .parameters = &.{} },
+ .{ .name = "MatrixAPackedInt8INTEL", .value = 0x10, .parameters = &.{} },
+ .{ .name = "MatrixBPackedInt8INTEL", .value = 0x20, .parameters = &.{} },
+ .{ .name = "MatrixAPackedInt4INTEL", .value = 0x40, .parameters = &.{} },
+ .{ .name = "MatrixBPackedInt4INTEL", .value = 0x80, .parameters = &.{} },
+ .{ .name = "MatrixATF32INTEL", .value = 0x100, .parameters = &.{} },
+ .{ .name = "MatrixBTF32INTEL", .value = 0x200, .parameters = &.{} },
+ .{ .name = "MatrixAPackedFloat16INTEL", .value = 0x400, .parameters = &.{} },
+ .{ .name = "MatrixBPackedFloat16INTEL", .value = 0x800, .parameters = &.{} },
+ .{ .name = "MatrixAPackedBFloat16INTEL", .value = 0x1000, .parameters = &.{} },
+ .{ .name = "MatrixBPackedBFloat16INTEL", .value = 0x2000, .parameters = &.{} },
+ },
+ .fp_encoding => &.{
+ .{ .name = "BFloat16KHR", .value = 0, .parameters = &.{} },
+ .{ .name = "Float8E4M3EXT", .value = 4214, .parameters = &.{} },
+ .{ .name = "Float8E5M2EXT", .value = 4215, .parameters = &.{} },
+ },
+ .cooperative_vector_matrix_layout => &.{
+ .{ .name = "RowMajorNV", .value = 0, .parameters = &.{} },
+ .{ .name = "ColumnMajorNV", .value = 1, .parameters = &.{} },
+ .{ .name = "InferencingOptimalNV", .value = 2, .parameters = &.{} },
+ .{ .name = "TrainingOptimalNV", .value = 3, .parameters = &.{} },
+ },
+ .component_type => &.{
+ .{ .name = "Float16NV", .value = 0, .parameters = &.{} },
+ .{ .name = "Float32NV", .value = 1, .parameters = &.{} },
+ .{ .name = "Float64NV", .value = 2, .parameters = &.{} },
+ .{ .name = "SignedInt8NV", .value = 3, .parameters = &.{} },
+ .{ .name = "SignedInt16NV", .value = 4, .parameters = &.{} },
+ .{ .name = "SignedInt32NV", .value = 5, .parameters = &.{} },
+ .{ .name = "SignedInt64NV", .value = 6, .parameters = &.{} },
+ .{ .name = "UnsignedInt8NV", .value = 7, .parameters = &.{} },
+ .{ .name = "UnsignedInt16NV", .value = 8, .parameters = &.{} },
+ .{ .name = "UnsignedInt32NV", .value = 9, .parameters = &.{} },
+ .{ .name = "UnsignedInt64NV", .value = 10, .parameters = &.{} },
+ .{ .name = "SignedInt8PackedNV", .value = 1000491000, .parameters = &.{} },
+ .{ .name = "UnsignedInt8PackedNV", .value = 1000491001, .parameters = &.{} },
+ .{ .name = "FloatE4M3NV", .value = 1000491002, .parameters = &.{} },
+ .{ .name = "FloatE5M2NV", .value = 1000491003, .parameters = &.{} },
+ },
+ .id_result_type => unreachable,
+ .id_result => unreachable,
+ .id_memory_semantics => unreachable,
+ .id_scope => unreachable,
+ .id_ref => unreachable,
+ .literal_integer => unreachable,
+ .literal_string => unreachable,
+ .literal_float => unreachable,
+ .literal_context_dependent_number => unreachable,
+ .literal_ext_inst_integer => unreachable,
+ .literal_spec_constant_op_integer => unreachable,
+ .pair_literal_integer_id_ref => unreachable,
+ .pair_id_ref_literal_integer => unreachable,
+ .pair_id_ref_id_ref => unreachable,
+ .tensor_operands => &.{
+ .{ .name = "NoneARM", .value = 0x0000, .parameters = &.{} },
+ .{ .name = "NontemporalARM", .value = 0x0001, .parameters = &.{} },
+ .{ .name = "OutOfBoundsValueARM", .value = 0x0002, .parameters = &.{.id_ref} },
+ .{ .name = "MakeElementAvailableARM", .value = 0x0004, .parameters = &.{.id_ref} },
+ .{ .name = "MakeElementVisibleARM", .value = 0x0008, .parameters = &.{.id_ref} },
+ .{ .name = "NonPrivateElementARM", .value = 0x0010, .parameters = &.{} },
+ },
+ .debug_info_debug_info_flags => &.{
+ .{ .name = "FlagIsProtected", .value = 0x01, .parameters = &.{} },
+ .{ .name = "FlagIsPrivate", .value = 0x02, .parameters = &.{} },
+ .{ .name = "FlagIsPublic", .value = 0x03, .parameters = &.{} },
+ .{ .name = "FlagIsLocal", .value = 0x04, .parameters = &.{} },
+ .{ .name = "FlagIsDefinition", .value = 0x08, .parameters = &.{} },
+ .{ .name = "FlagFwdDecl", .value = 0x10, .parameters = &.{} },
+ .{ .name = "FlagArtificial", .value = 0x20, .parameters = &.{} },
+ .{ .name = "FlagExplicit", .value = 0x40, .parameters = &.{} },
+ .{ .name = "FlagPrototyped", .value = 0x80, .parameters = &.{} },
+ .{ .name = "FlagObjectPointer", .value = 0x100, .parameters = &.{} },
+ .{ .name = "FlagStaticMember", .value = 0x200, .parameters = &.{} },
+ .{ .name = "FlagIndirectVariable", .value = 0x400, .parameters = &.{} },
+ .{ .name = "FlagLValueReference", .value = 0x800, .parameters = &.{} },
+ .{ .name = "FlagRValueReference", .value = 0x1000, .parameters = &.{} },
+ .{ .name = "FlagIsOptimized", .value = 0x2000, .parameters = &.{} },
+ },
+ .debug_info_debug_base_type_attribute_encoding => &.{
+ .{ .name = "Unspecified", .value = 0, .parameters = &.{} },
+ .{ .name = "Address", .value = 1, .parameters = &.{} },
+ .{ .name = "Boolean", .value = 2, .parameters = &.{} },
+ .{ .name = "Float", .value = 4, .parameters = &.{} },
+ .{ .name = "Signed", .value = 5, .parameters = &.{} },
+ .{ .name = "SignedChar", .value = 6, .parameters = &.{} },
+ .{ .name = "Unsigned", .value = 7, .parameters = &.{} },
+ .{ .name = "UnsignedChar", .value = 8, .parameters = &.{} },
+ },
+ .debug_info_debug_composite_type => &.{
+ .{ .name = "Class", .value = 0, .parameters = &.{} },
+ .{ .name = "Structure", .value = 1, .parameters = &.{} },
+ .{ .name = "Union", .value = 2, .parameters = &.{} },
+ },
+ .debug_info_debug_type_qualifier => &.{
+ .{ .name = "ConstType", .value = 0, .parameters = &.{} },
+ .{ .name = "VolatileType", .value = 1, .parameters = &.{} },
+ .{ .name = "RestrictType", .value = 2, .parameters = &.{} },
+ },
+ .debug_info_debug_operation => &.{
+ .{ .name = "Deref", .value = 0, .parameters = &.{} },
+ .{ .name = "Plus", .value = 1, .parameters = &.{} },
+ .{ .name = "Minus", .value = 2, .parameters = &.{} },
+ .{ .name = "PlusUconst", .value = 3, .parameters = &.{.literal_integer} },
+ .{ .name = "BitPiece", .value = 4, .parameters = &.{ .literal_integer, .literal_integer } },
+ .{ .name = "Swap", .value = 5, .parameters = &.{} },
+ .{ .name = "Xderef", .value = 6, .parameters = &.{} },
+ .{ .name = "StackValue", .value = 7, .parameters = &.{} },
+ .{ .name = "Constu", .value = 8, .parameters = &.{.literal_integer} },
+ },
+ .open_cl_debug_info_100_debug_info_flags => &.{
+ .{ .name = "FlagIsProtected", .value = 0x01, .parameters = &.{} },
+ .{ .name = "FlagIsPrivate", .value = 0x02, .parameters = &.{} },
+ .{ .name = "FlagIsPublic", .value = 0x03, .parameters = &.{} },
+ .{ .name = "FlagIsLocal", .value = 0x04, .parameters = &.{} },
+ .{ .name = "FlagIsDefinition", .value = 0x08, .parameters = &.{} },
+ .{ .name = "FlagFwdDecl", .value = 0x10, .parameters = &.{} },
+ .{ .name = "FlagArtificial", .value = 0x20, .parameters = &.{} },
+ .{ .name = "FlagExplicit", .value = 0x40, .parameters = &.{} },
+ .{ .name = "FlagPrototyped", .value = 0x80, .parameters = &.{} },
+ .{ .name = "FlagObjectPointer", .value = 0x100, .parameters = &.{} },
+ .{ .name = "FlagStaticMember", .value = 0x200, .parameters = &.{} },
+ .{ .name = "FlagIndirectVariable", .value = 0x400, .parameters = &.{} },
+ .{ .name = "FlagLValueReference", .value = 0x800, .parameters = &.{} },
+ .{ .name = "FlagRValueReference", .value = 0x1000, .parameters = &.{} },
+ .{ .name = "FlagIsOptimized", .value = 0x2000, .parameters = &.{} },
+ .{ .name = "FlagIsEnumClass", .value = 0x4000, .parameters = &.{} },
+ .{ .name = "FlagTypePassByValue", .value = 0x8000, .parameters = &.{} },
+ .{ .name = "FlagTypePassByReference", .value = 0x10000, .parameters = &.{} },
+ },
+ .open_cl_debug_info_100_debug_base_type_attribute_encoding => &.{
+ .{ .name = "Unspecified", .value = 0, .parameters = &.{} },
+ .{ .name = "Address", .value = 1, .parameters = &.{} },
+ .{ .name = "Boolean", .value = 2, .parameters = &.{} },
+ .{ .name = "Float", .value = 3, .parameters = &.{} },
+ .{ .name = "Signed", .value = 4, .parameters = &.{} },
+ .{ .name = "SignedChar", .value = 5, .parameters = &.{} },
+ .{ .name = "Unsigned", .value = 6, .parameters = &.{} },
+ .{ .name = "UnsignedChar", .value = 7, .parameters = &.{} },
+ },
+ .open_cl_debug_info_100_debug_composite_type => &.{
+ .{ .name = "Class", .value = 0, .parameters = &.{} },
+ .{ .name = "Structure", .value = 1, .parameters = &.{} },
+ .{ .name = "Union", .value = 2, .parameters = &.{} },
+ },
+ .open_cl_debug_info_100_debug_type_qualifier => &.{
+ .{ .name = "ConstType", .value = 0, .parameters = &.{} },
+ .{ .name = "VolatileType", .value = 1, .parameters = &.{} },
+ .{ .name = "RestrictType", .value = 2, .parameters = &.{} },
+ .{ .name = "AtomicType", .value = 3, .parameters = &.{} },
+ },
+ .open_cl_debug_info_100_debug_operation => &.{
+ .{ .name = "Deref", .value = 0, .parameters = &.{} },
+ .{ .name = "Plus", .value = 1, .parameters = &.{} },
+ .{ .name = "Minus", .value = 2, .parameters = &.{} },
+ .{ .name = "PlusUconst", .value = 3, .parameters = &.{.literal_integer} },
+ .{ .name = "BitPiece", .value = 4, .parameters = &.{ .literal_integer, .literal_integer } },
+ .{ .name = "Swap", .value = 5, .parameters = &.{} },
+ .{ .name = "Xderef", .value = 6, .parameters = &.{} },
+ .{ .name = "StackValue", .value = 7, .parameters = &.{} },
+ .{ .name = "Constu", .value = 8, .parameters = &.{.literal_integer} },
+ .{ .name = "Fragment", .value = 9, .parameters = &.{ .literal_integer, .literal_integer } },
+ },
+ .open_cl_debug_info_100_debug_imported_entity => &.{
+ .{ .name = "ImportedModule", .value = 0, .parameters = &.{} },
+ .{ .name = "ImportedDeclaration", .value = 1, .parameters = &.{} },
+ },
+ .non_semantic_clspv_reflection_6_kernel_property_flags => &.{
+ .{ .name = "MayUsePrintf", .value = 0x1, .parameters = &.{} },
+ },
+ .non_semantic_shader_debug_info_100_debug_info_flags => &.{
+ .{ .name = "FlagIsProtected", .value = 0x01, .parameters = &.{} },
+ .{ .name = "FlagIsPrivate", .value = 0x02, .parameters = &.{} },
+ .{ .name = "FlagIsPublic", .value = 0x03, .parameters = &.{} },
+ .{ .name = "FlagIsLocal", .value = 0x04, .parameters = &.{} },
+ .{ .name = "FlagIsDefinition", .value = 0x08, .parameters = &.{} },
+ .{ .name = "FlagFwdDecl", .value = 0x10, .parameters = &.{} },
+ .{ .name = "FlagArtificial", .value = 0x20, .parameters = &.{} },
+ .{ .name = "FlagExplicit", .value = 0x40, .parameters = &.{} },
+ .{ .name = "FlagPrototyped", .value = 0x80, .parameters = &.{} },
+ .{ .name = "FlagObjectPointer", .value = 0x100, .parameters = &.{} },
+ .{ .name = "FlagStaticMember", .value = 0x200, .parameters = &.{} },
+ .{ .name = "FlagIndirectVariable", .value = 0x400, .parameters = &.{} },
+ .{ .name = "FlagLValueReference", .value = 0x800, .parameters = &.{} },
+ .{ .name = "FlagRValueReference", .value = 0x1000, .parameters = &.{} },
+ .{ .name = "FlagIsOptimized", .value = 0x2000, .parameters = &.{} },
+ .{ .name = "FlagIsEnumClass", .value = 0x4000, .parameters = &.{} },
+ .{ .name = "FlagTypePassByValue", .value = 0x8000, .parameters = &.{} },
+ .{ .name = "FlagTypePassByReference", .value = 0x10000, .parameters = &.{} },
+ .{ .name = "FlagUnknownPhysicalLayout", .value = 0x20000, .parameters = &.{} },
+ },
+ .non_semantic_shader_debug_info_100_build_identifier_flags => &.{
+ .{ .name = "IdentifierPossibleDuplicates", .value = 0x01, .parameters = &.{} },
+ },
+ .non_semantic_shader_debug_info_100_debug_base_type_attribute_encoding => &.{
+ .{ .name = "Unspecified", .value = 0, .parameters = &.{} },
+ .{ .name = "Address", .value = 1, .parameters = &.{} },
+ .{ .name = "Boolean", .value = 2, .parameters = &.{} },
+ .{ .name = "Float", .value = 3, .parameters = &.{} },
+ .{ .name = "Signed", .value = 4, .parameters = &.{} },
+ .{ .name = "SignedChar", .value = 5, .parameters = &.{} },
+ .{ .name = "Unsigned", .value = 6, .parameters = &.{} },
+ .{ .name = "UnsignedChar", .value = 7, .parameters = &.{} },
+ },
+ .non_semantic_shader_debug_info_100_debug_composite_type => &.{
+ .{ .name = "Class", .value = 0, .parameters = &.{} },
+ .{ .name = "Structure", .value = 1, .parameters = &.{} },
+ .{ .name = "Union", .value = 2, .parameters = &.{} },
+ },
+ .non_semantic_shader_debug_info_100_debug_type_qualifier => &.{
+ .{ .name = "ConstType", .value = 0, .parameters = &.{} },
+ .{ .name = "VolatileType", .value = 1, .parameters = &.{} },
+ .{ .name = "RestrictType", .value = 2, .parameters = &.{} },
+ .{ .name = "AtomicType", .value = 3, .parameters = &.{} },
+ },
+ .non_semantic_shader_debug_info_100_debug_operation => &.{
+ .{ .name = "Deref", .value = 0, .parameters = &.{} },
+ .{ .name = "Plus", .value = 1, .parameters = &.{} },
+ .{ .name = "Minus", .value = 2, .parameters = &.{} },
+ .{ .name = "PlusUconst", .value = 3, .parameters = &.{.id_ref} },
+ .{ .name = "BitPiece", .value = 4, .parameters = &.{ .id_ref, .id_ref } },
+ .{ .name = "Swap", .value = 5, .parameters = &.{} },
+ .{ .name = "Xderef", .value = 6, .parameters = &.{} },
+ .{ .name = "StackValue", .value = 7, .parameters = &.{} },
+ .{ .name = "Constu", .value = 8, .parameters = &.{.id_ref} },
+ .{ .name = "Fragment", .value = 9, .parameters = &.{ .id_ref, .id_ref } },
+ },
+ .non_semantic_shader_debug_info_100_debug_imported_entity => &.{
+ .{ .name = "ImportedModule", .value = 0, .parameters = &.{} },
+ .{ .name = "ImportedDeclaration", .value = 1, .parameters = &.{} },
+ },
+ };
+ }
+};
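+/// SPIR-V core instruction opcodes, numbered as in the Khronos SPIR-V
+/// specification's unified grammar. Gaps in the numbering correspond to
+/// opcode values the specification leaves unassigned or reserved.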
+pub const Opcode = enum(u16) {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpExecutionModeId = 331,
+ OpDecorateId = 332,
+ OpGroupNonUniformElect = 333,
+ OpGroupNonUniformAll = 334,
+ OpGroupNonUniformAny = 335,
+ OpGroupNonUniformAllEqual = 336,
+ OpGroupNonUniformBroadcast = 337,
+ OpGroupNonUniformBroadcastFirst = 338,
+ OpGroupNonUniformBallot = 339,
+ OpGroupNonUniformInverseBallot = 340,
+ OpGroupNonUniformBallotBitExtract = 341,
+ OpGroupNonUniformBallotBitCount = 342,
+ OpGroupNonUniformBallotFindLSB = 343,
+ OpGroupNonUniformBallotFindMSB = 344,
+ OpGroupNonUniformShuffle = 345,
+ OpGroupNonUniformShuffleXor = 346,
+ OpGroupNonUniformShuffleUp = 347,
+ OpGroupNonUniformShuffleDown = 348,
+ OpGroupNonUniformIAdd = 349,
+ OpGroupNonUniformFAdd = 350,
+ OpGroupNonUniformIMul = 351,
+ OpGroupNonUniformFMul = 352,
+ OpGroupNonUniformSMin = 353,
+ OpGroupNonUniformUMin = 354,
+ OpGroupNonUniformFMin = 355,
+ OpGroupNonUniformSMax = 356,
+ OpGroupNonUniformUMax = 357,
+ OpGroupNonUniformFMax = 358,
+ OpGroupNonUniformBitwiseAnd = 359,
+ OpGroupNonUniformBitwiseOr = 360,
+ OpGroupNonUniformBitwiseXor = 361,
+ OpGroupNonUniformLogicalAnd = 362,
+ OpGroupNonUniformLogicalOr = 363,
+ OpGroupNonUniformLogicalXor = 364,
+ OpGroupNonUniformQuadBroadcast = 365,
+ OpGroupNonUniformQuadSwap = 366,
+ OpCopyLogical = 400,
+ OpPtrEqual = 401,
+ OpPtrNotEqual = 402,
+ OpPtrDiff = 403,
+ OpColorAttachmentReadEXT = 4160,
+ OpDepthAttachmentReadEXT = 4161,
+ OpStencilAttachmentReadEXT = 4162,
+ OpTypeTensorARM = 4163,
+ OpTensorReadARM = 4164,
+ OpTensorWriteARM = 4165,
+ OpTensorQuerySizeARM = 4166,
+ OpGraphConstantARM = 4181,
+ OpGraphEntryPointARM = 4182,
+ OpGraphARM = 4183,
+ OpGraphInputARM = 4184,
+ OpGraphSetOutputARM = 4185,
+ OpGraphEndARM = 4186,
+ OpTypeGraphARM = 4190,
+ OpTerminateInvocation = 4416,
+ OpTypeUntypedPointerKHR = 4417,
+ OpUntypedVariableKHR = 4418,
+ OpUntypedAccessChainKHR = 4419,
+ OpUntypedInBoundsAccessChainKHR = 4420,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpUntypedPtrAccessChainKHR = 4423,
+ OpUntypedInBoundsPtrAccessChainKHR = 4424,
+ OpUntypedArrayLengthKHR = 4425,
+ OpUntypedPrefetchKHR = 4426,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpGroupNonUniformRotateKHR = 4431,
+ OpSubgroupReadInvocationKHR = 4432,
+ OpExtInstWithForwardRefsKHR = 4433,
+ OpTraceRayKHR = 4445,
+ OpExecuteCallableKHR = 4446,
+ OpConvertUToAccelerationStructureKHR = 4447,
+ OpIgnoreIntersectionKHR = 4448,
+ OpTerminateRayKHR = 4449,
+ OpSDot = 4450,
+ OpUDot = 4451,
+ OpSUDot = 4452,
+ OpSDotAccSat = 4453,
+ OpUDotAccSat = 4454,
+ OpSUDotAccSat = 4455,
+ OpTypeCooperativeMatrixKHR = 4456,
+ OpCooperativeMatrixLoadKHR = 4457,
+ OpCooperativeMatrixStoreKHR = 4458,
+ OpCooperativeMatrixMulAddKHR = 4459,
+ OpCooperativeMatrixLengthKHR = 4460,
+ OpConstantCompositeReplicateEXT = 4461,
+ OpSpecConstantCompositeReplicateEXT = 4462,
+ OpCompositeConstructReplicateEXT = 4463,
+ OpTypeRayQueryKHR = 4472,
+ OpRayQueryInitializeKHR = 4473,
+ OpRayQueryTerminateKHR = 4474,
+ OpRayQueryGenerateIntersectionKHR = 4475,
+ OpRayQueryConfirmIntersectionKHR = 4476,
+ OpRayQueryProceedKHR = 4477,
+ OpRayQueryGetIntersectionTypeKHR = 4479,
+ OpImageSampleWeightedQCOM = 4480,
+ OpImageBoxFilterQCOM = 4481,
+ OpImageBlockMatchSSDQCOM = 4482,
+ OpImageBlockMatchSADQCOM = 4483,
+ OpImageBlockMatchWindowSSDQCOM = 4500,
+ OpImageBlockMatchWindowSADQCOM = 4501,
+ OpImageBlockMatchGatherSSDQCOM = 4502,
+ OpImageBlockMatchGatherSADQCOM = 4503,
+ OpGroupIAddNonUniformAMD = 5000,
+ OpGroupFAddNonUniformAMD = 5001,
+ OpGroupFMinNonUniformAMD = 5002,
+ OpGroupUMinNonUniformAMD = 5003,
+ OpGroupSMinNonUniformAMD = 5004,
+ OpGroupFMaxNonUniformAMD = 5005,
+ OpGroupUMaxNonUniformAMD = 5006,
+ OpGroupSMaxNonUniformAMD = 5007,
+ OpFragmentMaskFetchAMD = 5011,
+ OpFragmentFetchAMD = 5012,
+ OpReadClockKHR = 5056,
+ OpAllocateNodePayloadsAMDX = 5074,
+ OpEnqueueNodePayloadsAMDX = 5075,
+ OpTypeNodePayloadArrayAMDX = 5076,
+ OpFinishWritingNodePayloadAMDX = 5078,
+ OpNodePayloadArrayLengthAMDX = 5090,
+ OpIsNodePayloadValidAMDX = 5101,
+ OpConstantStringAMDX = 5103,
+ OpSpecConstantStringAMDX = 5104,
+ OpGroupNonUniformQuadAllKHR = 5110,
+ OpGroupNonUniformQuadAnyKHR = 5111,
+ OpHitObjectRecordHitMotionNV = 5249,
+ OpHitObjectRecordHitWithIndexMotionNV = 5250,
+ OpHitObjectRecordMissMotionNV = 5251,
+ OpHitObjectGetWorldToObjectNV = 5252,
+ OpHitObjectGetObjectToWorldNV = 5253,
+ OpHitObjectGetObjectRayDirectionNV = 5254,
+ OpHitObjectGetObjectRayOriginNV = 5255,
+ OpHitObjectTraceRayMotionNV = 5256,
+ OpHitObjectGetShaderRecordBufferHandleNV = 5257,
+ OpHitObjectGetShaderBindingTableRecordIndexNV = 5258,
+ OpHitObjectRecordEmptyNV = 5259,
+ OpHitObjectTraceRayNV = 5260,
+ OpHitObjectRecordHitNV = 5261,
+ OpHitObjectRecordHitWithIndexNV = 5262,
+ OpHitObjectRecordMissNV = 5263,
+ OpHitObjectExecuteShaderNV = 5264,
+ OpHitObjectGetCurrentTimeNV = 5265,
+ OpHitObjectGetAttributesNV = 5266,
+ OpHitObjectGetHitKindNV = 5267,
+ OpHitObjectGetPrimitiveIndexNV = 5268,
+ OpHitObjectGetGeometryIndexNV = 5269,
+ OpHitObjectGetInstanceIdNV = 5270,
+ OpHitObjectGetInstanceCustomIndexNV = 5271,
+ OpHitObjectGetWorldRayDirectionNV = 5272,
+ OpHitObjectGetWorldRayOriginNV = 5273,
+ OpHitObjectGetRayTMaxNV = 5274,
+ OpHitObjectGetRayTMinNV = 5275,
+ OpHitObjectIsEmptyNV = 5276,
+ OpHitObjectIsHitNV = 5277,
+ OpHitObjectIsMissNV = 5278,
+ OpReorderThreadWithHitObjectNV = 5279,
+ OpReorderThreadWithHintNV = 5280,
+ OpTypeHitObjectNV = 5281,
+ OpImageSampleFootprintNV = 5283,
+ OpTypeCooperativeVectorNV = 5288,
+ OpCooperativeVectorMatrixMulNV = 5289,
+ OpCooperativeVectorOuterProductAccumulateNV = 5290,
+ OpCooperativeVectorReduceSumAccumulateNV = 5291,
+ OpCooperativeVectorMatrixMulAddNV = 5292,
+ OpCooperativeMatrixConvertNV = 5293,
+ OpEmitMeshTasksEXT = 5294,
+ OpSetMeshOutputsEXT = 5295,
+ OpGroupNonUniformPartitionNV = 5296,
+ OpWritePackedPrimitiveIndices4x8NV = 5299,
+ OpFetchMicroTriangleVertexPositionNV = 5300,
+ OpFetchMicroTriangleVertexBarycentricNV = 5301,
+ OpCooperativeVectorLoadNV = 5302,
+ OpCooperativeVectorStoreNV = 5303,
+ OpReportIntersectionKHR = 5334,
+ OpIgnoreIntersectionNV = 5335,
+ OpTerminateRayNV = 5336,
+ OpTraceNV = 5337,
+ OpTraceMotionNV = 5338,
+ OpTraceRayMotionNV = 5339,
+ OpRayQueryGetIntersectionTriangleVertexPositionsKHR = 5340,
+ OpTypeAccelerationStructureKHR = 5341,
+ OpExecuteCallableNV = 5344,
+ OpRayQueryGetClusterIdNV = 5345,
+ OpHitObjectGetClusterIdNV = 5346,
+ OpTypeCooperativeMatrixNV = 5358,
+ OpCooperativeMatrixLoadNV = 5359,
+ OpCooperativeMatrixStoreNV = 5360,
+ OpCooperativeMatrixMulAddNV = 5361,
+ OpCooperativeMatrixLengthNV = 5362,
+ OpBeginInvocationInterlockEXT = 5364,
+ OpEndInvocationInterlockEXT = 5365,
+ OpCooperativeMatrixReduceNV = 5366,
+ OpCooperativeMatrixLoadTensorNV = 5367,
+ OpCooperativeMatrixStoreTensorNV = 5368,
+ OpCooperativeMatrixPerElementOpNV = 5369,
+ OpTypeTensorLayoutNV = 5370,
+ OpTypeTensorViewNV = 5371,
+ OpCreateTensorLayoutNV = 5372,
+ OpTensorLayoutSetDimensionNV = 5373,
+ OpTensorLayoutSetStrideNV = 5374,
+ OpTensorLayoutSliceNV = 5375,
+ OpTensorLayoutSetClampValueNV = 5376,
+ OpCreateTensorViewNV = 5377,
+ OpTensorViewSetDimensionNV = 5378,
+ OpTensorViewSetStrideNV = 5379,
+ OpDemoteToHelperInvocation = 5380,
+ OpIsHelperInvocationEXT = 5381,
+ OpTensorViewSetClipNV = 5382,
+ OpTensorLayoutSetBlockSizeNV = 5384,
+ OpCooperativeMatrixTransposeNV = 5390,
+ OpConvertUToImageNV = 5391,
+ OpConvertUToSamplerNV = 5392,
+ OpConvertImageToUNV = 5393,
+ OpConvertSamplerToUNV = 5394,
+ OpConvertUToSampledImageNV = 5395,
+ OpConvertSampledImageToUNV = 5396,
+ OpSamplerImageAddressingModeNV = 5397,
+ OpRawAccessChainNV = 5398,
+ OpRayQueryGetIntersectionSpherePositionNV = 5427,
+ OpRayQueryGetIntersectionSphereRadiusNV = 5428,
+ OpRayQueryGetIntersectionLSSPositionsNV = 5429,
+ OpRayQueryGetIntersectionLSSRadiiNV = 5430,
+ OpRayQueryGetIntersectionLSSHitValueNV = 5431,
+ OpHitObjectGetSpherePositionNV = 5432,
+ OpHitObjectGetSphereRadiusNV = 5433,
+ OpHitObjectGetLSSPositionsNV = 5434,
+ OpHitObjectGetLSSRadiiNV = 5435,
+ OpHitObjectIsSphereHitNV = 5436,
+ OpHitObjectIsLSSHitNV = 5437,
+ OpRayQueryIsSphereHitNV = 5438,
+ OpRayQueryIsLSSHitNV = 5439,
+ OpSubgroupShuffleINTEL = 5571,
+ OpSubgroupShuffleDownINTEL = 5572,
+ OpSubgroupShuffleUpINTEL = 5573,
+ OpSubgroupShuffleXorINTEL = 5574,
+ OpSubgroupBlockReadINTEL = 5575,
+ OpSubgroupBlockWriteINTEL = 5576,
+ OpSubgroupImageBlockReadINTEL = 5577,
+ OpSubgroupImageBlockWriteINTEL = 5578,
+ OpSubgroupImageMediaBlockReadINTEL = 5580,
+ OpSubgroupImageMediaBlockWriteINTEL = 5581,
+ OpUCountLeadingZerosINTEL = 5585,
+ OpUCountTrailingZerosINTEL = 5586,
+ OpAbsISubINTEL = 5587,
+ OpAbsUSubINTEL = 5588,
+ OpIAddSatINTEL = 5589,
+ OpUAddSatINTEL = 5590,
+ OpIAverageINTEL = 5591,
+ OpUAverageINTEL = 5592,
+ OpIAverageRoundedINTEL = 5593,
+ OpUAverageRoundedINTEL = 5594,
+ OpISubSatINTEL = 5595,
+ OpUSubSatINTEL = 5596,
+ OpIMul32x16INTEL = 5597,
+ OpUMul32x16INTEL = 5598,
+ OpAtomicFMinEXT = 5614,
+ OpAtomicFMaxEXT = 5615,
+ OpAssumeTrueKHR = 5630,
+ OpExpectKHR = 5631,
+ OpDecorateString = 5632,
+ OpMemberDecorateString = 5633,
+ OpLoopControlINTEL = 5887,
+ OpReadPipeBlockingINTEL = 5946,
+ OpWritePipeBlockingINTEL = 5947,
+ OpFPGARegINTEL = 5949,
+ OpRayQueryGetRayTMinKHR = 6016,
+ OpRayQueryGetRayFlagsKHR = 6017,
+ OpRayQueryGetIntersectionTKHR = 6018,
+ OpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019,
+ OpRayQueryGetIntersectionInstanceIdKHR = 6020,
+ OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021,
+ OpRayQueryGetIntersectionGeometryIndexKHR = 6022,
+ OpRayQueryGetIntersectionPrimitiveIndexKHR = 6023,
+ OpRayQueryGetIntersectionBarycentricsKHR = 6024,
+ OpRayQueryGetIntersectionFrontFaceKHR = 6025,
+ OpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026,
+ OpRayQueryGetIntersectionObjectRayDirectionKHR = 6027,
+ OpRayQueryGetIntersectionObjectRayOriginKHR = 6028,
+ OpRayQueryGetWorldRayDirectionKHR = 6029,
+ OpRayQueryGetWorldRayOriginKHR = 6030,
+ OpRayQueryGetIntersectionObjectToWorldKHR = 6031,
+ OpRayQueryGetIntersectionWorldToObjectKHR = 6032,
+ OpAtomicFAddEXT = 6035,
+ OpTypeBufferSurfaceINTEL = 6086,
+ OpTypeStructContinuedINTEL = 6090,
+ OpConstantCompositeContinuedINTEL = 6091,
+ OpSpecConstantCompositeContinuedINTEL = 6092,
+ OpCompositeConstructContinuedINTEL = 6096,
+ OpConvertFToBF16INTEL = 6116,
+ OpConvertBF16ToFINTEL = 6117,
+ OpControlBarrierArriveINTEL = 6142,
+ OpControlBarrierWaitINTEL = 6143,
+ OpArithmeticFenceEXT = 6145,
+ OpTaskSequenceCreateINTEL = 6163,
+ OpTaskSequenceAsyncINTEL = 6164,
+ OpTaskSequenceGetINTEL = 6165,
+ OpTaskSequenceReleaseINTEL = 6166,
+ OpTypeTaskSequenceINTEL = 6199,
+ OpSubgroupBlockPrefetchINTEL = 6221,
+ OpSubgroup2DBlockLoadINTEL = 6231,
+ OpSubgroup2DBlockLoadTransformINTEL = 6232,
+ OpSubgroup2DBlockLoadTransposeINTEL = 6233,
+ OpSubgroup2DBlockPrefetchINTEL = 6234,
+ OpSubgroup2DBlockStoreINTEL = 6235,
+ OpSubgroupMatrixMultiplyAccumulateINTEL = 6237,
+ OpBitwiseFunctionINTEL = 6242,
+ OpGroupIMulKHR = 6401,
+ OpGroupFMulKHR = 6402,
+ OpGroupBitwiseAndKHR = 6403,
+ OpGroupBitwiseOrKHR = 6404,
+ OpGroupBitwiseXorKHR = 6405,
+ OpGroupLogicalAndKHR = 6406,
+ OpGroupLogicalOrKHR = 6407,
+ OpGroupLogicalXorKHR = 6408,
+ OpRoundFToTF32INTEL = 6426,
+ OpMaskedGatherINTEL = 6428,
+ OpMaskedScatterINTEL = 6429,
+ OpConvertHandleToImageINTEL = 6529,
+ OpConvertHandleToSamplerINTEL = 6530,
+ OpConvertHandleToSampledImageINTEL = 6531,
+
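+ /// Returns a struct type whose fields describe the operands of this opcode,
+ /// one field per operand in SPIR-V word order. Optional trailing operands are
+ /// modeled as optional fields defaulting to null, and variadic operands as slices.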
+ pub fn Operands(comptime self: Opcode) type {
+ return switch (self) {
+ .OpNop => void,
+ .OpUndef => struct { id_result_type: Id, id_result: Id },
+ .OpSourceContinued => struct { continued_source: LiteralString },
+ .OpSource => struct { source_language: SourceLanguage, version: LiteralInteger, file: ?Id = null, source: ?LiteralString = null },
+ .OpSourceExtension => struct { extension: LiteralString },
+ .OpName => struct { target: Id, name: LiteralString },
+ .OpMemberName => struct { type: Id, member: LiteralInteger, name: LiteralString },
+ .OpString => struct { id_result: Id, string: LiteralString },
+ .OpLine => struct { file: Id, line: LiteralInteger, column: LiteralInteger },
+ .OpExtension => struct { name: LiteralString },
+ .OpExtInstImport => struct { id_result: Id, name: LiteralString },
+ .OpExtInst => struct { id_result_type: Id, id_result: Id, set: Id, instruction: LiteralExtInstInteger, id_ref_4: []const Id = &.{} },
+ .OpMemoryModel => struct { addressing_model: AddressingModel, memory_model: MemoryModel },
+ .OpEntryPoint => struct { execution_model: ExecutionModel, entry_point: Id, name: LiteralString, interface: []const Id = &.{} },
+ .OpExecutionMode => struct { entry_point: Id, mode: ExecutionMode.Extended },
+ .OpCapability => struct { capability: Capability },
+ .OpTypeVoid => struct { id_result: Id },
+ .OpTypeBool => struct { id_result: Id },
+ .OpTypeInt => struct { id_result: Id, width: LiteralInteger, signedness: LiteralInteger },
+ .OpTypeFloat => struct { id_result: Id, width: LiteralInteger, floating_point_encoding: ?FPEncoding = null },
+ .OpTypeVector => struct { id_result: Id, component_type: Id, component_count: LiteralInteger },
+ .OpTypeMatrix => struct { id_result: Id, column_type: Id, column_count: LiteralInteger },
+ .OpTypeImage => struct { id_result: Id, sampled_type: Id, dim: Dim, depth: LiteralInteger, arrayed: LiteralInteger, ms: LiteralInteger, sampled: LiteralInteger, image_format: ImageFormat, access_qualifier: ?AccessQualifier = null },
+ .OpTypeSampler => struct { id_result: Id },
+ .OpTypeSampledImage => struct { id_result: Id, image_type: Id },
+ .OpTypeArray => struct { id_result: Id, element_type: Id, length: Id },
+ .OpTypeRuntimeArray => struct { id_result: Id, element_type: Id },
+ .OpTypeStruct => struct { id_result: Id, id_ref: []const Id = &.{} },
+ .OpTypeOpaque => struct { id_result: Id, literal_string: LiteralString },
+ .OpTypePointer => struct { id_result: Id, storage_class: StorageClass, type: Id },
+ .OpTypeFunction => struct { id_result: Id, return_type: Id, id_ref_2: []const Id = &.{} },
+ .OpTypeEvent => struct { id_result: Id },
+ .OpTypeDeviceEvent => struct { id_result: Id },
+ .OpTypeReserveId => struct { id_result: Id },
+ .OpTypeQueue => struct { id_result: Id },
+ .OpTypePipe => struct { id_result: Id, qualifier: AccessQualifier },
+ .OpTypeForwardPointer => struct { pointer_type: Id, storage_class: StorageClass },
+ .OpConstantTrue => struct { id_result_type: Id, id_result: Id },
+ .OpConstantFalse => struct { id_result_type: Id, id_result: Id },
+ .OpConstant => struct { id_result_type: Id, id_result: Id, value: LiteralContextDependentNumber },
+ .OpConstantComposite => struct { id_result_type: Id, id_result: Id, constituents: []const Id = &.{} },
+ .OpConstantSampler => struct { id_result_type: Id, id_result: Id, sampler_addressing_mode: SamplerAddressingMode, param: LiteralInteger, sampler_filter_mode: SamplerFilterMode },
+ .OpConstantNull => struct { id_result_type: Id, id_result: Id },
+ .OpSpecConstantTrue => struct { id_result_type: Id, id_result: Id },
+ .OpSpecConstantFalse => struct { id_result_type: Id, id_result: Id },
+ .OpSpecConstant => struct { id_result_type: Id, id_result: Id, value: LiteralContextDependentNumber },
+ .OpSpecConstantComposite => struct { id_result_type: Id, id_result: Id, constituents: []const Id = &.{} },
+ .OpSpecConstantOp => struct { id_result_type: Id, id_result: Id, opcode: LiteralSpecConstantOpInteger },
+ .OpFunction => struct { id_result_type: Id, id_result: Id, function_control: FunctionControl, function_type: Id },
+ .OpFunctionParameter => struct { id_result_type: Id, id_result: Id },
+ .OpFunctionEnd => void,
+ .OpFunctionCall => struct { id_result_type: Id, id_result: Id, function: Id, id_ref_3: []const Id = &.{} },
+ .OpVariable => struct { id_result_type: Id, id_result: Id, storage_class: StorageClass, initializer: ?Id = null },
+ .OpImageTexelPointer => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, sample: Id },
+ .OpLoad => struct { id_result_type: Id, id_result: Id, pointer: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpStore => struct { pointer: Id, object: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpCopyMemory => struct { target: Id, source: Id, memory_access_2: ?MemoryAccess.Extended = null, memory_access_3: ?MemoryAccess.Extended = null },
+ .OpCopyMemorySized => struct { target: Id, source: Id, size: Id, memory_access_3: ?MemoryAccess.Extended = null, memory_access_4: ?MemoryAccess.Extended = null },
+ .OpAccessChain => struct { id_result_type: Id, id_result: Id, base: Id, indexes: []const Id = &.{} },
+ .OpInBoundsAccessChain => struct { id_result_type: Id, id_result: Id, base: Id, indexes: []const Id = &.{} },
+ .OpPtrAccessChain => struct { id_result_type: Id, id_result: Id, base: Id, element: Id, indexes: []const Id = &.{} },
+ .OpArrayLength => struct { id_result_type: Id, id_result: Id, structure: Id, array_member: LiteralInteger },
+ .OpGenericPtrMemSemantics => struct { id_result_type: Id, id_result: Id, pointer: Id },
+ .OpInBoundsPtrAccessChain => struct { id_result_type: Id, id_result: Id, base: Id, element: Id, indexes: []const Id = &.{} },
+ .OpDecorate => struct { target: Id, decoration: Decoration.Extended },
+ .OpMemberDecorate => struct { structure_type: Id, member: LiteralInteger, decoration: Decoration.Extended },
+ .OpDecorationGroup => struct { id_result: Id },
+ .OpGroupDecorate => struct { decoration_group: Id, targets: []const Id = &.{} },
+ .OpGroupMemberDecorate => struct { decoration_group: Id, targets: []const PairIdRefLiteralInteger = &.{} },
+ .OpVectorExtractDynamic => struct { id_result_type: Id, id_result: Id, vector: Id, index: Id },
+ .OpVectorInsertDynamic => struct { id_result_type: Id, id_result: Id, vector: Id, component: Id, index: Id },
+ .OpVectorShuffle => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, components: []const LiteralInteger = &.{} },
+ .OpCompositeConstruct => struct { id_result_type: Id, id_result: Id, constituents: []const Id = &.{} },
+ .OpCompositeExtract => struct { id_result_type: Id, id_result: Id, composite: Id, indexes: []const LiteralInteger = &.{} },
+ .OpCompositeInsert => struct { id_result_type: Id, id_result: Id, object: Id, composite: Id, indexes: []const LiteralInteger = &.{} },
+ .OpCopyObject => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpTranspose => struct { id_result_type: Id, id_result: Id, matrix: Id },
+ .OpSampledImage => struct { id_result_type: Id, id_result: Id, image: Id, sampler: Id },
+ .OpImageSampleImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ImageOperands.Extended },
+ .OpImageSampleDrefImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleDrefExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ImageOperands.Extended },
+ .OpImageSampleProjImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleProjExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ImageOperands.Extended },
+ .OpImageSampleProjDrefImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSampleProjDrefExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ImageOperands.Extended },
+ .OpImageFetch => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageGather => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, component: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageDrefGather => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageRead => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageWrite => struct { image: Id, coordinate: Id, texel: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImage => struct { id_result_type: Id, id_result: Id, sampled_image: Id },
+ .OpImageQueryFormat => struct { id_result_type: Id, id_result: Id, image: Id },
+ .OpImageQueryOrder => struct { id_result_type: Id, id_result: Id, image: Id },
+ .OpImageQuerySizeLod => struct { id_result_type: Id, id_result: Id, image: Id, level_of_detail: Id },
+ .OpImageQuerySize => struct { id_result_type: Id, id_result: Id, image: Id },
+ .OpImageQueryLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id },
+ .OpImageQueryLevels => struct { id_result_type: Id, id_result: Id, image: Id },
+ .OpImageQuerySamples => struct { id_result_type: Id, id_result: Id, image: Id },
+ .OpConvertFToU => struct { id_result_type: Id, id_result: Id, float_value: Id },
+ .OpConvertFToS => struct { id_result_type: Id, id_result: Id, float_value: Id },
+ .OpConvertSToF => struct { id_result_type: Id, id_result: Id, signed_value: Id },
+ .OpConvertUToF => struct { id_result_type: Id, id_result: Id, unsigned_value: Id },
+ .OpUConvert => struct { id_result_type: Id, id_result: Id, unsigned_value: Id },
+ .OpSConvert => struct { id_result_type: Id, id_result: Id, signed_value: Id },
+ .OpFConvert => struct { id_result_type: Id, id_result: Id, float_value: Id },
+ .OpQuantizeToF16 => struct { id_result_type: Id, id_result: Id, value: Id },
+ .OpConvertPtrToU => struct { id_result_type: Id, id_result: Id, pointer: Id },
+ .OpSatConvertSToU => struct { id_result_type: Id, id_result: Id, signed_value: Id },
+ .OpSatConvertUToS => struct { id_result_type: Id, id_result: Id, unsigned_value: Id },
+ .OpConvertUToPtr => struct { id_result_type: Id, id_result: Id, integer_value: Id },
+ .OpPtrCastToGeneric => struct { id_result_type: Id, id_result: Id, pointer: Id },
+ .OpGenericCastToPtr => struct { id_result_type: Id, id_result: Id, pointer: Id },
+ .OpGenericCastToPtrExplicit => struct { id_result_type: Id, id_result: Id, pointer: Id, storage: StorageClass },
+ .OpBitcast => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpSNegate => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpFNegate => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpIAdd => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFAdd => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpISub => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFSub => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpIMul => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFMul => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUDiv => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSDiv => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFDiv => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUMod => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSRem => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSMod => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFRem => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFMod => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpVectorTimesScalar => struct { id_result_type: Id, id_result: Id, vector: Id, scalar: Id },
+ .OpMatrixTimesScalar => struct { id_result_type: Id, id_result: Id, matrix: Id, scalar: Id },
+ .OpVectorTimesMatrix => struct { id_result_type: Id, id_result: Id, vector: Id, matrix: Id },
+ .OpMatrixTimesVector => struct { id_result_type: Id, id_result: Id, matrix: Id, vector: Id },
+ .OpMatrixTimesMatrix => struct { id_result_type: Id, id_result: Id, left_matrix: Id, right_matrix: Id },
+ .OpOuterProduct => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id },
+ .OpDot => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id },
+ .OpIAddCarry => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpISubBorrow => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUMulExtended => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSMulExtended => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpAny => struct { id_result_type: Id, id_result: Id, vector: Id },
+ .OpAll => struct { id_result_type: Id, id_result: Id, vector: Id },
+ .OpIsNan => struct { id_result_type: Id, id_result: Id, x: Id },
+ .OpIsInf => struct { id_result_type: Id, id_result: Id, x: Id },
+ .OpIsFinite => struct { id_result_type: Id, id_result: Id, x: Id },
+ .OpIsNormal => struct { id_result_type: Id, id_result: Id, x: Id },
+ .OpSignBitSet => struct { id_result_type: Id, id_result: Id, x: Id },
+ .OpLessOrGreater => struct { id_result_type: Id, id_result: Id, x: Id, y: Id },
+ .OpOrdered => struct { id_result_type: Id, id_result: Id, x: Id, y: Id },
+ .OpUnordered => struct { id_result_type: Id, id_result: Id, x: Id, y: Id },
+ .OpLogicalEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpLogicalNotEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpLogicalOr => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpLogicalAnd => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpLogicalNot => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpSelect => struct { id_result_type: Id, id_result: Id, condition: Id, object_1: Id, object_2: Id },
+ .OpIEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpINotEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUGreaterThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSGreaterThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUGreaterThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSGreaterThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpULessThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSLessThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpULessThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpSLessThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFOrdEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFUnordEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFOrdNotEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFUnordNotEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFOrdLessThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFUnordLessThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFOrdGreaterThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFUnordGreaterThan => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFOrdLessThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFUnordLessThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFOrdGreaterThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpFUnordGreaterThanEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpShiftRightLogical => struct { id_result_type: Id, id_result: Id, base: Id, shift: Id },
+ .OpShiftRightArithmetic => struct { id_result_type: Id, id_result: Id, base: Id, shift: Id },
+ .OpShiftLeftLogical => struct { id_result_type: Id, id_result: Id, base: Id, shift: Id },
+ .OpBitwiseOr => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpBitwiseXor => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpBitwiseAnd => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpNot => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpBitFieldInsert => struct { id_result_type: Id, id_result: Id, base: Id, insert: Id, offset: Id, count: Id },
+ .OpBitFieldSExtract => struct { id_result_type: Id, id_result: Id, base: Id, offset: Id, count: Id },
+ .OpBitFieldUExtract => struct { id_result_type: Id, id_result: Id, base: Id, offset: Id, count: Id },
+ .OpBitReverse => struct { id_result_type: Id, id_result: Id, base: Id },
+ .OpBitCount => struct { id_result_type: Id, id_result: Id, base: Id },
+ .OpDPdx => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpDPdy => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpFwidth => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpDPdxFine => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpDPdyFine => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpFwidthFine => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpDPdxCoarse => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpDPdyCoarse => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpFwidthCoarse => struct { id_result_type: Id, id_result: Id, p: Id },
+ .OpEmitVertex => void,
+ .OpEndPrimitive => void,
+ .OpEmitStreamVertex => struct { stream: Id },
+ .OpEndStreamPrimitive => struct { stream: Id },
+ .OpControlBarrier => struct { execution: Id, memory: Id, semantics: Id },
+ .OpMemoryBarrier => struct { memory: Id, semantics: Id },
+ .OpAtomicLoad => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id },
+ .OpAtomicStore => struct { pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicExchange => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicCompareExchange => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, equal: Id, unequal: Id, value: Id, comparator: Id },
+ .OpAtomicCompareExchangeWeak => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, equal: Id, unequal: Id, value: Id, comparator: Id },
+ .OpAtomicIIncrement => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id },
+ .OpAtomicIDecrement => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id },
+ .OpAtomicIAdd => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicISub => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicSMin => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicUMin => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicSMax => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicUMax => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicAnd => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicOr => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicXor => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpPhi => struct { id_result_type: Id, id_result: Id, pair_id_ref_id_ref: []const PairIdRefIdRef = &.{} },
+ .OpLoopMerge => struct { merge_block: Id, continue_target: Id, loop_control: LoopControl.Extended },
+ .OpSelectionMerge => struct { merge_block: Id, selection_control: SelectionControl },
+ .OpLabel => struct { id_result: Id },
+ .OpBranch => struct { target_label: Id },
+ .OpBranchConditional => struct { condition: Id, true_label: Id, false_label: Id, branch_weights: []const LiteralInteger = &.{} },
+ .OpSwitch => struct { selector: Id, default: Id, target: []const PairLiteralIntegerIdRef = &.{} },
+ .OpKill => void,
+ .OpReturn => void,
+ .OpReturnValue => struct { value: Id },
+ .OpUnreachable => void,
+ .OpLifetimeStart => struct { pointer: Id, size: LiteralInteger },
+ .OpLifetimeStop => struct { pointer: Id, size: LiteralInteger },
+ .OpGroupAsyncCopy => struct { id_result_type: Id, id_result: Id, execution: Id, destination: Id, source: Id, num_elements: Id, stride: Id, event: Id },
+ .OpGroupWaitEvents => struct { execution: Id, num_events: Id, events_list: Id },
+ .OpGroupAll => struct { id_result_type: Id, id_result: Id, execution: Id, predicate: Id },
+ .OpGroupAny => struct { id_result_type: Id, id_result: Id, execution: Id, predicate: Id },
+ .OpGroupBroadcast => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, local_id: Id },
+ .OpGroupIAdd => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFAdd => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFMin => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupUMin => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupSMin => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFMax => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupUMax => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupSMax => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpReadPipe => struct { id_result_type: Id, id_result: Id, pipe: Id, pointer: Id, packet_size: Id, packet_alignment: Id },
+ .OpWritePipe => struct { id_result_type: Id, id_result: Id, pipe: Id, pointer: Id, packet_size: Id, packet_alignment: Id },
+ .OpReservedReadPipe => struct { id_result_type: Id, id_result: Id, pipe: Id, reserve_id: Id, index: Id, pointer: Id, packet_size: Id, packet_alignment: Id },
+ .OpReservedWritePipe => struct { id_result_type: Id, id_result: Id, pipe: Id, reserve_id: Id, index: Id, pointer: Id, packet_size: Id, packet_alignment: Id },
+ .OpReserveReadPipePackets => struct { id_result_type: Id, id_result: Id, pipe: Id, num_packets: Id, packet_size: Id, packet_alignment: Id },
+ .OpReserveWritePipePackets => struct { id_result_type: Id, id_result: Id, pipe: Id, num_packets: Id, packet_size: Id, packet_alignment: Id },
+ .OpCommitReadPipe => struct { pipe: Id, reserve_id: Id, packet_size: Id, packet_alignment: Id },
+ .OpCommitWritePipe => struct { pipe: Id, reserve_id: Id, packet_size: Id, packet_alignment: Id },
+ .OpIsValidReserveId => struct { id_result_type: Id, id_result: Id, reserve_id: Id },
+ .OpGetNumPipePackets => struct { id_result_type: Id, id_result: Id, pipe: Id, packet_size: Id, packet_alignment: Id },
+ .OpGetMaxPipePackets => struct { id_result_type: Id, id_result: Id, pipe: Id, packet_size: Id, packet_alignment: Id },
+ .OpGroupReserveReadPipePackets => struct { id_result_type: Id, id_result: Id, execution: Id, pipe: Id, num_packets: Id, packet_size: Id, packet_alignment: Id },
+ .OpGroupReserveWritePipePackets => struct { id_result_type: Id, id_result: Id, execution: Id, pipe: Id, num_packets: Id, packet_size: Id, packet_alignment: Id },
+ .OpGroupCommitReadPipe => struct { execution: Id, pipe: Id, reserve_id: Id, packet_size: Id, packet_alignment: Id },
+ .OpGroupCommitWritePipe => struct { execution: Id, pipe: Id, reserve_id: Id, packet_size: Id, packet_alignment: Id },
+ .OpEnqueueMarker => struct { id_result_type: Id, id_result: Id, queue: Id, num_events: Id, wait_events: Id, ret_event: Id },
+ .OpEnqueueKernel => struct { id_result_type: Id, id_result: Id, queue: Id, flags: Id, nd_range: Id, num_events: Id, wait_events: Id, ret_event: Id, invoke: Id, param: Id, param_size: Id, param_align: Id, local_size: []const Id = &.{} },
+ .OpGetKernelNDrangeSubGroupCount => struct { id_result_type: Id, id_result: Id, nd_range: Id, invoke: Id, param: Id, param_size: Id, param_align: Id },
+ .OpGetKernelNDrangeMaxSubGroupSize => struct { id_result_type: Id, id_result: Id, nd_range: Id, invoke: Id, param: Id, param_size: Id, param_align: Id },
+ .OpGetKernelWorkGroupSize => struct { id_result_type: Id, id_result: Id, invoke: Id, param: Id, param_size: Id, param_align: Id },
+ .OpGetKernelPreferredWorkGroupSizeMultiple => struct { id_result_type: Id, id_result: Id, invoke: Id, param: Id, param_size: Id, param_align: Id },
+ .OpRetainEvent => struct { event: Id },
+ .OpReleaseEvent => struct { event: Id },
+ .OpCreateUserEvent => struct { id_result_type: Id, id_result: Id },
+ .OpIsValidEvent => struct { id_result_type: Id, id_result: Id, event: Id },
+ .OpSetUserEventStatus => struct { event: Id, status: Id },
+ .OpCaptureEventProfilingInfo => struct { event: Id, profiling_info: Id, value: Id },
+ .OpGetDefaultQueue => struct { id_result_type: Id, id_result: Id },
+ .OpBuildNDRange => struct { id_result_type: Id, id_result: Id, global_work_size: Id, local_work_size: Id, global_work_offset: Id },
+ .OpImageSparseSampleImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ImageOperands.Extended },
+ .OpImageSparseSampleDrefImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleDrefExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ImageOperands.Extended },
+ .OpImageSparseSampleProjImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleProjExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, image_operands: ImageOperands.Extended },
+ .OpImageSparseSampleProjDrefImplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseSampleProjDrefExplicitLod => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ImageOperands.Extended },
+ .OpImageSparseFetch => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseGather => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, component: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseDrefGather => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, d_ref: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpImageSparseTexelsResident => struct { id_result_type: Id, id_result: Id, resident_code: Id },
+ .OpNoLine => void,
+ .OpAtomicFlagTestAndSet => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id },
+ .OpAtomicFlagClear => struct { pointer: Id, memory: Id, semantics: Id },
+ .OpImageSparseRead => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpSizeOf => struct { id_result_type: Id, id_result: Id, pointer: Id },
+ .OpTypePipeStorage => struct { id_result: Id },
+ .OpConstantPipeStorage => struct { id_result_type: Id, id_result: Id, packet_size: LiteralInteger, packet_alignment: LiteralInteger, capacity: LiteralInteger },
+ .OpCreatePipeFromPipeStorage => struct { id_result_type: Id, id_result: Id, pipe_storage: Id },
+ .OpGetKernelLocalSizeForSubgroupCount => struct { id_result_type: Id, id_result: Id, subgroup_count: Id, invoke: Id, param: Id, param_size: Id, param_align: Id },
+ .OpGetKernelMaxNumSubgroups => struct { id_result_type: Id, id_result: Id, invoke: Id, param: Id, param_size: Id, param_align: Id },
+ .OpTypeNamedBarrier => struct { id_result: Id },
+ .OpNamedBarrierInitialize => struct { id_result_type: Id, id_result: Id, subgroup_count: Id },
+ .OpMemoryNamedBarrier => struct { named_barrier: Id, memory: Id, semantics: Id },
+ .OpModuleProcessed => struct { process: LiteralString },
+ .OpExecutionModeId => struct { entry_point: Id, mode: ExecutionMode.Extended },
+ .OpDecorateId => struct { target: Id, decoration: Decoration.Extended },
+ .OpGroupNonUniformElect => struct { id_result_type: Id, id_result: Id, execution: Id },
+ .OpGroupNonUniformAll => struct { id_result_type: Id, id_result: Id, execution: Id, predicate: Id },
+ .OpGroupNonUniformAny => struct { id_result_type: Id, id_result: Id, execution: Id, predicate: Id },
+ .OpGroupNonUniformAllEqual => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id },
+ .OpGroupNonUniformBroadcast => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, id: Id },
+ .OpGroupNonUniformBroadcastFirst => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id },
+ .OpGroupNonUniformBallot => struct { id_result_type: Id, id_result: Id, execution: Id, predicate: Id },
+ .OpGroupNonUniformInverseBallot => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id },
+ .OpGroupNonUniformBallotBitExtract => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, index: Id },
+ .OpGroupNonUniformBallotBitCount => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id },
+ .OpGroupNonUniformBallotFindLSB => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id },
+ .OpGroupNonUniformBallotFindMSB => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id },
+ .OpGroupNonUniformShuffle => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, id: Id },
+ .OpGroupNonUniformShuffleXor => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, mask: Id },
+ .OpGroupNonUniformShuffleUp => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, delta: Id },
+ .OpGroupNonUniformShuffleDown => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, delta: Id },
+ .OpGroupNonUniformIAdd => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformFAdd => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformIMul => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformFMul => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformSMin => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformUMin => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformFMin => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformSMax => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformUMax => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformFMax => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformBitwiseAnd => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformBitwiseOr => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformBitwiseXor => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformLogicalAnd => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformLogicalOr => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformLogicalXor => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, value: Id, cluster_size: ?Id = null },
+ .OpGroupNonUniformQuadBroadcast => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, index: Id },
+ .OpGroupNonUniformQuadSwap => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, direction: Id },
+ .OpCopyLogical => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpPtrEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpPtrNotEqual => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpPtrDiff => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpColorAttachmentReadEXT => struct { id_result_type: Id, id_result: Id, attachment: Id, sample: ?Id = null },
+ .OpDepthAttachmentReadEXT => struct { id_result_type: Id, id_result: Id, sample: ?Id = null },
+ .OpStencilAttachmentReadEXT => struct { id_result_type: Id, id_result: Id, sample: ?Id = null },
+ .OpTypeTensorARM => struct { id_result: Id, element_type: Id, rank: ?Id = null, shape: ?Id = null },
+ .OpTensorReadARM => struct { id_result_type: Id, id_result: Id, tensor: Id, coordinates: Id, tensor_operands: ?TensorOperands.Extended = null },
+ .OpTensorWriteARM => struct { tensor: Id, coordinates: Id, object: Id, tensor_operands: ?TensorOperands.Extended = null },
+ .OpTensorQuerySizeARM => struct { id_result_type: Id, id_result: Id, tensor: Id, dimension: Id },
+ .OpGraphConstantARM => struct { id_result_type: Id, id_result: Id, graph_constant_id: LiteralInteger },
+ .OpGraphEntryPointARM => struct { graph: Id, name: LiteralString, interface: []const Id = &.{} },
+ .OpGraphARM => struct { id_result_type: Id, id_result: Id },
+ .OpGraphInputARM => struct { id_result_type: Id, id_result: Id, input_index: Id, element_index: []const Id = &.{} },
+ .OpGraphSetOutputARM => struct { value: Id, output_index: Id, element_index: []const Id = &.{} },
+ .OpGraphEndARM => void,
+ .OpTypeGraphARM => struct { id_result: Id, num_inputs: LiteralInteger, in_out_types: []const Id = &.{} },
+ .OpTerminateInvocation => void,
+ .OpTypeUntypedPointerKHR => struct { id_result: Id, storage_class: StorageClass },
+ .OpUntypedVariableKHR => struct { id_result_type: Id, id_result: Id, storage_class: StorageClass, data_type: ?Id = null, initializer: ?Id = null },
+ .OpUntypedAccessChainKHR => struct { id_result_type: Id, id_result: Id, base_type: Id, base: Id, indexes: []const Id = &.{} },
+ .OpUntypedInBoundsAccessChainKHR => struct { id_result_type: Id, id_result: Id, base_type: Id, base: Id, indexes: []const Id = &.{} },
+ .OpSubgroupBallotKHR => struct { id_result_type: Id, id_result: Id, predicate: Id },
+ .OpSubgroupFirstInvocationKHR => struct { id_result_type: Id, id_result: Id, value: Id },
+ .OpUntypedPtrAccessChainKHR => struct { id_result_type: Id, id_result: Id, base_type: Id, base: Id, element: Id, indexes: []const Id = &.{} },
+ .OpUntypedInBoundsPtrAccessChainKHR => struct { id_result_type: Id, id_result: Id, base_type: Id, base: Id, element: Id, indexes: []const Id = &.{} },
+ .OpUntypedArrayLengthKHR => struct { id_result_type: Id, id_result: Id, structure: Id, pointer: Id, array_member: LiteralInteger },
+ .OpUntypedPrefetchKHR => struct { pointer_type: Id, num_bytes: Id, rw: ?Id = null, locality: ?Id = null, cache_type: ?Id = null },
+ .OpSubgroupAllKHR => struct { id_result_type: Id, id_result: Id, predicate: Id },
+ .OpSubgroupAnyKHR => struct { id_result_type: Id, id_result: Id, predicate: Id },
+ .OpSubgroupAllEqualKHR => struct { id_result_type: Id, id_result: Id, predicate: Id },
+ .OpGroupNonUniformRotateKHR => struct { id_result_type: Id, id_result: Id, execution: Id, value: Id, delta: Id, cluster_size: ?Id = null },
+ .OpSubgroupReadInvocationKHR => struct { id_result_type: Id, id_result: Id, value: Id, index: Id },
+ .OpExtInstWithForwardRefsKHR => struct { id_result_type: Id, id_result: Id, set: Id, instruction: LiteralExtInstInteger, id_ref_4: []const Id = &.{} },
+ .OpTraceRayKHR => struct { accel: Id, ray_flags: Id, cull_mask: Id, sbt_offset: Id, sbt_stride: Id, miss_index: Id, ray_origin: Id, ray_tmin: Id, ray_direction: Id, ray_tmax: Id, payload: Id },
+ .OpExecuteCallableKHR => struct { sbt_index: Id, callable_data: Id },
+ .OpConvertUToAccelerationStructureKHR => struct { id_result_type: Id, id_result: Id, accel: Id },
+ .OpIgnoreIntersectionKHR => void,
+ .OpTerminateRayKHR => void,
+ .OpSDot => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, packed_vector_format: ?PackedVectorFormat = null },
+ .OpUDot => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, packed_vector_format: ?PackedVectorFormat = null },
+ .OpSUDot => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, packed_vector_format: ?PackedVectorFormat = null },
+ .OpSDotAccSat => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, accumulator: Id, packed_vector_format: ?PackedVectorFormat = null },
+ .OpUDotAccSat => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, accumulator: Id, packed_vector_format: ?PackedVectorFormat = null },
+ .OpSUDotAccSat => struct { id_result_type: Id, id_result: Id, vector_1: Id, vector_2: Id, accumulator: Id, packed_vector_format: ?PackedVectorFormat = null },
+ .OpTypeCooperativeMatrixKHR => struct { id_result: Id, component_type: Id, scope: Id, rows: Id, columns: Id, use: Id },
+ .OpCooperativeMatrixLoadKHR => struct { id_result_type: Id, id_result: Id, pointer: Id, memory_layout: Id, stride: ?Id = null, memory_operand: ?MemoryAccess.Extended = null },
+ .OpCooperativeMatrixStoreKHR => struct { pointer: Id, object: Id, memory_layout: Id, stride: ?Id = null, memory_operand: ?MemoryAccess.Extended = null },
+ .OpCooperativeMatrixMulAddKHR => struct { id_result_type: Id, id_result: Id, a: Id, b: Id, c: Id, cooperative_matrix_operands: ?CooperativeMatrixOperands = null },
+ .OpCooperativeMatrixLengthKHR => struct { id_result_type: Id, id_result: Id, type: Id },
+ .OpConstantCompositeReplicateEXT => struct { id_result_type: Id, id_result: Id, value: Id },
+ .OpSpecConstantCompositeReplicateEXT => struct { id_result_type: Id, id_result: Id, value: Id },
+ .OpCompositeConstructReplicateEXT => struct { id_result_type: Id, id_result: Id, value: Id },
+ .OpTypeRayQueryKHR => struct { id_result: Id },
+ .OpRayQueryInitializeKHR => struct { ray_query: Id, accel: Id, ray_flags: Id, cull_mask: Id, ray_origin: Id, ray_t_min: Id, ray_direction: Id, ray_t_max: Id },
+ .OpRayQueryTerminateKHR => struct { ray_query: Id },
+ .OpRayQueryGenerateIntersectionKHR => struct { ray_query: Id, hit_t: Id },
+ .OpRayQueryConfirmIntersectionKHR => struct { ray_query: Id },
+ .OpRayQueryProceedKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id },
+ .OpRayQueryGetIntersectionTypeKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpImageSampleWeightedQCOM => struct { id_result_type: Id, id_result: Id, texture: Id, coordinates: Id, weights: Id },
+ .OpImageBoxFilterQCOM => struct { id_result_type: Id, id_result: Id, texture: Id, coordinates: Id, box_size: Id },
+ .OpImageBlockMatchSSDQCOM => struct { id_result_type: Id, id_result: Id, target: Id, target_coordinates: Id, reference: Id, reference_coordinates: Id, block_size: Id },
+ .OpImageBlockMatchSADQCOM => struct { id_result_type: Id, id_result: Id, target: Id, target_coordinates: Id, reference: Id, reference_coordinates: Id, block_size: Id },
+ .OpImageBlockMatchWindowSSDQCOM => struct { id_result_type: Id, id_result: Id, target_sampled_image: Id, target_coordinates: Id, reference_sampled_image: Id, reference_coordinates: Id, block_size: Id },
+ .OpImageBlockMatchWindowSADQCOM => struct { id_result_type: Id, id_result: Id, target_sampled_image: Id, target_coordinates: Id, reference_sampled_image: Id, reference_coordinates: Id, block_size: Id },
+ .OpImageBlockMatchGatherSSDQCOM => struct { id_result_type: Id, id_result: Id, target_sampled_image: Id, target_coordinates: Id, reference_sampled_image: Id, reference_coordinates: Id, block_size: Id },
+ .OpImageBlockMatchGatherSADQCOM => struct { id_result_type: Id, id_result: Id, target_sampled_image: Id, target_coordinates: Id, reference_sampled_image: Id, reference_coordinates: Id, block_size: Id },
+ .OpGroupIAddNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFAddNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFMinNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupUMinNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupSMinNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFMaxNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupUMaxNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupSMaxNonUniformAMD => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpFragmentMaskFetchAMD => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id },
+ .OpFragmentFetchAMD => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, fragment_index: Id },
+ .OpReadClockKHR => struct { id_result_type: Id, id_result: Id, scope: Id },
+ .OpAllocateNodePayloadsAMDX => struct { id_result_type: Id, id_result: Id, visibility: Id, payload_count: Id, node_index: Id },
+ .OpEnqueueNodePayloadsAMDX => struct { payload_array: Id },
+ .OpTypeNodePayloadArrayAMDX => struct { id_result: Id, payload_type: Id },
+ .OpFinishWritingNodePayloadAMDX => struct { id_result_type: Id, id_result: Id, payload: Id },
+ .OpNodePayloadArrayLengthAMDX => struct { id_result_type: Id, id_result: Id, payload_array: Id },
+ .OpIsNodePayloadValidAMDX => struct { id_result_type: Id, id_result: Id, payload_type: Id, node_index: Id },
+ .OpConstantStringAMDX => struct { id_result: Id, literal_string: LiteralString },
+ .OpSpecConstantStringAMDX => struct { id_result: Id, literal_string: LiteralString },
+ .OpGroupNonUniformQuadAllKHR => struct { id_result_type: Id, id_result: Id, predicate: Id },
+ .OpGroupNonUniformQuadAnyKHR => struct { id_result_type: Id, id_result: Id, predicate: Id },
+ .OpHitObjectRecordHitMotionNV => struct { hit_object: Id, acceleration_structure: Id, instance_id: Id, primitive_id: Id, geometry_index: Id, hit_kind: Id, sbt_record_offset: Id, sbt_record_stride: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, current_time: Id, hit_object_attributes: Id },
+ .OpHitObjectRecordHitWithIndexMotionNV => struct { hit_object: Id, acceleration_structure: Id, instance_id: Id, primitive_id: Id, geometry_index: Id, hit_kind: Id, sbt_record_index: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, current_time: Id, hit_object_attributes: Id },
+ .OpHitObjectRecordMissMotionNV => struct { hit_object: Id, sbt_index: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, current_time: Id },
+ .OpHitObjectGetWorldToObjectNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetObjectToWorldNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetObjectRayDirectionNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetObjectRayOriginNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectTraceRayMotionNV => struct { hit_object: Id, acceleration_structure: Id, ray_flags: Id, cullmask: Id, sbt_record_offset: Id, sbt_record_stride: Id, miss_index: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, time: Id, payload: Id },
+ .OpHitObjectGetShaderRecordBufferHandleNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetShaderBindingTableRecordIndexNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectRecordEmptyNV => struct { hit_object: Id },
+ .OpHitObjectTraceRayNV => struct { hit_object: Id, acceleration_structure: Id, ray_flags: Id, cullmask: Id, sbt_record_offset: Id, sbt_record_stride: Id, miss_index: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, payload: Id },
+ .OpHitObjectRecordHitNV => struct { hit_object: Id, acceleration_structure: Id, instance_id: Id, primitive_id: Id, geometry_index: Id, hit_kind: Id, sbt_record_offset: Id, sbt_record_stride: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, hit_object_attributes: Id },
+ .OpHitObjectRecordHitWithIndexNV => struct { hit_object: Id, acceleration_structure: Id, instance_id: Id, primitive_id: Id, geometry_index: Id, hit_kind: Id, sbt_record_index: Id, origin: Id, t_min: Id, direction: Id, t_max: Id, hit_object_attributes: Id },
+ .OpHitObjectRecordMissNV => struct { hit_object: Id, sbt_index: Id, origin: Id, t_min: Id, direction: Id, t_max: Id },
+ .OpHitObjectExecuteShaderNV => struct { hit_object: Id, payload: Id },
+ .OpHitObjectGetCurrentTimeNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetAttributesNV => struct { hit_object: Id, hit_object_attribute: Id },
+ .OpHitObjectGetHitKindNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetPrimitiveIndexNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetGeometryIndexNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetInstanceIdNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetInstanceCustomIndexNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetWorldRayDirectionNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetWorldRayOriginNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetRayTMaxNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetRayTMinNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectIsEmptyNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectIsHitNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectIsMissNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpReorderThreadWithHitObjectNV => struct { hit_object: Id, hint: ?Id = null, bits: ?Id = null },
+ .OpReorderThreadWithHintNV => struct { hint: Id, bits: Id },
+ .OpTypeHitObjectNV => struct { id_result: Id },
+ .OpImageSampleFootprintNV => struct { id_result_type: Id, id_result: Id, sampled_image: Id, coordinate: Id, granularity: Id, coarse: Id, image_operands: ?ImageOperands.Extended = null },
+ .OpTypeCooperativeVectorNV => struct { id_result: Id, component_type: Id, component_count: Id },
+ .OpCooperativeVectorMatrixMulNV => struct { id_result_type: Id, id_result: Id, input: Id, input_interpretation: Id, matrix: Id, matrix_offset: Id, matrix_interpretation: Id, m: Id, k: Id, memory_layout: Id, transpose: Id, matrix_stride: ?Id = null, cooperative_matrix_operands: ?CooperativeMatrixOperands = null },
+ .OpCooperativeVectorOuterProductAccumulateNV => struct { pointer: Id, offset: Id, a: Id, b: Id, memory_layout: Id, matrix_interpretation: Id, matrix_stride: ?Id = null },
+ .OpCooperativeVectorReduceSumAccumulateNV => struct { pointer: Id, offset: Id, v: Id },
+ .OpCooperativeVectorMatrixMulAddNV => struct { id_result_type: Id, id_result: Id, input: Id, input_interpretation: Id, matrix: Id, matrix_offset: Id, matrix_interpretation: Id, bias: Id, bias_offset: Id, bias_interpretation: Id, m: Id, k: Id, memory_layout: Id, transpose: Id, matrix_stride: ?Id = null, cooperative_matrix_operands: ?CooperativeMatrixOperands = null },
+ .OpCooperativeMatrixConvertNV => struct { id_result_type: Id, id_result: Id, matrix: Id },
+ .OpEmitMeshTasksEXT => struct { group_count_x: Id, group_count_y: Id, group_count_z: Id, payload: ?Id = null },
+ .OpSetMeshOutputsEXT => struct { vertex_count: Id, primitive_count: Id },
+ .OpGroupNonUniformPartitionNV => struct { id_result_type: Id, id_result: Id, value: Id },
+ .OpWritePackedPrimitiveIndices4x8NV => struct { index_offset: Id, packed_indices: Id },
+ .OpFetchMicroTriangleVertexPositionNV => struct { id_result_type: Id, id_result: Id, accel: Id, instance_id: Id, geometry_index: Id, primitive_index: Id, barycentric: Id },
+ .OpFetchMicroTriangleVertexBarycentricNV => struct { id_result_type: Id, id_result: Id, accel: Id, instance_id: Id, geometry_index: Id, primitive_index: Id, barycentric: Id },
+ .OpCooperativeVectorLoadNV => struct { id_result_type: Id, id_result: Id, pointer: Id, offset: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpCooperativeVectorStoreNV => struct { pointer: Id, offset: Id, object: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpReportIntersectionKHR => struct { id_result_type: Id, id_result: Id, hit: Id, hit_kind: Id },
+ .OpIgnoreIntersectionNV => void,
+ .OpTerminateRayNV => void,
+ .OpTraceNV => struct { accel: Id, ray_flags: Id, cull_mask: Id, sbt_offset: Id, sbt_stride: Id, miss_index: Id, ray_origin: Id, ray_tmin: Id, ray_direction: Id, ray_tmax: Id, payload_id: Id },
+ .OpTraceMotionNV => struct { accel: Id, ray_flags: Id, cull_mask: Id, sbt_offset: Id, sbt_stride: Id, miss_index: Id, ray_origin: Id, ray_tmin: Id, ray_direction: Id, ray_tmax: Id, time: Id, payload_id: Id },
+ .OpTraceRayMotionNV => struct { accel: Id, ray_flags: Id, cull_mask: Id, sbt_offset: Id, sbt_stride: Id, miss_index: Id, ray_origin: Id, ray_tmin: Id, ray_direction: Id, ray_tmax: Id, time: Id, payload: Id },
+ .OpRayQueryGetIntersectionTriangleVertexPositionsKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpTypeAccelerationStructureKHR => struct { id_result: Id },
+ .OpExecuteCallableNV => struct { sbt_index: Id, callable_data_id: Id },
+ .OpRayQueryGetClusterIdNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpHitObjectGetClusterIdNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpTypeCooperativeMatrixNV => struct { id_result: Id, component_type: Id, execution: Id, rows: Id, columns: Id },
+ .OpCooperativeMatrixLoadNV => struct { id_result_type: Id, id_result: Id, pointer: Id, stride: Id, column_major: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpCooperativeMatrixStoreNV => struct { pointer: Id, object: Id, stride: Id, column_major: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpCooperativeMatrixMulAddNV => struct { id_result_type: Id, id_result: Id, a: Id, b: Id, c: Id },
+ .OpCooperativeMatrixLengthNV => struct { id_result_type: Id, id_result: Id, type: Id },
+ .OpBeginInvocationInterlockEXT => void,
+ .OpEndInvocationInterlockEXT => void,
+ .OpCooperativeMatrixReduceNV => struct { id_result_type: Id, id_result: Id, matrix: Id, reduce: CooperativeMatrixReduce, combine_func: Id },
+ .OpCooperativeMatrixLoadTensorNV => struct { id_result_type: Id, id_result: Id, pointer: Id, object: Id, tensor_layout: Id, memory_operand: MemoryAccess.Extended, tensor_addressing_operands: TensorAddressingOperands.Extended },
+ .OpCooperativeMatrixStoreTensorNV => struct { pointer: Id, object: Id, tensor_layout: Id, memory_operand: MemoryAccess.Extended, tensor_addressing_operands: TensorAddressingOperands.Extended },
+ .OpCooperativeMatrixPerElementOpNV => struct { id_result_type: Id, id_result: Id, matrix: Id, func: Id, operands: []const Id = &.{} },
+ .OpTypeTensorLayoutNV => struct { id_result: Id, dim: Id, clamp_mode: Id },
+ .OpTypeTensorViewNV => struct { id_result: Id, dim: Id, has_dimensions: Id, p: []const Id = &.{} },
+ .OpCreateTensorLayoutNV => struct { id_result_type: Id, id_result: Id },
+ .OpTensorLayoutSetDimensionNV => struct { id_result_type: Id, id_result: Id, tensor_layout: Id, dim: []const Id = &.{} },
+ .OpTensorLayoutSetStrideNV => struct { id_result_type: Id, id_result: Id, tensor_layout: Id, stride: []const Id = &.{} },
+ .OpTensorLayoutSliceNV => struct { id_result_type: Id, id_result: Id, tensor_layout: Id, operands: []const Id = &.{} },
+ .OpTensorLayoutSetClampValueNV => struct { id_result_type: Id, id_result: Id, tensor_layout: Id, value: Id },
+ .OpCreateTensorViewNV => struct { id_result_type: Id, id_result: Id },
+ .OpTensorViewSetDimensionNV => struct { id_result_type: Id, id_result: Id, tensor_view: Id, dim: []const Id = &.{} },
+ .OpTensorViewSetStrideNV => struct { id_result_type: Id, id_result: Id, tensor_view: Id, stride: []const Id = &.{} },
+ .OpDemoteToHelperInvocation => void,
+ .OpIsHelperInvocationEXT => struct { id_result_type: Id, id_result: Id },
+ .OpTensorViewSetClipNV => struct { id_result_type: Id, id_result: Id, tensor_view: Id, clip_row_offset: Id, clip_row_span: Id, clip_col_offset: Id, clip_col_span: Id },
+ .OpTensorLayoutSetBlockSizeNV => struct { id_result_type: Id, id_result: Id, tensor_layout: Id, block_size: []const Id = &.{} },
+ .OpCooperativeMatrixTransposeNV => struct { id_result_type: Id, id_result: Id, matrix: Id },
+ .OpConvertUToImageNV => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertUToSamplerNV => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertImageToUNV => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertSamplerToUNV => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertUToSampledImageNV => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertSampledImageToUNV => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpSamplerImageAddressingModeNV => struct { bit_width: LiteralInteger },
+ .OpRawAccessChainNV => struct { id_result_type: Id, id_result: Id, base: Id, byte_stride: Id, element_index: Id, byte_offset: Id, raw_access_chain_operands: ?RawAccessChainOperands = null },
+ .OpRayQueryGetIntersectionSpherePositionNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionSphereRadiusNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionLSSPositionsNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionLSSRadiiNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionLSSHitValueNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpHitObjectGetSpherePositionNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetSphereRadiusNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetLSSPositionsNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectGetLSSRadiiNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectIsSphereHitNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpHitObjectIsLSSHitNV => struct { id_result_type: Id, id_result: Id, hit_object: Id },
+ .OpRayQueryIsSphereHitNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryIsLSSHitNV => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpSubgroupShuffleINTEL => struct { id_result_type: Id, id_result: Id, data: Id, invocation_id: Id },
+ .OpSubgroupShuffleDownINTEL => struct { id_result_type: Id, id_result: Id, current: Id, next: Id, delta: Id },
+ .OpSubgroupShuffleUpINTEL => struct { id_result_type: Id, id_result: Id, previous: Id, current: Id, delta: Id },
+ .OpSubgroupShuffleXorINTEL => struct { id_result_type: Id, id_result: Id, data: Id, value: Id },
+ .OpSubgroupBlockReadINTEL => struct { id_result_type: Id, id_result: Id, ptr: Id },
+ .OpSubgroupBlockWriteINTEL => struct { ptr: Id, data: Id },
+ .OpSubgroupImageBlockReadINTEL => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id },
+ .OpSubgroupImageBlockWriteINTEL => struct { image: Id, coordinate: Id, data: Id },
+ .OpSubgroupImageMediaBlockReadINTEL => struct { id_result_type: Id, id_result: Id, image: Id, coordinate: Id, width: Id, height: Id },
+ .OpSubgroupImageMediaBlockWriteINTEL => struct { image: Id, coordinate: Id, width: Id, height: Id, data: Id },
+ .OpUCountLeadingZerosINTEL => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpUCountTrailingZerosINTEL => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpAbsISubINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpAbsUSubINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpIAddSatINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUAddSatINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpIAverageINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUAverageINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpIAverageRoundedINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUAverageRoundedINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpISubSatINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUSubSatINTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpIMul32x16INTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpUMul32x16INTEL => struct { id_result_type: Id, id_result: Id, operand_1: Id, operand_2: Id },
+ .OpAtomicFMinEXT => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAtomicFMaxEXT => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpAssumeTrueKHR => struct { condition: Id },
+ .OpExpectKHR => struct { id_result_type: Id, id_result: Id, value: Id, expected_value: Id },
+ .OpDecorateString => struct { target: Id, decoration: Decoration.Extended },
+ .OpMemberDecorateString => struct { struct_type: Id, member: LiteralInteger, decoration: Decoration.Extended },
+ .OpLoopControlINTEL => struct { loop_control_parameters: []const LiteralInteger = &.{} },
+ .OpReadPipeBlockingINTEL => struct { id_result_type: Id, id_result: Id, packet_size: Id, packet_alignment: Id },
+ .OpWritePipeBlockingINTEL => struct { id_result_type: Id, id_result: Id, packet_size: Id, packet_alignment: Id },
+ .OpFPGARegINTEL => struct { id_result_type: Id, id_result: Id, input: Id },
+ .OpRayQueryGetRayTMinKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id },
+ .OpRayQueryGetRayFlagsKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id },
+ .OpRayQueryGetIntersectionTKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionInstanceCustomIndexKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionInstanceIdKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionGeometryIndexKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionPrimitiveIndexKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionBarycentricsKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionFrontFaceKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id },
+ .OpRayQueryGetIntersectionObjectRayDirectionKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionObjectRayOriginKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetWorldRayDirectionKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id },
+ .OpRayQueryGetWorldRayOriginKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id },
+ .OpRayQueryGetIntersectionObjectToWorldKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpRayQueryGetIntersectionWorldToObjectKHR => struct { id_result_type: Id, id_result: Id, ray_query: Id, intersection: Id },
+ .OpAtomicFAddEXT => struct { id_result_type: Id, id_result: Id, pointer: Id, memory: Id, semantics: Id, value: Id },
+ .OpTypeBufferSurfaceINTEL => struct { id_result: Id, access_qualifier: AccessQualifier },
+ .OpTypeStructContinuedINTEL => struct { id_ref: []const Id = &.{} },
+ .OpConstantCompositeContinuedINTEL => struct { constituents: []const Id = &.{} },
+ .OpSpecConstantCompositeContinuedINTEL => struct { constituents: []const Id = &.{} },
+ .OpCompositeConstructContinuedINTEL => struct { id_result_type: Id, id_result: Id, constituents: []const Id = &.{} },
+ .OpConvertFToBF16INTEL => struct { id_result_type: Id, id_result: Id, float_value: Id },
+ .OpConvertBF16ToFINTEL => struct { id_result_type: Id, id_result: Id, b_float16_value: Id },
+ .OpControlBarrierArriveINTEL => struct { execution: Id, memory: Id, semantics: Id },
+ .OpControlBarrierWaitINTEL => struct { execution: Id, memory: Id, semantics: Id },
+ .OpArithmeticFenceEXT => struct { id_result_type: Id, id_result: Id, target: Id },
+ .OpTaskSequenceCreateINTEL => struct { id_result_type: Id, id_result: Id, function: Id, pipelined: LiteralInteger, use_stall_enable_clusters: LiteralInteger, get_capacity: LiteralInteger, async_capacity: LiteralInteger },
+ .OpTaskSequenceAsyncINTEL => struct { sequence: Id, arguments: []const Id = &.{} },
+ .OpTaskSequenceGetINTEL => struct { id_result_type: Id, id_result: Id, sequence: Id },
+ .OpTaskSequenceReleaseINTEL => struct { sequence: Id },
+ .OpTypeTaskSequenceINTEL => struct { id_result: Id },
+ .OpSubgroupBlockPrefetchINTEL => struct { ptr: Id, num_bytes: Id, memory_access: ?MemoryAccess.Extended = null },
+ .OpSubgroup2DBlockLoadINTEL => struct { element_size: Id, block_width: Id, block_height: Id, block_count: Id, src_base_pointer: Id, memory_width: Id, memory_height: Id, memory_pitch: Id, coordinate: Id, dst_pointer: Id },
+ .OpSubgroup2DBlockLoadTransformINTEL => struct { element_size: Id, block_width: Id, block_height: Id, block_count: Id, src_base_pointer: Id, memory_width: Id, memory_height: Id, memory_pitch: Id, coordinate: Id, dst_pointer: Id },
+ .OpSubgroup2DBlockLoadTransposeINTEL => struct { element_size: Id, block_width: Id, block_height: Id, block_count: Id, src_base_pointer: Id, memory_width: Id, memory_height: Id, memory_pitch: Id, coordinate: Id, dst_pointer: Id },
+ .OpSubgroup2DBlockPrefetchINTEL => struct { element_size: Id, block_width: Id, block_height: Id, block_count: Id, src_base_pointer: Id, memory_width: Id, memory_height: Id, memory_pitch: Id, coordinate: Id },
+ .OpSubgroup2DBlockStoreINTEL => struct { element_size: Id, block_width: Id, block_height: Id, block_count: Id, src_pointer: Id, dst_base_pointer: Id, memory_width: Id, memory_height: Id, memory_pitch: Id, coordinate: Id },
+ .OpSubgroupMatrixMultiplyAccumulateINTEL => struct { id_result_type: Id, id_result: Id, k_dim: Id, matrix_a: Id, matrix_b: Id, matrix_c: Id, matrix_multiply_accumulate_operands: ?MatrixMultiplyAccumulateOperands = null },
+ .OpBitwiseFunctionINTEL => struct { id_result_type: Id, id_result: Id, a: Id, b: Id, c: Id, lut_index: Id },
+ .OpGroupIMulKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupFMulKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupBitwiseAndKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupBitwiseOrKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupBitwiseXorKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupLogicalAndKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupLogicalOrKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpGroupLogicalXorKHR => struct { id_result_type: Id, id_result: Id, execution: Id, operation: GroupOperation, x: Id },
+ .OpRoundFToTF32INTEL => struct { id_result_type: Id, id_result: Id, float_value: Id },
+ .OpMaskedGatherINTEL => struct { id_result_type: Id, id_result: Id, ptr_vector: Id, alignment: LiteralInteger, mask: Id, fill_empty: Id },
+ .OpMaskedScatterINTEL => struct { input_vector: Id, ptr_vector: Id, alignment: LiteralInteger, mask: Id },
+ .OpConvertHandleToImageINTEL => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertHandleToSamplerINTEL => struct { id_result_type: Id, id_result: Id, operand: Id },
+ .OpConvertHandleToSampledImageINTEL => struct { id_result_type: Id, id_result: Id, operand: Id },
+ };
+ }
+ pub fn class(self: Opcode) Class {
+ return switch (self) {
+ .OpNop => .miscellaneous,
+ .OpUndef => .miscellaneous,
+ .OpSourceContinued => .debug,
+ .OpSource => .debug,
+ .OpSourceExtension => .debug,
+ .OpName => .debug,
+ .OpMemberName => .debug,
+ .OpString => .debug,
+ .OpLine => .debug,
+ .OpExtension => .extension,
+ .OpExtInstImport => .extension,
+ .OpExtInst => .extension,
+ .OpMemoryModel => .mode_setting,
+ .OpEntryPoint => .mode_setting,
+ .OpExecutionMode => .mode_setting,
+ .OpCapability => .mode_setting,
+ .OpTypeVoid => .type_declaration,
+ .OpTypeBool => .type_declaration,
+ .OpTypeInt => .type_declaration,
+ .OpTypeFloat => .type_declaration,
+ .OpTypeVector => .type_declaration,
+ .OpTypeMatrix => .type_declaration,
+ .OpTypeImage => .type_declaration,
+ .OpTypeSampler => .type_declaration,
+ .OpTypeSampledImage => .type_declaration,
+ .OpTypeArray => .type_declaration,
+ .OpTypeRuntimeArray => .type_declaration,
+ .OpTypeStruct => .type_declaration,
+ .OpTypeOpaque => .type_declaration,
+ .OpTypePointer => .type_declaration,
+ .OpTypeFunction => .type_declaration,
+ .OpTypeEvent => .type_declaration,
+ .OpTypeDeviceEvent => .type_declaration,
+ .OpTypeReserveId => .type_declaration,
+ .OpTypeQueue => .type_declaration,
+ .OpTypePipe => .type_declaration,
+ .OpTypeForwardPointer => .type_declaration,
+ .OpConstantTrue => .constant_creation,
+ .OpConstantFalse => .constant_creation,
+ .OpConstant => .constant_creation,
+ .OpConstantComposite => .constant_creation,
+ .OpConstantSampler => .constant_creation,
+ .OpConstantNull => .constant_creation,
+ .OpSpecConstantTrue => .constant_creation,
+ .OpSpecConstantFalse => .constant_creation,
+ .OpSpecConstant => .constant_creation,
+ .OpSpecConstantComposite => .constant_creation,
+ .OpSpecConstantOp => .constant_creation,
+ .OpFunction => .function,
+ .OpFunctionParameter => .function,
+ .OpFunctionEnd => .function,
+ .OpFunctionCall => .function,
+ .OpVariable => .memory,
+ .OpImageTexelPointer => .memory,
+ .OpLoad => .memory,
+ .OpStore => .memory,
+ .OpCopyMemory => .memory,
+ .OpCopyMemorySized => .memory,
+ .OpAccessChain => .memory,
+ .OpInBoundsAccessChain => .memory,
+ .OpPtrAccessChain => .memory,
+ .OpArrayLength => .memory,
+ .OpGenericPtrMemSemantics => .memory,
+ .OpInBoundsPtrAccessChain => .memory,
+ .OpDecorate => .annotation,
+ .OpMemberDecorate => .annotation,
+ .OpDecorationGroup => .annotation,
+ .OpGroupDecorate => .annotation,
+ .OpGroupMemberDecorate => .annotation,
+ .OpVectorExtractDynamic => .composite,
+ .OpVectorInsertDynamic => .composite,
+ .OpVectorShuffle => .composite,
+ .OpCompositeConstruct => .composite,
+ .OpCompositeExtract => .composite,
+ .OpCompositeInsert => .composite,
+ .OpCopyObject => .composite,
+ .OpTranspose => .composite,
+ .OpSampledImage => .image,
+ .OpImageSampleImplicitLod => .image,
+ .OpImageSampleExplicitLod => .image,
+ .OpImageSampleDrefImplicitLod => .image,
+ .OpImageSampleDrefExplicitLod => .image,
+ .OpImageSampleProjImplicitLod => .image,
+ .OpImageSampleProjExplicitLod => .image,
+ .OpImageSampleProjDrefImplicitLod => .image,
+ .OpImageSampleProjDrefExplicitLod => .image,
+ .OpImageFetch => .image,
+ .OpImageGather => .image,
+ .OpImageDrefGather => .image,
+ .OpImageRead => .image,
+ .OpImageWrite => .image,
+ .OpImage => .image,
+ .OpImageQueryFormat => .image,
+ .OpImageQueryOrder => .image,
+ .OpImageQuerySizeLod => .image,
+ .OpImageQuerySize => .image,
+ .OpImageQueryLod => .image,
+ .OpImageQueryLevels => .image,
+ .OpImageQuerySamples => .image,
+ .OpConvertFToU => .conversion,
+ .OpConvertFToS => .conversion,
+ .OpConvertSToF => .conversion,
+ .OpConvertUToF => .conversion,
+ .OpUConvert => .conversion,
+ .OpSConvert => .conversion,
+ .OpFConvert => .conversion,
+ .OpQuantizeToF16 => .conversion,
+ .OpConvertPtrToU => .conversion,
+ .OpSatConvertSToU => .conversion,
+ .OpSatConvertUToS => .conversion,
+ .OpConvertUToPtr => .conversion,
+ .OpPtrCastToGeneric => .conversion,
+ .OpGenericCastToPtr => .conversion,
+ .OpGenericCastToPtrExplicit => .conversion,
+ .OpBitcast => .conversion,
+ .OpSNegate => .arithmetic,
+ .OpFNegate => .arithmetic,
+ .OpIAdd => .arithmetic,
+ .OpFAdd => .arithmetic,
+ .OpISub => .arithmetic,
+ .OpFSub => .arithmetic,
+ .OpIMul => .arithmetic,
+ .OpFMul => .arithmetic,
+ .OpUDiv => .arithmetic,
+ .OpSDiv => .arithmetic,
+ .OpFDiv => .arithmetic,
+ .OpUMod => .arithmetic,
+ .OpSRem => .arithmetic,
+ .OpSMod => .arithmetic,
+ .OpFRem => .arithmetic,
+ .OpFMod => .arithmetic,
+ .OpVectorTimesScalar => .arithmetic,
+ .OpMatrixTimesScalar => .arithmetic,
+ .OpVectorTimesMatrix => .arithmetic,
+ .OpMatrixTimesVector => .arithmetic,
+ .OpMatrixTimesMatrix => .arithmetic,
+ .OpOuterProduct => .arithmetic,
+ .OpDot => .arithmetic,
+ .OpIAddCarry => .arithmetic,
+ .OpISubBorrow => .arithmetic,
+ .OpUMulExtended => .arithmetic,
+ .OpSMulExtended => .arithmetic,
+ .OpAny => .relational_and_logical,
+ .OpAll => .relational_and_logical,
+ .OpIsNan => .relational_and_logical,
+ .OpIsInf => .relational_and_logical,
+ .OpIsFinite => .relational_and_logical,
+ .OpIsNormal => .relational_and_logical,
+ .OpSignBitSet => .relational_and_logical,
+ .OpLessOrGreater => .relational_and_logical,
+ .OpOrdered => .relational_and_logical,
+ .OpUnordered => .relational_and_logical,
+ .OpLogicalEqual => .relational_and_logical,
+ .OpLogicalNotEqual => .relational_and_logical,
+ .OpLogicalOr => .relational_and_logical,
+ .OpLogicalAnd => .relational_and_logical,
+ .OpLogicalNot => .relational_and_logical,
+ .OpSelect => .relational_and_logical,
+ .OpIEqual => .relational_and_logical,
+ .OpINotEqual => .relational_and_logical,
+ .OpUGreaterThan => .relational_and_logical,
+ .OpSGreaterThan => .relational_and_logical,
+ .OpUGreaterThanEqual => .relational_and_logical,
+ .OpSGreaterThanEqual => .relational_and_logical,
+ .OpULessThan => .relational_and_logical,
+ .OpSLessThan => .relational_and_logical,
+ .OpULessThanEqual => .relational_and_logical,
+ .OpSLessThanEqual => .relational_and_logical,
+ .OpFOrdEqual => .relational_and_logical,
+ .OpFUnordEqual => .relational_and_logical,
+ .OpFOrdNotEqual => .relational_and_logical,
+ .OpFUnordNotEqual => .relational_and_logical,
+ .OpFOrdLessThan => .relational_and_logical,
+ .OpFUnordLessThan => .relational_and_logical,
+ .OpFOrdGreaterThan => .relational_and_logical,
+ .OpFUnordGreaterThan => .relational_and_logical,
+ .OpFOrdLessThanEqual => .relational_and_logical,
+ .OpFUnordLessThanEqual => .relational_and_logical,
+ .OpFOrdGreaterThanEqual => .relational_and_logical,
+ .OpFUnordGreaterThanEqual => .relational_and_logical,
+ .OpShiftRightLogical => .bit,
+ .OpShiftRightArithmetic => .bit,
+ .OpShiftLeftLogical => .bit,
+ .OpBitwiseOr => .bit,
+ .OpBitwiseXor => .bit,
+ .OpBitwiseAnd => .bit,
+ .OpNot => .bit,
+ .OpBitFieldInsert => .bit,
+ .OpBitFieldSExtract => .bit,
+ .OpBitFieldUExtract => .bit,
+ .OpBitReverse => .bit,
+ .OpBitCount => .bit,
+ .OpDPdx => .derivative,
+ .OpDPdy => .derivative,
+ .OpFwidth => .derivative,
+ .OpDPdxFine => .derivative,
+ .OpDPdyFine => .derivative,
+ .OpFwidthFine => .derivative,
+ .OpDPdxCoarse => .derivative,
+ .OpDPdyCoarse => .derivative,
+ .OpFwidthCoarse => .derivative,
+ .OpEmitVertex => .primitive,
+ .OpEndPrimitive => .primitive,
+ .OpEmitStreamVertex => .primitive,
+ .OpEndStreamPrimitive => .primitive,
+ .OpControlBarrier => .barrier,
+ .OpMemoryBarrier => .barrier,
+ .OpAtomicLoad => .atomic,
+ .OpAtomicStore => .atomic,
+ .OpAtomicExchange => .atomic,
+ .OpAtomicCompareExchange => .atomic,
+ .OpAtomicCompareExchangeWeak => .atomic,
+ .OpAtomicIIncrement => .atomic,
+ .OpAtomicIDecrement => .atomic,
+ .OpAtomicIAdd => .atomic,
+ .OpAtomicISub => .atomic,
+ .OpAtomicSMin => .atomic,
+ .OpAtomicUMin => .atomic,
+ .OpAtomicSMax => .atomic,
+ .OpAtomicUMax => .atomic,
+ .OpAtomicAnd => .atomic,
+ .OpAtomicOr => .atomic,
+ .OpAtomicXor => .atomic,
+ .OpPhi => .control_flow,
+ .OpLoopMerge => .control_flow,
+ .OpSelectionMerge => .control_flow,
+ .OpLabel => .control_flow,
+ .OpBranch => .control_flow,
+ .OpBranchConditional => .control_flow,
+ .OpSwitch => .control_flow,
+ .OpKill => .control_flow,
+ .OpReturn => .control_flow,
+ .OpReturnValue => .control_flow,
+ .OpUnreachable => .control_flow,
+ .OpLifetimeStart => .control_flow,
+ .OpLifetimeStop => .control_flow,
+ .OpGroupAsyncCopy => .group,
+ .OpGroupWaitEvents => .group,
+ .OpGroupAll => .group,
+ .OpGroupAny => .group,
+ .OpGroupBroadcast => .group,
+ .OpGroupIAdd => .group,
+ .OpGroupFAdd => .group,
+ .OpGroupFMin => .group,
+ .OpGroupUMin => .group,
+ .OpGroupSMin => .group,
+ .OpGroupFMax => .group,
+ .OpGroupUMax => .group,
+ .OpGroupSMax => .group,
+ .OpReadPipe => .pipe,
+ .OpWritePipe => .pipe,
+ .OpReservedReadPipe => .pipe,
+ .OpReservedWritePipe => .pipe,
+ .OpReserveReadPipePackets => .pipe,
+ .OpReserveWritePipePackets => .pipe,
+ .OpCommitReadPipe => .pipe,
+ .OpCommitWritePipe => .pipe,
+ .OpIsValidReserveId => .pipe,
+ .OpGetNumPipePackets => .pipe,
+ .OpGetMaxPipePackets => .pipe,
+ .OpGroupReserveReadPipePackets => .pipe,
+ .OpGroupReserveWritePipePackets => .pipe,
+ .OpGroupCommitReadPipe => .pipe,
+ .OpGroupCommitWritePipe => .pipe,
+ .OpEnqueueMarker => .device_side_enqueue,
+ .OpEnqueueKernel => .device_side_enqueue,
+ .OpGetKernelNDrangeSubGroupCount => .device_side_enqueue,
+ .OpGetKernelNDrangeMaxSubGroupSize => .device_side_enqueue,
+ .OpGetKernelWorkGroupSize => .device_side_enqueue,
+ .OpGetKernelPreferredWorkGroupSizeMultiple => .device_side_enqueue,
+ .OpRetainEvent => .device_side_enqueue,
+ .OpReleaseEvent => .device_side_enqueue,
+ .OpCreateUserEvent => .device_side_enqueue,
+ .OpIsValidEvent => .device_side_enqueue,
+ .OpSetUserEventStatus => .device_side_enqueue,
+ .OpCaptureEventProfilingInfo => .device_side_enqueue,
+ .OpGetDefaultQueue => .device_side_enqueue,
+ .OpBuildNDRange => .device_side_enqueue,
+ .OpImageSparseSampleImplicitLod => .image,
+ .OpImageSparseSampleExplicitLod => .image,
+ .OpImageSparseSampleDrefImplicitLod => .image,
+ .OpImageSparseSampleDrefExplicitLod => .image,
+ .OpImageSparseSampleProjImplicitLod => .image,
+ .OpImageSparseSampleProjExplicitLod => .image,
+ .OpImageSparseSampleProjDrefImplicitLod => .image,
+ .OpImageSparseSampleProjDrefExplicitLod => .image,
+ .OpImageSparseFetch => .image,
+ .OpImageSparseGather => .image,
+ .OpImageSparseDrefGather => .image,
+ .OpImageSparseTexelsResident => .image,
+ .OpNoLine => .debug,
+ .OpAtomicFlagTestAndSet => .atomic,
+ .OpAtomicFlagClear => .atomic,
+ .OpImageSparseRead => .image,
+ .OpSizeOf => .miscellaneous,
+ .OpTypePipeStorage => .type_declaration,
+ .OpConstantPipeStorage => .pipe,
+ .OpCreatePipeFromPipeStorage => .pipe,
+ .OpGetKernelLocalSizeForSubgroupCount => .device_side_enqueue,
+ .OpGetKernelMaxNumSubgroups => .device_side_enqueue,
+ .OpTypeNamedBarrier => .type_declaration,
+ .OpNamedBarrierInitialize => .barrier,
+ .OpMemoryNamedBarrier => .barrier,
+ .OpModuleProcessed => .debug,
+ .OpExecutionModeId => .mode_setting,
+ .OpDecorateId => .annotation,
+ .OpGroupNonUniformElect => .non_uniform,
+ .OpGroupNonUniformAll => .non_uniform,
+ .OpGroupNonUniformAny => .non_uniform,
+ .OpGroupNonUniformAllEqual => .non_uniform,
+ .OpGroupNonUniformBroadcast => .non_uniform,
+ .OpGroupNonUniformBroadcastFirst => .non_uniform,
+ .OpGroupNonUniformBallot => .non_uniform,
+ .OpGroupNonUniformInverseBallot => .non_uniform,
+ .OpGroupNonUniformBallotBitExtract => .non_uniform,
+ .OpGroupNonUniformBallotBitCount => .non_uniform,
+ .OpGroupNonUniformBallotFindLSB => .non_uniform,
+ .OpGroupNonUniformBallotFindMSB => .non_uniform,
+ .OpGroupNonUniformShuffle => .non_uniform,
+ .OpGroupNonUniformShuffleXor => .non_uniform,
+ .OpGroupNonUniformShuffleUp => .non_uniform,
+ .OpGroupNonUniformShuffleDown => .non_uniform,
+ .OpGroupNonUniformIAdd => .non_uniform,
+ .OpGroupNonUniformFAdd => .non_uniform,
+ .OpGroupNonUniformIMul => .non_uniform,
+ .OpGroupNonUniformFMul => .non_uniform,
+ .OpGroupNonUniformSMin => .non_uniform,
+ .OpGroupNonUniformUMin => .non_uniform,
+ .OpGroupNonUniformFMin => .non_uniform,
+ .OpGroupNonUniformSMax => .non_uniform,
+ .OpGroupNonUniformUMax => .non_uniform,
+ .OpGroupNonUniformFMax => .non_uniform,
+ .OpGroupNonUniformBitwiseAnd => .non_uniform,
+ .OpGroupNonUniformBitwiseOr => .non_uniform,
+ .OpGroupNonUniformBitwiseXor => .non_uniform,
+ .OpGroupNonUniformLogicalAnd => .non_uniform,
+ .OpGroupNonUniformLogicalOr => .non_uniform,
+ .OpGroupNonUniformLogicalXor => .non_uniform,
+ .OpGroupNonUniformQuadBroadcast => .non_uniform,
+ .OpGroupNonUniformQuadSwap => .non_uniform,
+ .OpCopyLogical => .composite,
+ .OpPtrEqual => .memory,
+ .OpPtrNotEqual => .memory,
+ .OpPtrDiff => .memory,
+ .OpColorAttachmentReadEXT => .image,
+ .OpDepthAttachmentReadEXT => .image,
+ .OpStencilAttachmentReadEXT => .image,
+ .OpTypeTensorARM => .type_declaration,
+ .OpTensorReadARM => .tensor,
+ .OpTensorWriteARM => .tensor,
+ .OpTensorQuerySizeARM => .tensor,
+ .OpGraphConstantARM => .graph,
+ .OpGraphEntryPointARM => .graph,
+ .OpGraphARM => .graph,
+ .OpGraphInputARM => .graph,
+ .OpGraphSetOutputARM => .graph,
+ .OpGraphEndARM => .graph,
+ .OpTypeGraphARM => .type_declaration,
+ .OpTerminateInvocation => .control_flow,
+ .OpTypeUntypedPointerKHR => .type_declaration,
+ .OpUntypedVariableKHR => .memory,
+ .OpUntypedAccessChainKHR => .memory,
+ .OpUntypedInBoundsAccessChainKHR => .memory,
+ .OpSubgroupBallotKHR => .group,
+ .OpSubgroupFirstInvocationKHR => .group,
+ .OpUntypedPtrAccessChainKHR => .memory,
+ .OpUntypedInBoundsPtrAccessChainKHR => .memory,
+ .OpUntypedArrayLengthKHR => .memory,
+ .OpUntypedPrefetchKHR => .memory,
+ .OpSubgroupAllKHR => .group,
+ .OpSubgroupAnyKHR => .group,
+ .OpSubgroupAllEqualKHR => .group,
+ .OpGroupNonUniformRotateKHR => .group,
+ .OpSubgroupReadInvocationKHR => .group,
+ .OpExtInstWithForwardRefsKHR => .extension,
+ .OpTraceRayKHR => .reserved,
+ .OpExecuteCallableKHR => .reserved,
+ .OpConvertUToAccelerationStructureKHR => .reserved,
+ .OpIgnoreIntersectionKHR => .reserved,
+ .OpTerminateRayKHR => .reserved,
+ .OpSDot => .arithmetic,
+ .OpUDot => .arithmetic,
+ .OpSUDot => .arithmetic,
+ .OpSDotAccSat => .arithmetic,
+ .OpUDotAccSat => .arithmetic,
+ .OpSUDotAccSat => .arithmetic,
+ .OpTypeCooperativeMatrixKHR => .type_declaration,
+ .OpCooperativeMatrixLoadKHR => .memory,
+ .OpCooperativeMatrixStoreKHR => .memory,
+ .OpCooperativeMatrixMulAddKHR => .arithmetic,
+ .OpCooperativeMatrixLengthKHR => .miscellaneous,
+ .OpConstantCompositeReplicateEXT => .constant_creation,
+ .OpSpecConstantCompositeReplicateEXT => .constant_creation,
+ .OpCompositeConstructReplicateEXT => .composite,
+ .OpTypeRayQueryKHR => .type_declaration,
+ .OpRayQueryInitializeKHR => .reserved,
+ .OpRayQueryTerminateKHR => .reserved,
+ .OpRayQueryGenerateIntersectionKHR => .reserved,
+ .OpRayQueryConfirmIntersectionKHR => .reserved,
+ .OpRayQueryProceedKHR => .reserved,
+ .OpRayQueryGetIntersectionTypeKHR => .reserved,
+ .OpImageSampleWeightedQCOM => .image,
+ .OpImageBoxFilterQCOM => .image,
+ .OpImageBlockMatchSSDQCOM => .image,
+ .OpImageBlockMatchSADQCOM => .image,
+ .OpImageBlockMatchWindowSSDQCOM => .image,
+ .OpImageBlockMatchWindowSADQCOM => .image,
+ .OpImageBlockMatchGatherSSDQCOM => .image,
+ .OpImageBlockMatchGatherSADQCOM => .image,
+ .OpGroupIAddNonUniformAMD => .group,
+ .OpGroupFAddNonUniformAMD => .group,
+ .OpGroupFMinNonUniformAMD => .group,
+ .OpGroupUMinNonUniformAMD => .group,
+ .OpGroupSMinNonUniformAMD => .group,
+ .OpGroupFMaxNonUniformAMD => .group,
+ .OpGroupUMaxNonUniformAMD => .group,
+ .OpGroupSMaxNonUniformAMD => .group,
+ .OpFragmentMaskFetchAMD => .reserved,
+ .OpFragmentFetchAMD => .reserved,
+ .OpReadClockKHR => .reserved,
+ .OpAllocateNodePayloadsAMDX => .reserved,
+ .OpEnqueueNodePayloadsAMDX => .reserved,
+ .OpTypeNodePayloadArrayAMDX => .reserved,
+ .OpFinishWritingNodePayloadAMDX => .reserved,
+ .OpNodePayloadArrayLengthAMDX => .reserved,
+ .OpIsNodePayloadValidAMDX => .reserved,
+ .OpConstantStringAMDX => .reserved,
+ .OpSpecConstantStringAMDX => .reserved,
+ .OpGroupNonUniformQuadAllKHR => .non_uniform,
+ .OpGroupNonUniformQuadAnyKHR => .non_uniform,
+ .OpHitObjectRecordHitMotionNV => .reserved,
+ .OpHitObjectRecordHitWithIndexMotionNV => .reserved,
+ .OpHitObjectRecordMissMotionNV => .reserved,
+ .OpHitObjectGetWorldToObjectNV => .reserved,
+ .OpHitObjectGetObjectToWorldNV => .reserved,
+ .OpHitObjectGetObjectRayDirectionNV => .reserved,
+ .OpHitObjectGetObjectRayOriginNV => .reserved,
+ .OpHitObjectTraceRayMotionNV => .reserved,
+ .OpHitObjectGetShaderRecordBufferHandleNV => .reserved,
+ .OpHitObjectGetShaderBindingTableRecordIndexNV => .reserved,
+ .OpHitObjectRecordEmptyNV => .reserved,
+ .OpHitObjectTraceRayNV => .reserved,
+ .OpHitObjectRecordHitNV => .reserved,
+ .OpHitObjectRecordHitWithIndexNV => .reserved,
+ .OpHitObjectRecordMissNV => .reserved,
+ .OpHitObjectExecuteShaderNV => .reserved,
+ .OpHitObjectGetCurrentTimeNV => .reserved,
+ .OpHitObjectGetAttributesNV => .reserved,
+ .OpHitObjectGetHitKindNV => .reserved,
+ .OpHitObjectGetPrimitiveIndexNV => .reserved,
+ .OpHitObjectGetGeometryIndexNV => .reserved,
+ .OpHitObjectGetInstanceIdNV => .reserved,
+ .OpHitObjectGetInstanceCustomIndexNV => .reserved,
+ .OpHitObjectGetWorldRayDirectionNV => .reserved,
+ .OpHitObjectGetWorldRayOriginNV => .reserved,
+ .OpHitObjectGetRayTMaxNV => .reserved,
+ .OpHitObjectGetRayTMinNV => .reserved,
+ .OpHitObjectIsEmptyNV => .reserved,
+ .OpHitObjectIsHitNV => .reserved,
+ .OpHitObjectIsMissNV => .reserved,
+ .OpReorderThreadWithHitObjectNV => .reserved,
+ .OpReorderThreadWithHintNV => .reserved,
+ .OpTypeHitObjectNV => .type_declaration,
+ .OpImageSampleFootprintNV => .image,
+ .OpTypeCooperativeVectorNV => .type_declaration,
+ .OpCooperativeVectorMatrixMulNV => .reserved,
+ .OpCooperativeVectorOuterProductAccumulateNV => .reserved,
+ .OpCooperativeVectorReduceSumAccumulateNV => .reserved,
+ .OpCooperativeVectorMatrixMulAddNV => .reserved,
+ .OpCooperativeMatrixConvertNV => .conversion,
+ .OpEmitMeshTasksEXT => .reserved,
+ .OpSetMeshOutputsEXT => .reserved,
+ .OpGroupNonUniformPartitionNV => .non_uniform,
+ .OpWritePackedPrimitiveIndices4x8NV => .reserved,
+ .OpFetchMicroTriangleVertexPositionNV => .reserved,
+ .OpFetchMicroTriangleVertexBarycentricNV => .reserved,
+ .OpCooperativeVectorLoadNV => .memory,
+ .OpCooperativeVectorStoreNV => .memory,
+ .OpReportIntersectionKHR => .reserved,
+ .OpIgnoreIntersectionNV => .reserved,
+ .OpTerminateRayNV => .reserved,
+ .OpTraceNV => .reserved,
+ .OpTraceMotionNV => .reserved,
+ .OpTraceRayMotionNV => .reserved,
+ .OpRayQueryGetIntersectionTriangleVertexPositionsKHR => .reserved,
+ .OpTypeAccelerationStructureKHR => .type_declaration,
+ .OpExecuteCallableNV => .reserved,
+ .OpRayQueryGetClusterIdNV => .reserved,
+ .OpHitObjectGetClusterIdNV => .reserved,
+ .OpTypeCooperativeMatrixNV => .type_declaration,
+ .OpCooperativeMatrixLoadNV => .reserved,
+ .OpCooperativeMatrixStoreNV => .reserved,
+ .OpCooperativeMatrixMulAddNV => .reserved,
+ .OpCooperativeMatrixLengthNV => .reserved,
+ .OpBeginInvocationInterlockEXT => .reserved,
+ .OpEndInvocationInterlockEXT => .reserved,
+ .OpCooperativeMatrixReduceNV => .arithmetic,
+ .OpCooperativeMatrixLoadTensorNV => .memory,
+ .OpCooperativeMatrixStoreTensorNV => .memory,
+ .OpCooperativeMatrixPerElementOpNV => .function,
+ .OpTypeTensorLayoutNV => .type_declaration,
+ .OpTypeTensorViewNV => .type_declaration,
+ .OpCreateTensorLayoutNV => .reserved,
+ .OpTensorLayoutSetDimensionNV => .reserved,
+ .OpTensorLayoutSetStrideNV => .reserved,
+ .OpTensorLayoutSliceNV => .reserved,
+ .OpTensorLayoutSetClampValueNV => .reserved,
+ .OpCreateTensorViewNV => .reserved,
+ .OpTensorViewSetDimensionNV => .reserved,
+ .OpTensorViewSetStrideNV => .reserved,
+ .OpDemoteToHelperInvocation => .control_flow,
+ .OpIsHelperInvocationEXT => .reserved,
+ .OpTensorViewSetClipNV => .reserved,
+ .OpTensorLayoutSetBlockSizeNV => .reserved,
+ .OpCooperativeMatrixTransposeNV => .conversion,
+ .OpConvertUToImageNV => .reserved,
+ .OpConvertUToSamplerNV => .reserved,
+ .OpConvertImageToUNV => .reserved,
+ .OpConvertSamplerToUNV => .reserved,
+ .OpConvertUToSampledImageNV => .reserved,
+ .OpConvertSampledImageToUNV => .reserved,
+ .OpSamplerImageAddressingModeNV => .reserved,
+ .OpRawAccessChainNV => .memory,
+ .OpRayQueryGetIntersectionSpherePositionNV => .reserved,
+ .OpRayQueryGetIntersectionSphereRadiusNV => .reserved,
+ .OpRayQueryGetIntersectionLSSPositionsNV => .reserved,
+ .OpRayQueryGetIntersectionLSSRadiiNV => .reserved,
+ .OpRayQueryGetIntersectionLSSHitValueNV => .reserved,
+ .OpHitObjectGetSpherePositionNV => .reserved,
+ .OpHitObjectGetSphereRadiusNV => .reserved,
+ .OpHitObjectGetLSSPositionsNV => .reserved,
+ .OpHitObjectGetLSSRadiiNV => .reserved,
+ .OpHitObjectIsSphereHitNV => .reserved,
+ .OpHitObjectIsLSSHitNV => .reserved,
+ .OpRayQueryIsSphereHitNV => .reserved,
+ .OpRayQueryIsLSSHitNV => .reserved,
+ .OpSubgroupShuffleINTEL => .group,
+ .OpSubgroupShuffleDownINTEL => .group,
+ .OpSubgroupShuffleUpINTEL => .group,
+ .OpSubgroupShuffleXorINTEL => .group,
+ .OpSubgroupBlockReadINTEL => .group,
+ .OpSubgroupBlockWriteINTEL => .group,
+ .OpSubgroupImageBlockReadINTEL => .group,
+ .OpSubgroupImageBlockWriteINTEL => .group,
+ .OpSubgroupImageMediaBlockReadINTEL => .group,
+ .OpSubgroupImageMediaBlockWriteINTEL => .group,
+ .OpUCountLeadingZerosINTEL => .reserved,
+ .OpUCountTrailingZerosINTEL => .reserved,
+ .OpAbsISubINTEL => .reserved,
+ .OpAbsUSubINTEL => .reserved,
+ .OpIAddSatINTEL => .reserved,
+ .OpUAddSatINTEL => .reserved,
+ .OpIAverageINTEL => .reserved,
+ .OpUAverageINTEL => .reserved,
+ .OpIAverageRoundedINTEL => .reserved,
+ .OpUAverageRoundedINTEL => .reserved,
+ .OpISubSatINTEL => .reserved,
+ .OpUSubSatINTEL => .reserved,
+ .OpIMul32x16INTEL => .reserved,
+ .OpUMul32x16INTEL => .reserved,
+ .OpAtomicFMinEXT => .atomic,
+ .OpAtomicFMaxEXT => .atomic,
+ .OpAssumeTrueKHR => .miscellaneous,
+ .OpExpectKHR => .miscellaneous,
+ .OpDecorateString => .annotation,
+ .OpMemberDecorateString => .annotation,
+ .OpLoopControlINTEL => .reserved,
+ .OpReadPipeBlockingINTEL => .pipe,
+ .OpWritePipeBlockingINTEL => .pipe,
+ .OpFPGARegINTEL => .reserved,
+ .OpRayQueryGetRayTMinKHR => .reserved,
+ .OpRayQueryGetRayFlagsKHR => .reserved,
+ .OpRayQueryGetIntersectionTKHR => .reserved,
+ .OpRayQueryGetIntersectionInstanceCustomIndexKHR => .reserved,
+ .OpRayQueryGetIntersectionInstanceIdKHR => .reserved,
+ .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => .reserved,
+ .OpRayQueryGetIntersectionGeometryIndexKHR => .reserved,
+ .OpRayQueryGetIntersectionPrimitiveIndexKHR => .reserved,
+ .OpRayQueryGetIntersectionBarycentricsKHR => .reserved,
+ .OpRayQueryGetIntersectionFrontFaceKHR => .reserved,
+ .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => .reserved,
+ .OpRayQueryGetIntersectionObjectRayDirectionKHR => .reserved,
+ .OpRayQueryGetIntersectionObjectRayOriginKHR => .reserved,
+ .OpRayQueryGetWorldRayDirectionKHR => .reserved,
+ .OpRayQueryGetWorldRayOriginKHR => .reserved,
+ .OpRayQueryGetIntersectionObjectToWorldKHR => .reserved,
+ .OpRayQueryGetIntersectionWorldToObjectKHR => .reserved,
+ .OpAtomicFAddEXT => .atomic,
+ .OpTypeBufferSurfaceINTEL => .type_declaration,
+ .OpTypeStructContinuedINTEL => .type_declaration,
+ .OpConstantCompositeContinuedINTEL => .constant_creation,
+ .OpSpecConstantCompositeContinuedINTEL => .constant_creation,
+ .OpCompositeConstructContinuedINTEL => .composite,
+ .OpConvertFToBF16INTEL => .conversion,
+ .OpConvertBF16ToFINTEL => .conversion,
+ .OpControlBarrierArriveINTEL => .barrier,
+ .OpControlBarrierWaitINTEL => .barrier,
+ .OpArithmeticFenceEXT => .miscellaneous,
+ .OpTaskSequenceCreateINTEL => .reserved,
+ .OpTaskSequenceAsyncINTEL => .reserved,
+ .OpTaskSequenceGetINTEL => .reserved,
+ .OpTaskSequenceReleaseINTEL => .reserved,
+ .OpTypeTaskSequenceINTEL => .type_declaration,
+ .OpSubgroupBlockPrefetchINTEL => .group,
+ .OpSubgroup2DBlockLoadINTEL => .group,
+ .OpSubgroup2DBlockLoadTransformINTEL => .group,
+ .OpSubgroup2DBlockLoadTransposeINTEL => .group,
+ .OpSubgroup2DBlockPrefetchINTEL => .group,
+ .OpSubgroup2DBlockStoreINTEL => .group,
+ .OpSubgroupMatrixMultiplyAccumulateINTEL => .group,
+ .OpBitwiseFunctionINTEL => .bit,
+ .OpGroupIMulKHR => .group,
+ .OpGroupFMulKHR => .group,
+ .OpGroupBitwiseAndKHR => .group,
+ .OpGroupBitwiseOrKHR => .group,
+ .OpGroupBitwiseXorKHR => .group,
+ .OpGroupLogicalAndKHR => .group,
+ .OpGroupLogicalOrKHR => .group,
+ .OpGroupLogicalXorKHR => .group,
+ .OpRoundFToTF32INTEL => .conversion,
+ .OpMaskedGatherINTEL => .memory,
+ .OpMaskedScatterINTEL => .memory,
+ .OpConvertHandleToImageINTEL => .image,
+ .OpConvertHandleToSamplerINTEL => .image,
+ .OpConvertHandleToSampledImageINTEL => .image,
+ };
+ }
+};
+pub const ImageOperands = packed struct {
+ bias: bool = false,
+ lod: bool = false,
+ grad: bool = false,
+ const_offset: bool = false,
+ offset: bool = false,
+ const_offsets: bool = false,
+ sample: bool = false,
+ min_lod: bool = false,
+ make_texel_available: bool = false,
+ make_texel_visible: bool = false,
+ non_private_texel: bool = false,
+ volatile_texel: bool = false,
+ sign_extend: bool = false,
+ zero_extend: bool = false,
+ nontemporal: bool = false,
+ _reserved_bit_15: bool = false,
+ offsets: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+
+ pub const Extended = struct {
+ bias: ?struct { id_ref: Id } = null,
+ lod: ?struct { id_ref: Id } = null,
+ grad: ?struct { id_ref_0: Id, id_ref_1: Id } = null,
+ const_offset: ?struct { id_ref: Id } = null,
+ offset: ?struct { id_ref: Id } = null,
+ const_offsets: ?struct { id_ref: Id } = null,
+ sample: ?struct { id_ref: Id } = null,
+ min_lod: ?struct { id_ref: Id } = null,
+ make_texel_available: ?struct { id_scope: Id } = null,
+ make_texel_visible: ?struct { id_scope: Id } = null,
+ non_private_texel: bool = false,
+ volatile_texel: bool = false,
+ sign_extend: bool = false,
+ zero_extend: bool = false,
+ nontemporal: bool = false,
+ _reserved_bit_15: bool = false,
+ offsets: ?struct { id_ref: Id } = null,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
+};
+pub const FPFastMathMode = packed struct {
+ not_na_n: bool = false,
+ not_inf: bool = false,
+ nsz: bool = false,
+ allow_recip: bool = false,
+ fast: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ allow_contract: bool = false,
+ allow_reassoc: bool = false,
+ allow_transform: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const SelectionControl = packed struct {
+ flatten: bool = false,
+ dont_flatten: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const LoopControl = packed struct {
+ unroll: bool = false,
+ dont_unroll: bool = false,
+ dependency_infinite: bool = false,
+ dependency_length: bool = false,
+ min_iterations: bool = false,
+ max_iterations: bool = false,
+ iteration_multiple: bool = false,
+ peel_count: bool = false,
+ partial_count: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ initiation_interval_intel: bool = false,
+ max_concurrency_intel: bool = false,
+ dependency_array_intel: bool = false,
+ pipeline_enable_intel: bool = false,
+ loop_coalesce_intel: bool = false,
+ max_interleaving_intel: bool = false,
+ speculated_iterations_intel: bool = false,
+ no_fusion_intel: bool = false,
+ loop_count_intel: bool = false,
+ max_reinvocation_delay_intel: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+
+ pub const Extended = struct {
+ unroll: bool = false,
+ dont_unroll: bool = false,
+ dependency_infinite: bool = false,
+ dependency_length: ?struct { literal_integer: LiteralInteger } = null,
+ min_iterations: ?struct { literal_integer: LiteralInteger } = null,
+ max_iterations: ?struct { literal_integer: LiteralInteger } = null,
+ iteration_multiple: ?struct { literal_integer: LiteralInteger } = null,
+ peel_count: ?struct { literal_integer: LiteralInteger } = null,
+ partial_count: ?struct { literal_integer: LiteralInteger } = null,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ initiation_interval_intel: ?struct { literal_integer: LiteralInteger } = null,
+ max_concurrency_intel: ?struct { literal_integer: LiteralInteger } = null,
+ dependency_array_intel: ?struct { literal_integer: LiteralInteger } = null,
+ pipeline_enable_intel: ?struct { literal_integer: LiteralInteger } = null,
+ loop_coalesce_intel: ?struct { literal_integer: LiteralInteger } = null,
+ max_interleaving_intel: ?struct { literal_integer: LiteralInteger } = null,
+ speculated_iterations_intel: ?struct { literal_integer: LiteralInteger } = null,
+ no_fusion_intel: bool = false,
+ loop_count_intel: ?struct { literal_integer: LiteralInteger } = null,
+ max_reinvocation_delay_intel: ?struct { literal_integer: LiteralInteger } = null,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
+};
+pub const FunctionControl = packed struct {
+ @"inline": bool = false,
+ dont_inline: bool = false,
+ pure: bool = false,
+ @"const": bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ opt_none_ext: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const MemorySemantics = packed struct {
+ _reserved_bit_0: bool = false,
+ acquire: bool = false,
+ release: bool = false,
+ acquire_release: bool = false,
+ sequentially_consistent: bool = false,
+ _reserved_bit_5: bool = false,
+ uniform_memory: bool = false,
+ subgroup_memory: bool = false,
+ workgroup_memory: bool = false,
+ cross_workgroup_memory: bool = false,
+ atomic_counter_memory: bool = false,
+ image_memory: bool = false,
+ output_memory: bool = false,
+ make_available: bool = false,
+ make_visible: bool = false,
+ @"volatile": bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const MemoryAccess = packed struct {
+ @"volatile": bool = false,
+ aligned: bool = false,
+ nontemporal: bool = false,
+ make_pointer_available: bool = false,
+ make_pointer_visible: bool = false,
+ non_private_pointer: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ alias_scope_intel_mask: bool = false,
+ no_alias_intel_mask: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+
+ pub const Extended = struct {
+ @"volatile": bool = false,
+ aligned: ?struct { literal_integer: LiteralInteger } = null,
+ nontemporal: bool = false,
+ make_pointer_available: ?struct { id_scope: Id } = null,
+ make_pointer_visible: ?struct { id_scope: Id } = null,
+ non_private_pointer: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ alias_scope_intel_mask: ?struct { id_ref: Id } = null,
+ no_alias_intel_mask: ?struct { id_ref: Id } = null,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
+};
+pub const KernelProfilingInfo = packed struct {
+ cmd_exec_time: bool = false,
+ _reserved_bit_1: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const RayFlags = packed struct {
+ opaque_khr: bool = false,
+ no_opaque_khr: bool = false,
+ terminate_on_first_hit_khr: bool = false,
+ skip_closest_hit_shader_khr: bool = false,
+ cull_back_facing_triangles_khr: bool = false,
+ cull_front_facing_triangles_khr: bool = false,
+ cull_opaque_khr: bool = false,
+ cull_no_opaque_khr: bool = false,
+ skip_triangles_khr: bool = false,
+ skip_aab_bs_khr: bool = false,
+ force_opacity_micromap2state_ext: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const FragmentShadingRate = packed struct {
+ vertical2pixels: bool = false,
+ vertical4pixels: bool = false,
+ horizontal2pixels: bool = false,
+ horizontal4pixels: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const RawAccessChainOperands = packed struct {
+ robustness_per_component_nv: bool = false,
+ robustness_per_element_nv: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const SourceLanguage = enum(u32) {
+ unknown = 0,
+ essl = 1,
+ glsl = 2,
+ open_cl_c = 3,
+ open_cl_cpp = 4,
+ hlsl = 5,
+ cpp_for_open_cl = 6,
+ sycl = 7,
+ hero_c = 8,
+ nzsl = 9,
+ wgsl = 10,
+ slang = 11,
+ zig = 12,
+ rust = 13,
+};
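+// Illustrative sketch (editor's addition, not generated output): the enums in this file
+// are backed by u32 and carry their registered SPIR-V word values directly.
+test "SourceLanguage carries its registered word value (illustrative)" {
+    if (@intFromEnum(SourceLanguage.zig) != 12) return error.UnexpectedValue;
+}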
+pub const ExecutionModel = enum(u32) {
+ vertex = 0,
+ tessellation_control = 1,
+ tessellation_evaluation = 2,
+ geometry = 3,
+ fragment = 4,
+ gl_compute = 5,
+ kernel = 6,
+ task_nv = 5267,
+ mesh_nv = 5268,
+ ray_generation_khr = 5313,
+ intersection_khr = 5314,
+ any_hit_khr = 5315,
+ closest_hit_khr = 5316,
+ miss_khr = 5317,
+ callable_khr = 5318,
+ task_ext = 5364,
+ mesh_ext = 5365,
+};
+pub const AddressingModel = enum(u32) {
+ logical = 0,
+ physical32 = 1,
+ physical64 = 2,
+ physical_storage_buffer64 = 5348,
+};
+pub const MemoryModel = enum(u32) {
+ simple = 0,
+ glsl450 = 1,
+ open_cl = 2,
+ vulkan = 3,
+};
+pub const ExecutionMode = enum(u32) {
+ invocations = 0,
+ spacing_equal = 1,
+ spacing_fractional_even = 2,
+ spacing_fractional_odd = 3,
+ vertex_order_cw = 4,
+ vertex_order_ccw = 5,
+ pixel_center_integer = 6,
+ origin_upper_left = 7,
+ origin_lower_left = 8,
+ early_fragment_tests = 9,
+ point_mode = 10,
+ xfb = 11,
+ depth_replacing = 12,
+ depth_greater = 14,
+ depth_less = 15,
+ depth_unchanged = 16,
+ local_size = 17,
+ local_size_hint = 18,
+ input_points = 19,
+ input_lines = 20,
+ input_lines_adjacency = 21,
+ triangles = 22,
+ input_triangles_adjacency = 23,
+ quads = 24,
+ isolines = 25,
+ output_vertices = 26,
+ output_points = 27,
+ output_line_strip = 28,
+ output_triangle_strip = 29,
+ vec_type_hint = 30,
+ contraction_off = 31,
+ initializer = 33,
+ finalizer = 34,
+ subgroup_size = 35,
+ subgroups_per_workgroup = 36,
+ subgroups_per_workgroup_id = 37,
+ local_size_id = 38,
+ local_size_hint_id = 39,
+ non_coherent_color_attachment_read_ext = 4169,
+ non_coherent_depth_attachment_read_ext = 4170,
+ non_coherent_stencil_attachment_read_ext = 4171,
+ subgroup_uniform_control_flow_khr = 4421,
+ post_depth_coverage = 4446,
+ denorm_preserve = 4459,
+ denorm_flush_to_zero = 4460,
+ signed_zero_inf_nan_preserve = 4461,
+ rounding_mode_rte = 4462,
+ rounding_mode_rtz = 4463,
+ non_coherent_tile_attachment_read_qcom = 4489,
+ tile_shading_rate_qcom = 4490,
+ early_and_late_fragment_tests_amd = 5017,
+ stencil_ref_replacing_ext = 5027,
+ coalescing_amdx = 5069,
+ is_api_entry_amdx = 5070,
+ max_node_recursion_amdx = 5071,
+ static_num_workgroups_amdx = 5072,
+ shader_index_amdx = 5073,
+ max_num_workgroups_amdx = 5077,
+ stencil_ref_unchanged_front_amd = 5079,
+ stencil_ref_greater_front_amd = 5080,
+ stencil_ref_less_front_amd = 5081,
+ stencil_ref_unchanged_back_amd = 5082,
+ stencil_ref_greater_back_amd = 5083,
+ stencil_ref_less_back_amd = 5084,
+ quad_derivatives_khr = 5088,
+ require_full_quads_khr = 5089,
+ shares_input_with_amdx = 5102,
+ output_lines_ext = 5269,
+ output_primitives_ext = 5270,
+ derivative_group_quads_khr = 5289,
+ derivative_group_linear_khr = 5290,
+ output_triangles_ext = 5298,
+ pixel_interlock_ordered_ext = 5366,
+ pixel_interlock_unordered_ext = 5367,
+ sample_interlock_ordered_ext = 5368,
+ sample_interlock_unordered_ext = 5369,
+ shading_rate_interlock_ordered_ext = 5370,
+ shading_rate_interlock_unordered_ext = 5371,
+ shared_local_memory_size_intel = 5618,
+ rounding_mode_rtpintel = 5620,
+ rounding_mode_rtnintel = 5621,
+ floating_point_mode_altintel = 5622,
+ floating_point_mode_ieeeintel = 5623,
+ max_workgroup_size_intel = 5893,
+ max_work_dim_intel = 5894,
+ no_global_offset_intel = 5895,
+ num_simd_workitems_intel = 5896,
+ scheduler_target_fmax_mhz_intel = 5903,
+ maximally_reconverges_khr = 6023,
+ fp_fast_math_default = 6028,
+ streaming_interface_intel = 6154,
+ register_map_interface_intel = 6160,
+ named_barrier_count_intel = 6417,
+ maximum_registers_intel = 6461,
+ maximum_registers_id_intel = 6462,
+ named_maximum_registers_intel = 6463,
+
+ pub const Extended = union(ExecutionMode) {
+ invocations: struct { literal_integer: LiteralInteger },
+ spacing_equal,
+ spacing_fractional_even,
+ spacing_fractional_odd,
+ vertex_order_cw,
+ vertex_order_ccw,
+ pixel_center_integer,
+ origin_upper_left,
+ origin_lower_left,
+ early_fragment_tests,
+ point_mode,
+ xfb,
+ depth_replacing,
+ depth_greater,
+ depth_less,
+ depth_unchanged,
+ local_size: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger },
+ local_size_hint: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger },
+ input_points,
+ input_lines,
+ input_lines_adjacency,
+ triangles,
+ input_triangles_adjacency,
+ quads,
+ isolines,
+ output_vertices: struct { vertex_count: LiteralInteger },
+ output_points,
+ output_line_strip,
+ output_triangle_strip,
+ vec_type_hint: struct { vector_type: LiteralInteger },
+ contraction_off,
+ initializer,
+ finalizer,
+ subgroup_size: struct { subgroup_size: LiteralInteger },
+ subgroups_per_workgroup: struct { subgroups_per_workgroup: LiteralInteger },
+ subgroups_per_workgroup_id: struct { subgroups_per_workgroup: Id },
+ local_size_id: struct { x_size: Id, y_size: Id, z_size: Id },
+ local_size_hint_id: struct { x_size_hint: Id, y_size_hint: Id, z_size_hint: Id },
+ non_coherent_color_attachment_read_ext,
+ non_coherent_depth_attachment_read_ext,
+ non_coherent_stencil_attachment_read_ext,
+ subgroup_uniform_control_flow_khr,
+ post_depth_coverage,
+ denorm_preserve: struct { target_width: LiteralInteger },
+ denorm_flush_to_zero: struct { target_width: LiteralInteger },
+ signed_zero_inf_nan_preserve: struct { target_width: LiteralInteger },
+ rounding_mode_rte: struct { target_width: LiteralInteger },
+ rounding_mode_rtz: struct { target_width: LiteralInteger },
+ non_coherent_tile_attachment_read_qcom,
+ tile_shading_rate_qcom: struct { x_rate: LiteralInteger, y_rate: LiteralInteger, z_rate: LiteralInteger },
+ early_and_late_fragment_tests_amd,
+ stencil_ref_replacing_ext,
+ coalescing_amdx,
+ is_api_entry_amdx: struct { is_entry: Id },
+ max_node_recursion_amdx: struct { number_of_recursions: Id },
+ static_num_workgroups_amdx: struct { x_size: Id, y_size: Id, z_size: Id },
+ shader_index_amdx: struct { shader_index: Id },
+ max_num_workgroups_amdx: struct { x_size: Id, y_size: Id, z_size: Id },
+ stencil_ref_unchanged_front_amd,
+ stencil_ref_greater_front_amd,
+ stencil_ref_less_front_amd,
+ stencil_ref_unchanged_back_amd,
+ stencil_ref_greater_back_amd,
+ stencil_ref_less_back_amd,
+ quad_derivatives_khr,
+ require_full_quads_khr,
+ shares_input_with_amdx: struct { node_name: Id, shader_index: Id },
+ output_lines_ext,
+ output_primitives_ext: struct { primitive_count: LiteralInteger },
+ derivative_group_quads_khr,
+ derivative_group_linear_khr,
+ output_triangles_ext,
+ pixel_interlock_ordered_ext,
+ pixel_interlock_unordered_ext,
+ sample_interlock_ordered_ext,
+ sample_interlock_unordered_ext,
+ shading_rate_interlock_ordered_ext,
+ shading_rate_interlock_unordered_ext,
+ shared_local_memory_size_intel: struct { size: LiteralInteger },
+ rounding_mode_rtpintel: struct { target_width: LiteralInteger },
+ rounding_mode_rtnintel: struct { target_width: LiteralInteger },
+ floating_point_mode_altintel: struct { target_width: LiteralInteger },
+ floating_point_mode_ieeeintel: struct { target_width: LiteralInteger },
+ max_workgroup_size_intel: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger, literal_integer_2: LiteralInteger },
+ max_work_dim_intel: struct { literal_integer: LiteralInteger },
+ no_global_offset_intel,
+ num_simd_workitems_intel: struct { literal_integer: LiteralInteger },
+ scheduler_target_fmax_mhz_intel: struct { literal_integer: LiteralInteger },
+ maximally_reconverges_khr,
+ fp_fast_math_default: struct { target_type: Id, id_ref_1: Id },
+ streaming_interface_intel: struct { stall_free_return: LiteralInteger },
+ register_map_interface_intel: struct { wait_for_done_write: LiteralInteger },
+ named_barrier_count_intel: struct { barrier_count: LiteralInteger },
+ maximum_registers_intel: struct { number_of_registers: LiteralInteger },
+ maximum_registers_id_intel: struct { number_of_registers: Id },
+ named_maximum_registers_intel: struct { named_maximum_number_of_registers: NamedMaximumNumberOfRegisters },
+ };
+};
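+// Illustrative sketch (editor's addition, not generated output): the Extended union pairs
+// each execution mode with its operand payload. This assumes LiteralInteger is a 32-bit
+// integer alias, as used elsewhere in this module.
+test "ExecutionMode.Extended payload construction (illustrative)" {
+    const mode: ExecutionMode.Extended = .{ .local_size = .{ .x_size = 64, .y_size = 1, .z_size = 1 } };
+    if (mode != .local_size) return error.UnexpectedTag;
+}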
+pub const StorageClass = enum(u32) {
+ uniform_constant = 0,
+ input = 1,
+ uniform = 2,
+ output = 3,
+ workgroup = 4,
+ cross_workgroup = 5,
+ private = 6,
+ function = 7,
+ generic = 8,
+ push_constant = 9,
+ atomic_counter = 10,
+ image = 11,
+ storage_buffer = 12,
+ tile_image_ext = 4172,
+ tile_attachment_qcom = 4491,
+ node_payload_amdx = 5068,
+ callable_data_khr = 5328,
+ incoming_callable_data_khr = 5329,
+ ray_payload_khr = 5338,
+ hit_attribute_khr = 5339,
+ incoming_ray_payload_khr = 5342,
+ shader_record_buffer_khr = 5343,
+ physical_storage_buffer = 5349,
+ hit_object_attribute_nv = 5385,
+ task_payload_workgroup_ext = 5402,
+ code_section_intel = 5605,
+ device_only_intel = 5936,
+ host_only_intel = 5937,
+};
+pub const Dim = enum(u32) {
+ @"1d" = 0,
+ @"2d" = 1,
+ @"3d" = 2,
+ cube = 3,
+ rect = 4,
+ buffer = 5,
+ subpass_data = 6,
+ tile_image_data_ext = 4173,
+};
+pub const SamplerAddressingMode = enum(u32) {
+ none = 0,
+ clamp_to_edge = 1,
+ clamp = 2,
+ repeat = 3,
+ repeat_mirrored = 4,
+};
+pub const SamplerFilterMode = enum(u32) {
+ nearest = 0,
+ linear = 1,
+};
+pub const ImageFormat = enum(u32) {
+ unknown = 0,
+ rgba32f = 1,
+ rgba16f = 2,
+ r32f = 3,
+ rgba8 = 4,
+ rgba8snorm = 5,
+ rg32f = 6,
+ rg16f = 7,
+ r11f_g11f_b10f = 8,
+ r16f = 9,
+ rgba16 = 10,
+ rgb10a2 = 11,
+ rg16 = 12,
+ rg8 = 13,
+ r16 = 14,
+ r8 = 15,
+ rgba16snorm = 16,
+ rg16snorm = 17,
+ rg8snorm = 18,
+ r16snorm = 19,
+ r8snorm = 20,
+ rgba32i = 21,
+ rgba16i = 22,
+ rgba8i = 23,
+ r32i = 24,
+ rg32i = 25,
+ rg16i = 26,
+ rg8i = 27,
+ r16i = 28,
+ r8i = 29,
+ rgba32ui = 30,
+ rgba16ui = 31,
+ rgba8ui = 32,
+ r32ui = 33,
+ rgb10a2ui = 34,
+ rg32ui = 35,
+ rg16ui = 36,
+ rg8ui = 37,
+ r16ui = 38,
+ r8ui = 39,
+ r64ui = 40,
+ r64i = 41,
+};
+pub const ImageChannelOrder = enum(u32) {
+ r = 0,
+ a = 1,
+ rg = 2,
+ ra = 3,
+ rgb = 4,
+ rgba = 5,
+ bgra = 6,
+ argb = 7,
+ intensity = 8,
+ luminance = 9,
+ rx = 10,
+ r_gx = 11,
+ rg_bx = 12,
+ depth = 13,
+ depth_stencil = 14,
+ s_rgb = 15,
+ s_rg_bx = 16,
+ s_rgba = 17,
+ s_bgra = 18,
+ abgr = 19,
+};
+pub const ImageChannelDataType = enum(u32) {
+ snorm_int8 = 0,
+ snorm_int16 = 1,
+ unorm_int8 = 2,
+ unorm_int16 = 3,
+ unorm_short565 = 4,
+ unorm_short555 = 5,
+ unorm_int101010 = 6,
+ signed_int8 = 7,
+ signed_int16 = 8,
+ signed_int32 = 9,
+ unsigned_int8 = 10,
+ unsigned_int16 = 11,
+ unsigned_int32 = 12,
+ half_float = 13,
+ float = 14,
+ unorm_int24 = 15,
+ unorm_int101010_2 = 16,
+ unorm_int10x6ext = 17,
+ unsigned_int_raw10ext = 19,
+ unsigned_int_raw12ext = 20,
+ unorm_int2_101010ext = 21,
+ unsigned_int10x6ext = 22,
+ unsigned_int12x4ext = 23,
+ unsigned_int14x2ext = 24,
+ unorm_int12x4ext = 25,
+ unorm_int14x2ext = 26,
+};
+pub const FPRoundingMode = enum(u32) {
+ rte = 0,
+ rtz = 1,
+ rtp = 2,
+ rtn = 3,
+};
+pub const FPDenormMode = enum(u32) {
+ preserve = 0,
+ flush_to_zero = 1,
+};
+pub const QuantizationModes = enum(u32) {
+ trn = 0,
+ trn_zero = 1,
+ rnd = 2,
+ rnd_zero = 3,
+ rnd_inf = 4,
+ rnd_min_inf = 5,
+ rnd_conv = 6,
+ rnd_conv_odd = 7,
+};
+pub const FPOperationMode = enum(u32) {
+ ieee = 0,
+ alt = 1,
+};
+pub const OverflowModes = enum(u32) {
+ wrap = 0,
+ sat = 1,
+ sat_zero = 2,
+ sat_sym = 3,
+};
+pub const LinkageType = enum(u32) {
+ @"export" = 0,
+ import = 1,
+ link_once_odr = 2,
+};
+pub const AccessQualifier = enum(u32) {
+ read_only = 0,
+ write_only = 1,
+ read_write = 2,
+};
+pub const HostAccessQualifier = enum(u32) {
+ none_intel = 0,
+ read_intel = 1,
+ write_intel = 2,
+ read_write_intel = 3,
+};
+pub const FunctionParameterAttribute = enum(u32) {
+ zext = 0,
+ sext = 1,
+ by_val = 2,
+ sret = 3,
+ no_alias = 4,
+ no_capture = 5,
+ no_write = 6,
+ no_read_write = 7,
+ runtime_aligned_intel = 5940,
+};
+pub const Decoration = enum(u32) {
+ relaxed_precision = 0,
+ spec_id = 1,
+ block = 2,
+ buffer_block = 3,
+ row_major = 4,
+ col_major = 5,
+ array_stride = 6,
+ matrix_stride = 7,
+ glsl_shared = 8,
+ glsl_packed = 9,
+ c_packed = 10,
+ built_in = 11,
+ no_perspective = 13,
+ flat = 14,
+ patch = 15,
+ centroid = 16,
+ sample = 17,
+ invariant = 18,
+ restrict = 19,
+ aliased = 20,
+ @"volatile" = 21,
+ constant = 22,
+ coherent = 23,
+ non_writable = 24,
+ non_readable = 25,
+ uniform = 26,
+ uniform_id = 27,
+ saturated_conversion = 28,
+ stream = 29,
+ location = 30,
+ component = 31,
+ index = 32,
+ binding = 33,
+ descriptor_set = 34,
+ offset = 35,
+ xfb_buffer = 36,
+ xfb_stride = 37,
+ func_param_attr = 38,
+ fp_rounding_mode = 39,
+ fp_fast_math_mode = 40,
+ linkage_attributes = 41,
+ no_contraction = 42,
+ input_attachment_index = 43,
+ alignment = 44,
+ max_byte_offset = 45,
+ alignment_id = 46,
+ max_byte_offset_id = 47,
+ saturated_to_largest_float8normal_conversion_ext = 4216,
+ no_signed_wrap = 4469,
+ no_unsigned_wrap = 4470,
+ weight_texture_qcom = 4487,
+ block_match_texture_qcom = 4488,
+ block_match_sampler_qcom = 4499,
+ explicit_interp_amd = 4999,
+ node_shares_payload_limits_with_amdx = 5019,
+ node_max_payloads_amdx = 5020,
+ track_finish_writing_amdx = 5078,
+ payload_node_name_amdx = 5091,
+ payload_node_base_index_amdx = 5098,
+ payload_node_sparse_array_amdx = 5099,
+ payload_node_array_size_amdx = 5100,
+ payload_dispatch_indirect_amdx = 5105,
+ override_coverage_nv = 5248,
+ passthrough_nv = 5250,
+ viewport_relative_nv = 5252,
+ secondary_viewport_relative_nv = 5256,
+ per_primitive_ext = 5271,
+ per_view_nv = 5272,
+ per_task_nv = 5273,
+ per_vertex_khr = 5285,
+ non_uniform = 5300,
+ restrict_pointer = 5355,
+ aliased_pointer = 5356,
+ hit_object_shader_record_buffer_nv = 5386,
+ bindless_sampler_nv = 5398,
+ bindless_image_nv = 5399,
+ bound_sampler_nv = 5400,
+ bound_image_nv = 5401,
+ simt_call_intel = 5599,
+ referenced_indirectly_intel = 5602,
+ clobber_intel = 5607,
+ side_effects_intel = 5608,
+ vector_compute_variable_intel = 5624,
+ func_param_io_kind_intel = 5625,
+ vector_compute_function_intel = 5626,
+ stack_call_intel = 5627,
+ global_variable_offset_intel = 5628,
+ counter_buffer = 5634,
+ user_semantic = 5635,
+ user_type_google = 5636,
+ function_rounding_mode_intel = 5822,
+ function_denorm_mode_intel = 5823,
+ register_intel = 5825,
+ memory_intel = 5826,
+ numbanks_intel = 5827,
+ bankwidth_intel = 5828,
+ max_private_copies_intel = 5829,
+ singlepump_intel = 5830,
+ doublepump_intel = 5831,
+ max_replicates_intel = 5832,
+ simple_dual_port_intel = 5833,
+ merge_intel = 5834,
+ bank_bits_intel = 5835,
+ force_pow2depth_intel = 5836,
+ stridesize_intel = 5883,
+ wordsize_intel = 5884,
+ true_dual_port_intel = 5885,
+ burst_coalesce_intel = 5899,
+ cache_size_intel = 5900,
+ dont_statically_coalesce_intel = 5901,
+ prefetch_intel = 5902,
+ stall_enable_intel = 5905,
+ fuse_loops_in_function_intel = 5907,
+ math_op_dsp_mode_intel = 5909,
+ alias_scope_intel = 5914,
+ no_alias_intel = 5915,
+ initiation_interval_intel = 5917,
+ max_concurrency_intel = 5918,
+ pipeline_enable_intel = 5919,
+ buffer_location_intel = 5921,
+ io_pipe_storage_intel = 5944,
+ function_floating_point_mode_intel = 6080,
+ single_element_vector_intel = 6085,
+ vector_compute_callable_function_intel = 6087,
+ media_block_iointel = 6140,
+ stall_free_intel = 6151,
+ fp_max_error_decoration_intel = 6170,
+ latency_control_label_intel = 6172,
+ latency_control_constraint_intel = 6173,
+ conduit_kernel_argument_intel = 6175,
+ register_map_kernel_argument_intel = 6176,
+ mm_host_interface_address_width_intel = 6177,
+ mm_host_interface_data_width_intel = 6178,
+ mm_host_interface_latency_intel = 6179,
+ mm_host_interface_read_write_mode_intel = 6180,
+ mm_host_interface_max_burst_intel = 6181,
+ mm_host_interface_wait_request_intel = 6182,
+ stable_kernel_argument_intel = 6183,
+ host_access_intel = 6188,
+ init_mode_intel = 6190,
+ implement_in_register_map_intel = 6191,
+ cache_control_load_intel = 6442,
+ cache_control_store_intel = 6443,
+
+ pub const Extended = union(Decoration) {
+ relaxed_precision,
+ spec_id: struct { specialization_constant_id: LiteralInteger },
+ block,
+ buffer_block,
+ row_major,
+ col_major,
+ array_stride: struct { array_stride: LiteralInteger },
+ matrix_stride: struct { matrix_stride: LiteralInteger },
+ glsl_shared,
+ glsl_packed,
+ c_packed,
+ built_in: struct { built_in: BuiltIn },
+ no_perspective,
+ flat,
+ patch,
+ centroid,
+ sample,
+ invariant,
+ restrict,
+ aliased,
+ @"volatile",
+ constant,
+ coherent,
+ non_writable,
+ non_readable,
+ uniform,
+ uniform_id: struct { execution: Id },
+ saturated_conversion,
+ stream: struct { stream_number: LiteralInteger },
+ location: struct { location: LiteralInteger },
+ component: struct { component: LiteralInteger },
+ index: struct { index: LiteralInteger },
+ binding: struct { binding_point: LiteralInteger },
+ descriptor_set: struct { descriptor_set: LiteralInteger },
+ offset: struct { byte_offset: LiteralInteger },
+ xfb_buffer: struct { xfb_buffer_number: LiteralInteger },
+ xfb_stride: struct { xfb_stride: LiteralInteger },
+ func_param_attr: struct { function_parameter_attribute: FunctionParameterAttribute },
+ fp_rounding_mode: struct { fp_rounding_mode: FPRoundingMode },
+ fp_fast_math_mode: struct { fp_fast_math_mode: FPFastMathMode },
+ linkage_attributes: struct { name: LiteralString, linkage_type: LinkageType },
+ no_contraction,
+ input_attachment_index: struct { attachment_index: LiteralInteger },
+ alignment: struct { alignment: LiteralInteger },
+ max_byte_offset: struct { max_byte_offset: LiteralInteger },
+ alignment_id: struct { alignment: Id },
+ max_byte_offset_id: struct { max_byte_offset: Id },
+ saturated_to_largest_float8normal_conversion_ext,
+ no_signed_wrap,
+ no_unsigned_wrap,
+ weight_texture_qcom,
+ block_match_texture_qcom,
+ block_match_sampler_qcom,
+ explicit_interp_amd,
+ node_shares_payload_limits_with_amdx: struct { payload_type: Id },
+ node_max_payloads_amdx: struct { max_number_of_payloads: Id },
+ track_finish_writing_amdx,
+ payload_node_name_amdx: struct { node_name: Id },
+ payload_node_base_index_amdx: struct { base_index: Id },
+ payload_node_sparse_array_amdx,
+ payload_node_array_size_amdx: struct { array_size: Id },
+ payload_dispatch_indirect_amdx,
+ override_coverage_nv,
+ passthrough_nv,
+ viewport_relative_nv,
+ secondary_viewport_relative_nv: struct { offset: LiteralInteger },
+ per_primitive_ext,
+ per_view_nv,
+ per_task_nv,
+ per_vertex_khr,
+ non_uniform,
+ restrict_pointer,
+ aliased_pointer,
+ hit_object_shader_record_buffer_nv,
+ bindless_sampler_nv,
+ bindless_image_nv,
+ bound_sampler_nv,
+ bound_image_nv,
+ simt_call_intel: struct { n: LiteralInteger },
+ referenced_indirectly_intel,
+ clobber_intel: struct { register: LiteralString },
+ side_effects_intel,
+ vector_compute_variable_intel,
+ func_param_io_kind_intel: struct { kind: LiteralInteger },
+ vector_compute_function_intel,
+ stack_call_intel,
+ global_variable_offset_intel: struct { offset: LiteralInteger },
+ counter_buffer: struct { counter_buffer: Id },
+ user_semantic: struct { semantic: LiteralString },
+ user_type_google: struct { user_type: LiteralString },
+ function_rounding_mode_intel: struct { target_width: LiteralInteger, fp_rounding_mode: FPRoundingMode },
+ function_denorm_mode_intel: struct { target_width: LiteralInteger, fp_denorm_mode: FPDenormMode },
+ register_intel,
+ memory_intel: struct { memory_type: LiteralString },
+ numbanks_intel: struct { banks: LiteralInteger },
+ bankwidth_intel: struct { bank_width: LiteralInteger },
+ max_private_copies_intel: struct { maximum_copies: LiteralInteger },
+ singlepump_intel,
+ doublepump_intel,
+ max_replicates_intel: struct { maximum_replicates: LiteralInteger },
+ simple_dual_port_intel,
+ merge_intel: struct { merge_key: LiteralString, merge_type: LiteralString },
+ bank_bits_intel: struct { bank_bits: []const LiteralInteger = &.{} },
+ force_pow2depth_intel: struct { force_key: LiteralInteger },
+ stridesize_intel: struct { stride_size: LiteralInteger },
+ wordsize_intel: struct { word_size: LiteralInteger },
+ true_dual_port_intel,
+ burst_coalesce_intel,
+ cache_size_intel: struct { cache_size_in_bytes: LiteralInteger },
+ dont_statically_coalesce_intel,
+ prefetch_intel: struct { prefetcher_size_in_bytes: LiteralInteger },
+ stall_enable_intel,
+ fuse_loops_in_function_intel,
+ math_op_dsp_mode_intel: struct { mode: LiteralInteger, propagate: LiteralInteger },
+ alias_scope_intel: struct { aliasing_scopes_list: Id },
+ no_alias_intel: struct { aliasing_scopes_list: Id },
+ initiation_interval_intel: struct { cycles: LiteralInteger },
+ max_concurrency_intel: struct { invocations: LiteralInteger },
+ pipeline_enable_intel: struct { enable: LiteralInteger },
+ buffer_location_intel: struct { buffer_location_id: LiteralInteger },
+ io_pipe_storage_intel: struct { io_pipe_id: LiteralInteger },
+ function_floating_point_mode_intel: struct { target_width: LiteralInteger, fp_operation_mode: FPOperationMode },
+ single_element_vector_intel,
+ vector_compute_callable_function_intel,
+ media_block_iointel,
+ stall_free_intel,
+ fp_max_error_decoration_intel: struct { max_error: LiteralFloat },
+ latency_control_label_intel: struct { latency_label: LiteralInteger },
+ latency_control_constraint_intel: struct { relative_to: LiteralInteger, control_type: LiteralInteger, relative_cycle: LiteralInteger },
+ conduit_kernel_argument_intel,
+ register_map_kernel_argument_intel,
+ mm_host_interface_address_width_intel: struct { address_width: LiteralInteger },
+ mm_host_interface_data_width_intel: struct { data_width: LiteralInteger },
+ mm_host_interface_latency_intel: struct { latency: LiteralInteger },
+ mm_host_interface_read_write_mode_intel: struct { read_write_mode: AccessQualifier },
+ mm_host_interface_max_burst_intel: struct { max_burst_count: LiteralInteger },
+ mm_host_interface_wait_request_intel: struct { waitrequest: LiteralInteger },
+ stable_kernel_argument_intel,
+ host_access_intel: struct { access: HostAccessQualifier, name: LiteralString },
+ init_mode_intel: struct { trigger: InitializationModeQualifier },
+ implement_in_register_map_intel: struct { value: LiteralInteger },
+ cache_control_load_intel: struct { cache_level: LiteralInteger, cache_control: LoadCacheControl },
+ cache_control_store_intel: struct { cache_level: LiteralInteger, cache_control: StoreCacheControl },
+ };
+};
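+// Illustrative sketch (editor's addition, not generated output): decorations with operands
+// follow the same pattern, e.g. Location paired with its literal operand (again assuming
+// LiteralInteger is an integer alias).
+test "Decoration.Extended payload construction (illustrative)" {
+    const dec: Decoration.Extended = .{ .location = .{ .location = 0 } };
+    if (dec != .location) return error.UnexpectedTag;
+}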
+pub const BuiltIn = enum(u32) {
+ position = 0,
+ point_size = 1,
+ clip_distance = 3,
+ cull_distance = 4,
+ vertex_id = 5,
+ instance_id = 6,
+ primitive_id = 7,
+ invocation_id = 8,
+ layer = 9,
+ viewport_index = 10,
+ tess_level_outer = 11,
+ tess_level_inner = 12,
+ tess_coord = 13,
+ patch_vertices = 14,
+ frag_coord = 15,
+ point_coord = 16,
+ front_facing = 17,
+ sample_id = 18,
+ sample_position = 19,
+ sample_mask = 20,
+ frag_depth = 22,
+ helper_invocation = 23,
+ num_workgroups = 24,
+ workgroup_size = 25,
+ workgroup_id = 26,
+ local_invocation_id = 27,
+ global_invocation_id = 28,
+ local_invocation_index = 29,
+ work_dim = 30,
+ global_size = 31,
+ enqueued_workgroup_size = 32,
+ global_offset = 33,
+ global_linear_id = 34,
+ subgroup_size = 36,
+ subgroup_max_size = 37,
+ num_subgroups = 38,
+ num_enqueued_subgroups = 39,
+ subgroup_id = 40,
+ subgroup_local_invocation_id = 41,
+ vertex_index = 42,
+ instance_index = 43,
+ core_idarm = 4160,
+ core_count_arm = 4161,
+ core_max_idarm = 4162,
+ warp_idarm = 4163,
+ warp_max_idarm = 4164,
+ subgroup_eq_mask = 4416,
+ subgroup_ge_mask = 4417,
+ subgroup_gt_mask = 4418,
+ subgroup_le_mask = 4419,
+ subgroup_lt_mask = 4420,
+ base_vertex = 4424,
+ base_instance = 4425,
+ draw_index = 4426,
+ primitive_shading_rate_khr = 4432,
+ device_index = 4438,
+ view_index = 4440,
+ shading_rate_khr = 4444,
+ tile_offset_qcom = 4492,
+ tile_dimension_qcom = 4493,
+ tile_apron_size_qcom = 4494,
+ bary_coord_no_persp_amd = 4992,
+ bary_coord_no_persp_centroid_amd = 4993,
+ bary_coord_no_persp_sample_amd = 4994,
+ bary_coord_smooth_amd = 4995,
+ bary_coord_smooth_centroid_amd = 4996,
+ bary_coord_smooth_sample_amd = 4997,
+ bary_coord_pull_model_amd = 4998,
+ frag_stencil_ref_ext = 5014,
+ remaining_recursion_levels_amdx = 5021,
+ shader_index_amdx = 5073,
+ viewport_mask_nv = 5253,
+ secondary_position_nv = 5257,
+ secondary_viewport_mask_nv = 5258,
+ position_per_view_nv = 5261,
+ viewport_mask_per_view_nv = 5262,
+ fully_covered_ext = 5264,
+ task_count_nv = 5274,
+ primitive_count_nv = 5275,
+ primitive_indices_nv = 5276,
+ clip_distance_per_view_nv = 5277,
+ cull_distance_per_view_nv = 5278,
+ layer_per_view_nv = 5279,
+ mesh_view_count_nv = 5280,
+ mesh_view_indices_nv = 5281,
+ bary_coord_khr = 5286,
+ bary_coord_no_persp_khr = 5287,
+ frag_size_ext = 5292,
+ frag_invocation_count_ext = 5293,
+ primitive_point_indices_ext = 5294,
+ primitive_line_indices_ext = 5295,
+ primitive_triangle_indices_ext = 5296,
+ cull_primitive_ext = 5299,
+ launch_id_khr = 5319,
+ launch_size_khr = 5320,
+ world_ray_origin_khr = 5321,
+ world_ray_direction_khr = 5322,
+ object_ray_origin_khr = 5323,
+ object_ray_direction_khr = 5324,
+ ray_tmin_khr = 5325,
+ ray_tmax_khr = 5326,
+ instance_custom_index_khr = 5327,
+ object_to_world_khr = 5330,
+ world_to_object_khr = 5331,
+ hit_tnv = 5332,
+ hit_kind_khr = 5333,
+ current_ray_time_nv = 5334,
+ hit_triangle_vertex_positions_khr = 5335,
+ hit_micro_triangle_vertex_positions_nv = 5337,
+ hit_micro_triangle_vertex_barycentrics_nv = 5344,
+ incoming_ray_flags_khr = 5351,
+ ray_geometry_index_khr = 5352,
+ hit_is_sphere_nv = 5359,
+ hit_is_lssnv = 5360,
+ hit_sphere_position_nv = 5361,
+ warps_per_smnv = 5374,
+ sm_count_nv = 5375,
+ warp_idnv = 5376,
+ smidnv = 5377,
+ hit_lss_positions_nv = 5396,
+ hit_kind_front_facing_micro_triangle_nv = 5405,
+ hit_kind_back_facing_micro_triangle_nv = 5406,
+ hit_sphere_radius_nv = 5420,
+ hit_lss_radii_nv = 5421,
+ cluster_idnv = 5436,
+ cull_mask_khr = 6021,
+};
+pub const Scope = enum(u32) {
+ cross_device = 0,
+ device = 1,
+ workgroup = 2,
+ subgroup = 3,
+ invocation = 4,
+ queue_family = 5,
+ shader_call_khr = 6,
+};
+pub const GroupOperation = enum(u32) {
+ reduce = 0,
+ inclusive_scan = 1,
+ exclusive_scan = 2,
+ clustered_reduce = 3,
+ partitioned_reduce_nv = 6,
+ partitioned_inclusive_scan_nv = 7,
+ partitioned_exclusive_scan_nv = 8,
+};
+pub const KernelEnqueueFlags = enum(u32) {
+ no_wait = 0,
+ wait_kernel = 1,
+ wait_work_group = 2,
+};
+pub const Capability = enum(u32) {
+ matrix = 0,
+ shader = 1,
+ geometry = 2,
+ tessellation = 3,
+ addresses = 4,
+ linkage = 5,
+ kernel = 6,
+ vector16 = 7,
+ float16buffer = 8,
+ float16 = 9,
+ float64 = 10,
+ int64 = 11,
+ int64atomics = 12,
+ image_basic = 13,
+ image_read_write = 14,
+ image_mipmap = 15,
+ pipes = 17,
+ groups = 18,
+ device_enqueue = 19,
+ literal_sampler = 20,
+ atomic_storage = 21,
+ int16 = 22,
+ tessellation_point_size = 23,
+ geometry_point_size = 24,
+ image_gather_extended = 25,
+ storage_image_multisample = 27,
+ uniform_buffer_array_dynamic_indexing = 28,
+ sampled_image_array_dynamic_indexing = 29,
+ storage_buffer_array_dynamic_indexing = 30,
+ storage_image_array_dynamic_indexing = 31,
+ clip_distance = 32,
+ cull_distance = 33,
+ image_cube_array = 34,
+ sample_rate_shading = 35,
+ image_rect = 36,
+ sampled_rect = 37,
+ generic_pointer = 38,
+ int8 = 39,
+ input_attachment = 40,
+ sparse_residency = 41,
+ min_lod = 42,
+ sampled1d = 43,
+ image1d = 44,
+ sampled_cube_array = 45,
+ sampled_buffer = 46,
+ image_buffer = 47,
+ image_ms_array = 48,
+ storage_image_extended_formats = 49,
+ image_query = 50,
+ derivative_control = 51,
+ interpolation_function = 52,
+ transform_feedback = 53,
+ geometry_streams = 54,
+ storage_image_read_without_format = 55,
+ storage_image_write_without_format = 56,
+ multi_viewport = 57,
+ subgroup_dispatch = 58,
+ named_barrier = 59,
+ pipe_storage = 60,
+ group_non_uniform = 61,
+ group_non_uniform_vote = 62,
+ group_non_uniform_arithmetic = 63,
+ group_non_uniform_ballot = 64,
+ group_non_uniform_shuffle = 65,
+ group_non_uniform_shuffle_relative = 66,
+ group_non_uniform_clustered = 67,
+ group_non_uniform_quad = 68,
+ shader_layer = 69,
+ shader_viewport_index = 70,
+ uniform_decoration = 71,
+ core_builtins_arm = 4165,
+ tile_image_color_read_access_ext = 4166,
+ tile_image_depth_read_access_ext = 4167,
+ tile_image_stencil_read_access_ext = 4168,
+ tensors_arm = 4174,
+ storage_tensor_array_dynamic_indexing_arm = 4175,
+ storage_tensor_array_non_uniform_indexing_arm = 4176,
+ graph_arm = 4191,
+ cooperative_matrix_layouts_arm = 4201,
+ float8ext = 4212,
+ float8cooperative_matrix_ext = 4213,
+ fragment_shading_rate_khr = 4422,
+ subgroup_ballot_khr = 4423,
+ draw_parameters = 4427,
+ workgroup_memory_explicit_layout_khr = 4428,
+ workgroup_memory_explicit_layout8bit_access_khr = 4429,
+ workgroup_memory_explicit_layout16bit_access_khr = 4430,
+ subgroup_vote_khr = 4431,
+ storage_buffer16bit_access = 4433,
+ uniform_and_storage_buffer16bit_access = 4434,
+ storage_push_constant16 = 4435,
+ storage_input_output16 = 4436,
+ device_group = 4437,
+ multi_view = 4439,
+ variable_pointers_storage_buffer = 4441,
+ variable_pointers = 4442,
+ atomic_storage_ops = 4445,
+ sample_mask_post_depth_coverage = 4447,
+ storage_buffer8bit_access = 4448,
+ uniform_and_storage_buffer8bit_access = 4449,
+ storage_push_constant8 = 4450,
+ denorm_preserve = 4464,
+ denorm_flush_to_zero = 4465,
+ signed_zero_inf_nan_preserve = 4466,
+ rounding_mode_rte = 4467,
+ rounding_mode_rtz = 4468,
+ ray_query_provisional_khr = 4471,
+ ray_query_khr = 4472,
+ untyped_pointers_khr = 4473,
+ ray_traversal_primitive_culling_khr = 4478,
+ ray_tracing_khr = 4479,
+ texture_sample_weighted_qcom = 4484,
+ texture_box_filter_qcom = 4485,
+ texture_block_match_qcom = 4486,
+ tile_shading_qcom = 4495,
+ texture_block_match2qcom = 4498,
+ float16image_amd = 5008,
+ image_gather_bias_lod_amd = 5009,
+ fragment_mask_amd = 5010,
+ stencil_export_ext = 5013,
+ image_read_write_lod_amd = 5015,
+ int64image_ext = 5016,
+ shader_clock_khr = 5055,
+ shader_enqueue_amdx = 5067,
+ quad_control_khr = 5087,
+ int4type_intel = 5112,
+ int4cooperative_matrix_intel = 5114,
+ b_float16type_khr = 5116,
+ b_float16dot_product_khr = 5117,
+ b_float16cooperative_matrix_khr = 5118,
+ sample_mask_override_coverage_nv = 5249,
+ geometry_shader_passthrough_nv = 5251,
+ shader_viewport_index_layer_ext = 5254,
+ shader_viewport_mask_nv = 5255,
+ shader_stereo_view_nv = 5259,
+ per_view_attributes_nv = 5260,
+ fragment_fully_covered_ext = 5265,
+ mesh_shading_nv = 5266,
+ image_footprint_nv = 5282,
+ mesh_shading_ext = 5283,
+ fragment_barycentric_khr = 5284,
+ compute_derivative_group_quads_khr = 5288,
+ fragment_density_ext = 5291,
+ group_non_uniform_partitioned_nv = 5297,
+ shader_non_uniform = 5301,
+ runtime_descriptor_array = 5302,
+ input_attachment_array_dynamic_indexing = 5303,
+ uniform_texel_buffer_array_dynamic_indexing = 5304,
+ storage_texel_buffer_array_dynamic_indexing = 5305,
+ uniform_buffer_array_non_uniform_indexing = 5306,
+ sampled_image_array_non_uniform_indexing = 5307,
+ storage_buffer_array_non_uniform_indexing = 5308,
+ storage_image_array_non_uniform_indexing = 5309,
+ input_attachment_array_non_uniform_indexing = 5310,
+ uniform_texel_buffer_array_non_uniform_indexing = 5311,
+ storage_texel_buffer_array_non_uniform_indexing = 5312,
+ ray_tracing_position_fetch_khr = 5336,
+ ray_tracing_nv = 5340,
+ ray_tracing_motion_blur_nv = 5341,
+ vulkan_memory_model = 5345,
+ vulkan_memory_model_device_scope = 5346,
+ physical_storage_buffer_addresses = 5347,
+ compute_derivative_group_linear_khr = 5350,
+ ray_tracing_provisional_khr = 5353,
+ cooperative_matrix_nv = 5357,
+ fragment_shader_sample_interlock_ext = 5363,
+ fragment_shader_shading_rate_interlock_ext = 5372,
+ shader_sm_builtins_nv = 5373,
+ fragment_shader_pixel_interlock_ext = 5378,
+ demote_to_helper_invocation = 5379,
+ displacement_micromap_nv = 5380,
+ ray_tracing_opacity_micromap_ext = 5381,
+ shader_invocation_reorder_nv = 5383,
+ bindless_texture_nv = 5390,
+ ray_query_position_fetch_khr = 5391,
+ cooperative_vector_nv = 5394,
+ atomic_float16vector_nv = 5404,
+ ray_tracing_displacement_micromap_nv = 5409,
+ raw_access_chains_nv = 5414,
+ ray_tracing_spheres_geometry_nv = 5418,
+ ray_tracing_linear_swept_spheres_geometry_nv = 5419,
+ cooperative_matrix_reductions_nv = 5430,
+ cooperative_matrix_conversions_nv = 5431,
+ cooperative_matrix_per_element_operations_nv = 5432,
+ cooperative_matrix_tensor_addressing_nv = 5433,
+ cooperative_matrix_block_loads_nv = 5434,
+ cooperative_vector_training_nv = 5435,
+ ray_tracing_cluster_acceleration_structure_nv = 5437,
+ tensor_addressing_nv = 5439,
+ subgroup_shuffle_intel = 5568,
+ subgroup_buffer_block_iointel = 5569,
+ subgroup_image_block_iointel = 5570,
+ subgroup_image_media_block_iointel = 5579,
+ round_to_infinity_intel = 5582,
+ floating_point_mode_intel = 5583,
+ integer_functions2intel = 5584,
+ function_pointers_intel = 5603,
+ indirect_references_intel = 5604,
+ asm_intel = 5606,
+ atomic_float32min_max_ext = 5612,
+ atomic_float64min_max_ext = 5613,
+ atomic_float16min_max_ext = 5616,
+ vector_compute_intel = 5617,
+ vector_any_intel = 5619,
+ expect_assume_khr = 5629,
+ subgroup_avc_motion_estimation_intel = 5696,
+ subgroup_avc_motion_estimation_intra_intel = 5697,
+ subgroup_avc_motion_estimation_chroma_intel = 5698,
+ variable_length_array_intel = 5817,
+ function_float_control_intel = 5821,
+ fpga_memory_attributes_intel = 5824,
+ fp_fast_math_mode_intel = 5837,
+ arbitrary_precision_integers_intel = 5844,
+ arbitrary_precision_floating_point_intel = 5845,
+ unstructured_loop_controls_intel = 5886,
+ fpga_loop_controls_intel = 5888,
+ kernel_attributes_intel = 5892,
+ fpga_kernel_attributes_intel = 5897,
+ fpga_memory_accesses_intel = 5898,
+ fpga_cluster_attributes_intel = 5904,
+ loop_fuse_intel = 5906,
+ fpgadsp_control_intel = 5908,
+ memory_access_aliasing_intel = 5910,
+ fpga_invocation_pipelining_attributes_intel = 5916,
+ fpga_buffer_location_intel = 5920,
+ arbitrary_precision_fixed_point_intel = 5922,
+ usm_storage_classes_intel = 5935,
+ runtime_aligned_attribute_intel = 5939,
+ io_pipes_intel = 5943,
+ blocking_pipes_intel = 5945,
+ fpga_reg_intel = 5948,
+ dot_product_input_all = 6016,
+ dot_product_input4x8bit = 6017,
+ dot_product_input4x8bit_packed = 6018,
+ dot_product = 6019,
+ ray_cull_mask_khr = 6020,
+ cooperative_matrix_khr = 6022,
+ replicated_composites_ext = 6024,
+ bit_instructions = 6025,
+ group_non_uniform_rotate_khr = 6026,
+ float_controls2 = 6029,
+ atomic_float32add_ext = 6033,
+ atomic_float64add_ext = 6034,
+ long_composites_intel = 6089,
+ opt_none_ext = 6094,
+ atomic_float16add_ext = 6095,
+ debug_info_module_intel = 6114,
+ b_float16conversion_intel = 6115,
+ split_barrier_intel = 6141,
+ arithmetic_fence_ext = 6144,
+ fpga_cluster_attributes_v2intel = 6150,
+ fpga_kernel_attributesv2intel = 6161,
+ task_sequence_intel = 6162,
+ fp_max_error_intel = 6169,
+ fpga_latency_control_intel = 6171,
+ fpga_argument_interfaces_intel = 6174,
+ global_variable_host_access_intel = 6187,
+ global_variable_fpga_decorations_intel = 6189,
+ subgroup_buffer_prefetch_intel = 6220,
+ subgroup2d_block_iointel = 6228,
+ subgroup2d_block_transform_intel = 6229,
+ subgroup2d_block_transpose_intel = 6230,
+ subgroup_matrix_multiply_accumulate_intel = 6236,
+ ternary_bitwise_function_intel = 6241,
+ group_uniform_arithmetic_khr = 6400,
+ tensor_float32rounding_intel = 6425,
+ masked_gather_scatter_intel = 6427,
+ cache_controls_intel = 6441,
+ register_limits_intel = 6460,
+ bindless_images_intel = 6528,
+};
+pub const RayQueryIntersection = enum(u32) {
+ ray_query_candidate_intersection_khr = 0,
+ ray_query_committed_intersection_khr = 1,
+};
+pub const RayQueryCommittedIntersectionType = enum(u32) {
+ ray_query_committed_intersection_none_khr = 0,
+ ray_query_committed_intersection_triangle_khr = 1,
+ ray_query_committed_intersection_generated_khr = 2,
+};
+pub const RayQueryCandidateIntersectionType = enum(u32) {
+ ray_query_candidate_intersection_triangle_khr = 0,
+ ray_query_candidate_intersection_aabbkhr = 1,
+};
+pub const PackedVectorFormat = enum(u32) {
+ packed_vector_format4x8bit = 0,
+};
+pub const CooperativeMatrixOperands = packed struct {
+ matrix_a_signed_components_khr: bool = false,
+ matrix_b_signed_components_khr: bool = false,
+ matrix_c_signed_components_khr: bool = false,
+ matrix_result_signed_components_khr: bool = false,
+ saturating_accumulation_khr: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const CooperativeMatrixLayout = enum(u32) {
+ row_major_khr = 0,
+ column_major_khr = 1,
+ row_blocked_interleaved_arm = 4202,
+ column_blocked_interleaved_arm = 4203,
+};
+pub const CooperativeMatrixUse = enum(u32) {
+ matrix_akhr = 0,
+ matrix_bkhr = 1,
+ matrix_accumulator_khr = 2,
+};
+pub const CooperativeMatrixReduce = packed struct {
+ row: bool = false,
+ column: bool = false,
+ @"2x2": bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const TensorClampMode = enum(u32) {
+ undefined = 0,
+ constant = 1,
+ clamp_to_edge = 2,
+ repeat = 3,
+ repeat_mirrored = 4,
+};
+pub const TensorAddressingOperands = packed struct {
+ tensor_view: bool = false,
+ decode_func: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+
+ pub const Extended = struct {
+ tensor_view: ?struct { id_ref: Id } = null,
+ decode_func: ?struct { id_ref: Id } = null,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
+};
+pub const InitializationModeQualifier = enum(u32) {
+ init_on_device_reprogram_intel = 0,
+ init_on_device_reset_intel = 1,
+};
+pub const LoadCacheControl = enum(u32) {
+ uncached_intel = 0,
+ cached_intel = 1,
+ streaming_intel = 2,
+ invalidate_after_read_intel = 3,
+ const_cached_intel = 4,
+};
+pub const StoreCacheControl = enum(u32) {
+ uncached_intel = 0,
+ write_through_intel = 1,
+ write_back_intel = 2,
+ streaming_intel = 3,
+};
+pub const NamedMaximumNumberOfRegisters = enum(u32) {
+ auto_intel = 0,
+};
+pub const MatrixMultiplyAccumulateOperands = packed struct {
+ matrix_a_signed_components_intel: bool = false,
+ matrix_b_signed_components_intel: bool = false,
+ matrix_cb_float16intel: bool = false,
+ matrix_result_b_float16intel: bool = false,
+ matrix_a_packed_int8intel: bool = false,
+ matrix_b_packed_int8intel: bool = false,
+ matrix_a_packed_int4intel: bool = false,
+ matrix_b_packed_int4intel: bool = false,
+ matrix_atf32intel: bool = false,
+ matrix_btf32intel: bool = false,
+ matrix_a_packed_float16intel: bool = false,
+ matrix_b_packed_float16intel: bool = false,
+ matrix_a_packed_b_float16intel: bool = false,
+ matrix_b_packed_b_float16intel: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const FPEncoding = enum(u32) {
+ b_float16khr = 0,
+ float8e4m3ext = 4214,
+ float8e5m2ext = 4215,
+};
+pub const CooperativeVectorMatrixLayout = enum(u32) {
+ row_major_nv = 0,
+ column_major_nv = 1,
+ inferencing_optimal_nv = 2,
+ training_optimal_nv = 3,
+};
+pub const ComponentType = enum(u32) {
+ float16nv = 0,
+ float32nv = 1,
+ float64nv = 2,
+ signed_int8nv = 3,
+ signed_int16nv = 4,
+ signed_int32nv = 5,
+ signed_int64nv = 6,
+ unsigned_int8nv = 7,
+ unsigned_int16nv = 8,
+ unsigned_int32nv = 9,
+ unsigned_int64nv = 10,
+ signed_int8packed_nv = 1000491000,
+ unsigned_int8packed_nv = 1000491001,
+ float_e4m3nv = 1000491002,
+ float_e5m2nv = 1000491003,
+};
+pub const TensorOperands = packed struct {
+ nontemporal_arm: bool = false,
+ out_of_bounds_value_arm: bool = false,
+ make_element_available_arm: bool = false,
+ make_element_visible_arm: bool = false,
+ non_private_element_arm: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+
+ pub const Extended = struct {
+ nontemporal_arm: bool = false,
+ out_of_bounds_value_arm: ?struct { id_ref: Id } = null,
+ make_element_available_arm: ?struct { id_ref: Id } = null,
+ make_element_visible_arm: ?struct { id_ref: Id } = null,
+ non_private_element_arm: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+ };
+};
+pub const @"DebugInfo.DebugInfoFlags" = packed struct {
+ flag_is_protected: bool = false,
+ flag_is_private: bool = false,
+ flag_is_local: bool = false,
+ flag_is_definition: bool = false,
+ flag_fwd_decl: bool = false,
+ flag_artificial: bool = false,
+ flag_explicit: bool = false,
+ flag_prototyped: bool = false,
+ flag_object_pointer: bool = false,
+ flag_static_member: bool = false,
+ flag_indirect_variable: bool = false,
+ flag_l_value_reference: bool = false,
+ flag_r_value_reference: bool = false,
+ flag_is_optimized: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const @"DebugInfo.DebugBaseTypeAttributeEncoding" = enum(u32) {
+ unspecified = 0,
+ address = 1,
+ boolean = 2,
+ float = 4,
+ signed = 5,
+ signed_char = 6,
+ unsigned = 7,
+ unsigned_char = 8,
+};
+pub const @"DebugInfo.DebugCompositeType" = enum(u32) {
+ class = 0,
+ structure = 1,
+ @"union" = 2,
+};
+pub const @"DebugInfo.DebugTypeQualifier" = enum(u32) {
+ const_type = 0,
+ volatile_type = 1,
+ restrict_type = 2,
+};
+pub const @"DebugInfo.DebugOperation" = enum(u32) {
+ deref = 0,
+ plus = 1,
+ minus = 2,
+ plus_uconst = 3,
+ bit_piece = 4,
+ swap = 5,
+ xderef = 6,
+ stack_value = 7,
+ constu = 8,
+
+ pub const Extended = union(@"DebugInfo.DebugOperation") {
+ deref,
+ plus,
+ minus,
+ plus_uconst: struct { literal_integer: LiteralInteger },
+ bit_piece: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger },
+ swap,
+ xderef,
+ stack_value,
+ constu: struct { literal_integer: LiteralInteger },
+ };
+};
+pub const @"OpenCL.DebugInfo.100.DebugInfoFlags" = packed struct {
+ flag_is_protected: bool = false,
+ flag_is_private: bool = false,
+ flag_is_local: bool = false,
+ flag_is_definition: bool = false,
+ flag_fwd_decl: bool = false,
+ flag_artificial: bool = false,
+ flag_explicit: bool = false,
+ flag_prototyped: bool = false,
+ flag_object_pointer: bool = false,
+ flag_static_member: bool = false,
+ flag_indirect_variable: bool = false,
+ flag_l_value_reference: bool = false,
+ flag_r_value_reference: bool = false,
+ flag_is_optimized: bool = false,
+ flag_is_enum_class: bool = false,
+ flag_type_pass_by_value: bool = false,
+ flag_type_pass_by_reference: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const @"OpenCL.DebugInfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
+ unspecified = 0,
+ address = 1,
+ boolean = 2,
+ float = 3,
+ signed = 4,
+ signed_char = 5,
+ unsigned = 6,
+ unsigned_char = 7,
+};
+pub const @"OpenCL.DebugInfo.100.DebugCompositeType" = enum(u32) {
+ class = 0,
+ structure = 1,
+ @"union" = 2,
+};
+pub const @"OpenCL.DebugInfo.100.DebugTypeQualifier" = enum(u32) {
+ const_type = 0,
+ volatile_type = 1,
+ restrict_type = 2,
+ atomic_type = 3,
+};
+pub const @"OpenCL.DebugInfo.100.DebugOperation" = enum(u32) {
+ deref = 0,
+ plus = 1,
+ minus = 2,
+ plus_uconst = 3,
+ bit_piece = 4,
+ swap = 5,
+ xderef = 6,
+ stack_value = 7,
+ constu = 8,
+ fragment = 9,
+
+ pub const Extended = union(@"OpenCL.DebugInfo.100.DebugOperation") {
+ deref,
+ plus,
+ minus,
+ plus_uconst: struct { literal_integer: LiteralInteger },
+ bit_piece: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger },
+ swap,
+ xderef,
+ stack_value,
+ constu: struct { literal_integer: LiteralInteger },
+ fragment: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger },
+ };
+};
+pub const @"OpenCL.DebugInfo.100.DebugImportedEntity" = enum(u32) {
+ imported_module = 0,
+ imported_declaration = 1,
+};
+pub const @"NonSemantic.ClspvReflection.6.KernelPropertyFlags" = packed struct {
+ may_use_printf: bool = false,
+ _reserved_bit_1: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.DebugInfoFlags" = packed struct {
+ flag_is_protected: bool = false,
+ flag_is_private: bool = false,
+ flag_is_local: bool = false,
+ flag_is_definition: bool = false,
+ flag_fwd_decl: bool = false,
+ flag_artificial: bool = false,
+ flag_explicit: bool = false,
+ flag_prototyped: bool = false,
+ flag_object_pointer: bool = false,
+ flag_static_member: bool = false,
+ flag_indirect_variable: bool = false,
+ flag_l_value_reference: bool = false,
+ flag_r_value_reference: bool = false,
+ flag_is_optimized: bool = false,
+ flag_is_enum_class: bool = false,
+ flag_type_pass_by_value: bool = false,
+ flag_type_pass_by_reference: bool = false,
+ flag_unknown_physical_layout: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.BuildIdentifierFlags" = packed struct {
+ identifier_possible_duplicates: bool = false,
+ _reserved_bit_1: bool = false,
+ _reserved_bit_2: bool = false,
+ _reserved_bit_3: bool = false,
+ _reserved_bit_4: bool = false,
+ _reserved_bit_5: bool = false,
+ _reserved_bit_6: bool = false,
+ _reserved_bit_7: bool = false,
+ _reserved_bit_8: bool = false,
+ _reserved_bit_9: bool = false,
+ _reserved_bit_10: bool = false,
+ _reserved_bit_11: bool = false,
+ _reserved_bit_12: bool = false,
+ _reserved_bit_13: bool = false,
+ _reserved_bit_14: bool = false,
+ _reserved_bit_15: bool = false,
+ _reserved_bit_16: bool = false,
+ _reserved_bit_17: bool = false,
+ _reserved_bit_18: bool = false,
+ _reserved_bit_19: bool = false,
+ _reserved_bit_20: bool = false,
+ _reserved_bit_21: bool = false,
+ _reserved_bit_22: bool = false,
+ _reserved_bit_23: bool = false,
+ _reserved_bit_24: bool = false,
+ _reserved_bit_25: bool = false,
+ _reserved_bit_26: bool = false,
+ _reserved_bit_27: bool = false,
+ _reserved_bit_28: bool = false,
+ _reserved_bit_29: bool = false,
+ _reserved_bit_30: bool = false,
+ _reserved_bit_31: bool = false,
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
+ unspecified = 0,
+ address = 1,
+ boolean = 2,
+ float = 3,
+ signed = 4,
+ signed_char = 5,
+ unsigned = 6,
+ unsigned_char = 7,
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.DebugCompositeType" = enum(u32) {
+ class = 0,
+ structure = 1,
+ @"union" = 2,
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.DebugTypeQualifier" = enum(u32) {
+ const_type = 0,
+ volatile_type = 1,
+ restrict_type = 2,
+ atomic_type = 3,
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.DebugOperation" = enum(u32) {
+ deref = 0,
+ plus = 1,
+ minus = 2,
+ plus_uconst = 3,
+ bit_piece = 4,
+ swap = 5,
+ xderef = 6,
+ stack_value = 7,
+ constu = 8,
+ fragment = 9,
+
+ pub const Extended = union(@"NonSemantic.Shader.DebugInfo.100.DebugOperation") {
+ deref,
+ plus,
+ minus,
+ plus_uconst: struct { id_ref: Id },
+ bit_piece: struct { id_ref_0: Id, id_ref_1: Id },
+ swap,
+ xderef,
+ stack_value,
+ constu: struct { id_ref: Id },
+ fragment: struct { id_ref_0: Id, id_ref_1: Id },
+ };
+};
+pub const @"NonSemantic.Shader.DebugInfo.100.DebugImportedEntity" = enum(u32) {
+ imported_module = 0,
+ imported_declaration = 1,
+};
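+/// Every instruction set described by this file: the SPIR-V core set plus the
+/// known extended instruction sets.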
+pub const InstructionSet = enum {
+ core,
+ SPV_AMD_shader_trinary_minmax,
+ SPV_EXT_INST_TYPE_TOSA_001000_1,
+ @"NonSemantic.VkspReflection",
+ SPV_AMD_shader_explicit_vertex_parameter,
+ DebugInfo,
+ @"NonSemantic.DebugBreak",
+ @"OpenCL.DebugInfo.100",
+ @"NonSemantic.ClspvReflection.6",
+ @"GLSL.std.450",
+ SPV_AMD_shader_ballot,
+ @"NonSemantic.DebugPrintf",
+ SPV_AMD_gcn_shader,
+ @"OpenCL.std",
+ @"NonSemantic.Shader.DebugInfo.100",
+ zig,
+
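+ /// Returns the instructions that belong to this set, each described by its name,
+ /// opcode, and operand kinds/quantifiers.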
+ pub fn instructions(self: InstructionSet) []const Instruction {
+ return switch (self) {
+ .core => &.{
+ .{
+ .name = "OpNop",
+ .opcode = 0,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpUndef",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSourceContinued",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSource",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .source_language, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .literal_string, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSourceExtension",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpName",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMemberName",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpString",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLine",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExtension",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExtInstImport",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExtInst",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_ext_inst_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpMemoryModel",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .addressing_model, .quantifier = .required },
+ .{ .kind = .memory_model, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEntryPoint",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .execution_model, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpExecutionMode",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .execution_mode, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCapability",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .capability, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeVoid",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeBool",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeInt",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeFloat",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .fp_encoding, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTypeVector",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeMatrix",
+ .opcode = 24,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeImage",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .dim, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .image_format, .quantifier = .required },
+ .{ .kind = .access_qualifier, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTypeSampler",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeSampledImage",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeArray",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeRuntimeArray",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeStruct",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTypeOpaque",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypePointer",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeFunction",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTypeEvent",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeDeviceEvent",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeReserveId",
+ .opcode = 36,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeQueue",
+ .opcode = 37,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypePipe",
+ .opcode = 38,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .access_qualifier, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeForwardPointer",
+ .opcode = 39,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantTrue",
+ .opcode = 41,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantFalse",
+ .opcode = 42,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstant",
+ .opcode = 43,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_context_dependent_number, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantComposite",
+ .opcode = 44,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpConstantSampler",
+ .opcode = 45,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .sampler_addressing_mode, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .sampler_filter_mode, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantNull",
+ .opcode = 46,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSpecConstantTrue",
+ .opcode = 48,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSpecConstantFalse",
+ .opcode = 49,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSpecConstant",
+ .opcode = 50,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_context_dependent_number, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSpecConstantComposite",
+ .opcode = 51,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpSpecConstantOp",
+ .opcode = 52,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_spec_constant_op_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFunction",
+ .opcode = 54,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .function_control, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFunctionParameter",
+ .opcode = 55,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFunctionEnd",
+ .opcode = 56,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpFunctionCall",
+ .opcode = 57,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpVariable",
+ .opcode = 59,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageTexelPointer",
+ .opcode = 60,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLoad",
+ .opcode = 61,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpStore",
+ .opcode = 62,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCopyMemory",
+ .opcode = 63,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCopyMemorySized",
+ .opcode = 64,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpAccessChain",
+ .opcode = 65,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpInBoundsAccessChain",
+ .opcode = 66,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpPtrAccessChain",
+ .opcode = 67,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpArrayLength",
+ .opcode = 68,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGenericPtrMemSemantics",
+ .opcode = 69,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpInBoundsPtrAccessChain",
+ .opcode = 70,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpDecorate",
+ .opcode = 71,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .decoration, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMemberDecorate",
+ .opcode = 72,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .decoration, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDecorationGroup",
+ .opcode = 73,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupDecorate",
+ .opcode = 74,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpGroupMemberDecorate",
+ .opcode = 75,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .pair_id_ref_literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpVectorExtractDynamic",
+ .opcode = 77,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpVectorInsertDynamic",
+ .opcode = 78,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpVectorShuffle",
+ .opcode = 79,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCompositeConstruct",
+ .opcode = 80,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCompositeExtract",
+ .opcode = 81,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCompositeInsert",
+ .opcode = 82,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCopyObject",
+ .opcode = 83,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTranspose",
+ .opcode = 84,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSampledImage",
+ .opcode = 86,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSampleImplicitLod",
+ .opcode = 87,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSampleExplicitLod",
+ .opcode = 88,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSampleDrefImplicitLod",
+ .opcode = 89,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSampleDrefExplicitLod",
+ .opcode = 90,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSampleProjImplicitLod",
+ .opcode = 91,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSampleProjExplicitLod",
+ .opcode = 92,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSampleProjDrefImplicitLod",
+ .opcode = 93,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSampleProjDrefExplicitLod",
+ .opcode = 94,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageFetch",
+ .opcode = 95,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageGather",
+ .opcode = 96,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageDrefGather",
+ .opcode = 97,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageRead",
+ .opcode = 98,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageWrite",
+ .opcode = 99,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImage",
+ .opcode = 100,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQueryFormat",
+ .opcode = 101,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQueryOrder",
+ .opcode = 102,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQuerySizeLod",
+ .opcode = 103,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQuerySize",
+ .opcode = 104,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQueryLod",
+ .opcode = 105,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQueryLevels",
+ .opcode = 106,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageQuerySamples",
+ .opcode = 107,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertFToU",
+ .opcode = 109,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertFToS",
+ .opcode = 110,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertSToF",
+ .opcode = 111,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertUToF",
+ .opcode = 112,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUConvert",
+ .opcode = 113,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSConvert",
+ .opcode = 114,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFConvert",
+ .opcode = 115,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpQuantizeToF16",
+ .opcode = 116,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertPtrToU",
+ .opcode = 117,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSatConvertSToU",
+ .opcode = 118,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSatConvertUToS",
+ .opcode = 119,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertUToPtr",
+ .opcode = 120,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpPtrCastToGeneric",
+ .opcode = 121,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGenericCastToPtr",
+ .opcode = 122,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGenericCastToPtrExplicit",
+ .opcode = 123,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitcast",
+ .opcode = 124,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSNegate",
+ .opcode = 126,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFNegate",
+ .opcode = 127,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIAdd",
+ .opcode = 128,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFAdd",
+ .opcode = 129,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpISub",
+ .opcode = 130,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFSub",
+ .opcode = 131,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIMul",
+ .opcode = 132,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFMul",
+ .opcode = 133,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUDiv",
+ .opcode = 134,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSDiv",
+ .opcode = 135,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFDiv",
+ .opcode = 136,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUMod",
+ .opcode = 137,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSRem",
+ .opcode = 138,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSMod",
+ .opcode = 139,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFRem",
+ .opcode = 140,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFMod",
+ .opcode = 141,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpVectorTimesScalar",
+ .opcode = 142,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMatrixTimesScalar",
+ .opcode = 143,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpVectorTimesMatrix",
+ .opcode = 144,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMatrixTimesVector",
+ .opcode = 145,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMatrixTimesMatrix",
+ .opcode = 146,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpOuterProduct",
+ .opcode = 147,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDot",
+ .opcode = 148,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIAddCarry",
+ .opcode = 149,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpISubBorrow",
+ .opcode = 150,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUMulExtended",
+ .opcode = 151,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSMulExtended",
+ .opcode = 152,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAny",
+ .opcode = 154,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAll",
+ .opcode = 155,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsNan",
+ .opcode = 156,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsInf",
+ .opcode = 157,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsFinite",
+ .opcode = 158,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsNormal",
+ .opcode = 159,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSignBitSet",
+ .opcode = 160,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLessOrGreater",
+ .opcode = 161,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpOrdered",
+ .opcode = 162,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUnordered",
+ .opcode = 163,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLogicalEqual",
+ .opcode = 164,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLogicalNotEqual",
+ .opcode = 165,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLogicalOr",
+ .opcode = 166,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLogicalAnd",
+ .opcode = 167,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLogicalNot",
+ .opcode = 168,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSelect",
+ .opcode = 169,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIEqual",
+ .opcode = 170,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpINotEqual",
+ .opcode = 171,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUGreaterThan",
+ .opcode = 172,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSGreaterThan",
+ .opcode = 173,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUGreaterThanEqual",
+ .opcode = 174,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSGreaterThanEqual",
+ .opcode = 175,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpULessThan",
+ .opcode = 176,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSLessThan",
+ .opcode = 177,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpULessThanEqual",
+ .opcode = 178,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSLessThanEqual",
+ .opcode = 179,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFOrdEqual",
+ .opcode = 180,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFUnordEqual",
+ .opcode = 181,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFOrdNotEqual",
+ .opcode = 182,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFUnordNotEqual",
+ .opcode = 183,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFOrdLessThan",
+ .opcode = 184,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFUnordLessThan",
+ .opcode = 185,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFOrdGreaterThan",
+ .opcode = 186,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFUnordGreaterThan",
+ .opcode = 187,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFOrdLessThanEqual",
+ .opcode = 188,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFUnordLessThanEqual",
+ .opcode = 189,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFOrdGreaterThanEqual",
+ .opcode = 190,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFUnordGreaterThanEqual",
+ .opcode = 191,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpShiftRightLogical",
+ .opcode = 194,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpShiftRightArithmetic",
+ .opcode = 195,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpShiftLeftLogical",
+ .opcode = 196,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitwiseOr",
+ .opcode = 197,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitwiseXor",
+ .opcode = 198,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitwiseAnd",
+ .opcode = 199,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpNot",
+ .opcode = 200,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitFieldInsert",
+ .opcode = 201,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitFieldSExtract",
+ .opcode = 202,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitFieldUExtract",
+ .opcode = 203,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitReverse",
+ .opcode = 204,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBitCount",
+ .opcode = 205,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDPdx",
+ .opcode = 207,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDPdy",
+ .opcode = 208,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFwidth",
+ .opcode = 209,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDPdxFine",
+ .opcode = 210,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDPdyFine",
+ .opcode = 211,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFwidthFine",
+ .opcode = 212,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDPdxCoarse",
+ .opcode = 213,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDPdyCoarse",
+ .opcode = 214,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFwidthCoarse",
+ .opcode = 215,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEmitVertex",
+ .opcode = 218,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpEndPrimitive",
+ .opcode = 219,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpEmitStreamVertex",
+ .opcode = 220,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEndStreamPrimitive",
+ .opcode = 221,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpControlBarrier",
+ .opcode = 224,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMemoryBarrier",
+ .opcode = 225,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicLoad",
+ .opcode = 227,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicStore",
+ .opcode = 228,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicExchange",
+ .opcode = 229,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicCompareExchange",
+ .opcode = 230,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicCompareExchangeWeak",
+ .opcode = 231,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicIIncrement",
+ .opcode = 232,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicIDecrement",
+ .opcode = 233,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicIAdd",
+ .opcode = 234,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicISub",
+ .opcode = 235,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicSMin",
+ .opcode = 236,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicUMin",
+ .opcode = 237,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicSMax",
+ .opcode = 238,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicUMax",
+ .opcode = 239,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicAnd",
+ .opcode = 240,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicOr",
+ .opcode = 241,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicXor",
+ .opcode = 242,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
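+ // The variadic operand of OpPhi is (value, parent-block) pairs; illustrative only
+ // (placeholder ids): `%x = OpPhi %i32 %a %bb1 %b %bb2`.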
+ .{
+ .name = "OpPhi",
+ .opcode = 245,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .pair_id_ref_id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpLoopMerge",
+ .opcode = 246,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .loop_control, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSelectionMerge",
+ .opcode = 247,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .selection_control, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLabel",
+ .opcode = 248,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBranch",
+ .opcode = 249,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
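+ // The trailing literals of OpBranchConditional are optional branch weights (two when
+ // present); illustrative only (placeholder ids): `OpBranchConditional %cond %then %else 90 10`.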
+ .{
+ .name = "OpBranchConditional",
+ .opcode = 250,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
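+ // The variadic operand of OpSwitch is (case-literal, label) pairs; illustrative only
+ // (placeholder ids): `OpSwitch %selector %default 0 %case0 1 %case1`.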
+ .{
+ .name = "OpSwitch",
+ .opcode = 251,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .pair_literal_integer_id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpKill",
+ .opcode = 252,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpReturn",
+ .opcode = 253,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpReturnValue",
+ .opcode = 254,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUnreachable",
+ .opcode = 255,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpLifetimeStart",
+ .opcode = 256,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLifetimeStop",
+ .opcode = 257,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupAsyncCopy",
+ .opcode = 259,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupWaitEvents",
+ .opcode = 260,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupAll",
+ .opcode = 261,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupAny",
+ .opcode = 262,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupBroadcast",
+ .opcode = 263,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupIAdd",
+ .opcode = 264,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFAdd",
+ .opcode = 265,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFMin",
+ .opcode = 266,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupUMin",
+ .opcode = 267,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupSMin",
+ .opcode = 268,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFMax",
+ .opcode = 269,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupUMax",
+ .opcode = 270,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupSMax",
+ .opcode = 271,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReadPipe",
+ .opcode = 274,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpWritePipe",
+ .opcode = 275,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReservedReadPipe",
+ .opcode = 276,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReservedWritePipe",
+ .opcode = 277,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReserveReadPipePackets",
+ .opcode = 278,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReserveWritePipePackets",
+ .opcode = 279,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCommitReadPipe",
+ .opcode = 280,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCommitWritePipe",
+ .opcode = 281,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsValidReserveId",
+ .opcode = 282,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetNumPipePackets",
+ .opcode = 283,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetMaxPipePackets",
+ .opcode = 284,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupReserveReadPipePackets",
+ .opcode = 285,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupReserveWritePipePackets",
+ .opcode = 286,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupCommitReadPipe",
+ .opcode = 287,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupCommitWritePipe",
+ .opcode = 288,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEnqueueMarker",
+ .opcode = 291,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEnqueueKernel",
+ .opcode = 292,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpGetKernelNDrangeSubGroupCount",
+ .opcode = 293,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetKernelNDrangeMaxSubGroupSize",
+ .opcode = 294,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetKernelWorkGroupSize",
+ .opcode = 295,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetKernelPreferredWorkGroupSizeMultiple",
+ .opcode = 296,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRetainEvent",
+ .opcode = 297,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReleaseEvent",
+ .opcode = 298,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCreateUserEvent",
+ .opcode = 299,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsValidEvent",
+ .opcode = 300,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSetUserEventStatus",
+ .opcode = 301,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCaptureEventProfilingInfo",
+ .opcode = 302,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetDefaultQueue",
+ .opcode = 303,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBuildNDRange",
+ .opcode = 304,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleImplicitLod",
+ .opcode = 305,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleExplicitLod",
+ .opcode = 306,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleDrefImplicitLod",
+ .opcode = 307,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleDrefExplicitLod",
+ .opcode = 308,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleProjImplicitLod",
+ .opcode = 309,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleProjExplicitLod",
+ .opcode = 310,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleProjDrefImplicitLod",
+ .opcode = 311,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseSampleProjDrefExplicitLod",
+ .opcode = 312,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSparseFetch",
+ .opcode = 313,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseGather",
+ .opcode = 314,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseDrefGather",
+ .opcode = 315,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpImageSparseTexelsResident",
+ .opcode = 316,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpNoLine",
+ .opcode = 317,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpAtomicFlagTestAndSet",
+ .opcode = 318,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicFlagClear",
+ .opcode = 319,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSparseRead",
+ .opcode = 320,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSizeOf",
+ .opcode = 321,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypePipeStorage",
+ .opcode = 322,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantPipeStorage",
+ .opcode = 323,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCreatePipeFromPipeStorage",
+ .opcode = 324,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetKernelLocalSizeForSubgroupCount",
+ .opcode = 325,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGetKernelMaxNumSubgroups",
+ .opcode = 326,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeNamedBarrier",
+ .opcode = 327,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpNamedBarrierInitialize",
+ .opcode = 328,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMemoryNamedBarrier",
+ .opcode = 329,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpModuleProcessed",
+ .opcode = 330,
+ .operands = &.{
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExecutionModeId",
+ .opcode = 331,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .execution_mode, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDecorateId",
+ .opcode = 332,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .decoration, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformElect",
+ .opcode = 333,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformAll",
+ .opcode = 334,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformAny",
+ .opcode = 335,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformAllEqual",
+ .opcode = 336,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBroadcast",
+ .opcode = 337,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBroadcastFirst",
+ .opcode = 338,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBallot",
+ .opcode = 339,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformInverseBallot",
+ .opcode = 340,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBallotBitExtract",
+ .opcode = 341,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBallotBitCount",
+ .opcode = 342,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBallotFindLSB",
+ .opcode = 343,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBallotFindMSB",
+ .opcode = 344,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformShuffle",
+ .opcode = 345,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformShuffleXor",
+ .opcode = 346,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformShuffleUp",
+ .opcode = 347,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformShuffleDown",
+ .opcode = 348,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
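+ // For the non-uniform group reductions below, the optional trailing id is ClusterSize,
+ // only meaningful with the ClusteredReduce group operation; illustrative only
+ // (placeholder ids): `%sum = OpGroupNonUniformIAdd %u32 %scope Reduce %value`.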
+ .{
+ .name = "OpGroupNonUniformIAdd",
+ .opcode = 349,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformFAdd",
+ .opcode = 350,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformIMul",
+ .opcode = 351,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformFMul",
+ .opcode = 352,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformSMin",
+ .opcode = 353,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformUMin",
+ .opcode = 354,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformFMin",
+ .opcode = 355,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformSMax",
+ .opcode = 356,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformUMax",
+ .opcode = 357,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformFMax",
+ .opcode = 358,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBitwiseAnd",
+ .opcode = 359,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBitwiseOr",
+ .opcode = 360,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformBitwiseXor",
+ .opcode = 361,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformLogicalAnd",
+ .opcode = 362,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformLogicalOr",
+ .opcode = 363,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformLogicalXor",
+ .opcode = 364,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformQuadBroadcast",
+ .opcode = 365,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformQuadSwap",
+ .opcode = 366,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCopyLogical",
+ .opcode = 400,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpPtrEqual",
+ .opcode = 401,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpPtrNotEqual",
+ .opcode = 402,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpPtrDiff",
+ .opcode = 403,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpColorAttachmentReadEXT",
+ .opcode = 4160,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpDepthAttachmentReadEXT",
+ .opcode = 4161,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpStencilAttachmentReadEXT",
+ .opcode = 4162,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTypeTensorARM",
+ .opcode = 4163,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTensorReadARM",
+ .opcode = 4164,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .tensor_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTensorWriteARM",
+ .opcode = 4165,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .tensor_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTensorQuerySizeARM",
+ .opcode = 4166,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGraphConstantARM",
+ .opcode = 4181,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGraphEntryPointARM",
+ .opcode = 4182,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpGraphARM",
+ .opcode = 4183,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGraphInputARM",
+ .opcode = 4184,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpGraphSetOutputARM",
+ .opcode = 4185,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpGraphEndARM",
+ .opcode = 4186,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpTypeGraphARM",
+ .opcode = 4190,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTerminateInvocation",
+ .opcode = 4416,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpTypeUntypedPointerKHR",
+ .opcode = 4417,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUntypedVariableKHR",
+ .opcode = 4418,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpUntypedAccessChainKHR",
+ .opcode = 4419,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpUntypedInBoundsAccessChainKHR",
+ .opcode = 4420,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpSubgroupBallotKHR",
+ .opcode = 4421,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupFirstInvocationKHR",
+ .opcode = 4422,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUntypedPtrAccessChainKHR",
+ .opcode = 4423,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpUntypedInBoundsPtrAccessChainKHR",
+ .opcode = 4424,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpUntypedArrayLengthKHR",
+ .opcode = 4425,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUntypedPrefetchKHR",
+ .opcode = 4426,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSubgroupAllKHR",
+ .opcode = 4428,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAnyKHR",
+ .opcode = 4429,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAllEqualKHR",
+ .opcode = 4430,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformRotateKHR",
+ .opcode = 4431,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSubgroupReadInvocationKHR",
+ .opcode = 4432,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExtInstWithForwardRefsKHR",
+ .opcode = 4433,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_ext_inst_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTraceRayKHR",
+ .opcode = 4445,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExecuteCallableKHR",
+ .opcode = 4446,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertUToAccelerationStructureKHR",
+ .opcode = 4447,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIgnoreIntersectionKHR",
+ .opcode = 4448,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpTerminateRayKHR",
+ .opcode = 4449,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpSDot",
+ .opcode = 4450,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .packed_vector_format, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpUDot",
+ .opcode = 4451,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .packed_vector_format, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSUDot",
+ .opcode = 4452,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .packed_vector_format, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSDotAccSat",
+ .opcode = 4453,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .packed_vector_format, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpUDotAccSat",
+ .opcode = 4454,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .packed_vector_format, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSUDotAccSat",
+ .opcode = 4455,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .packed_vector_format, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTypeCooperativeMatrixKHR",
+ .opcode = 4456,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixLoadKHR",
+ .opcode = 4457,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixStoreKHR",
+ .opcode = 4458,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixMulAddKHR",
+ .opcode = 4459,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .cooperative_matrix_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixLengthKHR",
+ .opcode = 4460,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantCompositeReplicateEXT",
+ .opcode = 4461,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSpecConstantCompositeReplicateEXT",
+ .opcode = 4462,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCompositeConstructReplicateEXT",
+ .opcode = 4463,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeRayQueryKHR",
+ .opcode = 4472,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryInitializeKHR",
+ .opcode = 4473,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryTerminateKHR",
+ .opcode = 4474,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGenerateIntersectionKHR",
+ .opcode = 4475,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryConfirmIntersectionKHR",
+ .opcode = 4476,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryProceedKHR",
+ .opcode = 4477,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionTypeKHR",
+ .opcode = 4479,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSampleWeightedQCOM",
+ .opcode = 4480,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBoxFilterQCOM",
+ .opcode = 4481,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBlockMatchSSDQCOM",
+ .opcode = 4482,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBlockMatchSADQCOM",
+ .opcode = 4483,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBlockMatchWindowSSDQCOM",
+ .opcode = 4500,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBlockMatchWindowSADQCOM",
+ .opcode = 4501,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBlockMatchGatherSSDQCOM",
+ .opcode = 4502,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageBlockMatchGatherSADQCOM",
+ .opcode = 4503,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupIAddNonUniformAMD",
+ .opcode = 5000,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFAddNonUniformAMD",
+ .opcode = 5001,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFMinNonUniformAMD",
+ .opcode = 5002,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupUMinNonUniformAMD",
+ .opcode = 5003,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupSMinNonUniformAMD",
+ .opcode = 5004,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFMaxNonUniformAMD",
+ .opcode = 5005,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupUMaxNonUniformAMD",
+ .opcode = 5006,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupSMaxNonUniformAMD",
+ .opcode = 5007,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFragmentMaskFetchAMD",
+ .opcode = 5011,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFragmentFetchAMD",
+ .opcode = 5012,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReadClockKHR",
+ .opcode = 5056,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAllocateNodePayloadsAMDX",
+ .opcode = 5074,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEnqueueNodePayloadsAMDX",
+ .opcode = 5075,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeNodePayloadArrayAMDX",
+ .opcode = 5076,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFinishWritingNodePayloadAMDX",
+ .opcode = 5078,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpNodePayloadArrayLengthAMDX",
+ .opcode = 5090,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIsNodePayloadValidAMDX",
+ .opcode = 5101,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantStringAMDX",
+ .opcode = 5103,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSpecConstantStringAMDX",
+ .opcode = 5104,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformQuadAllKHR",
+ .opcode = 5110,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformQuadAnyKHR",
+ .opcode = 5111,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordHitMotionNV",
+ .opcode = 5249,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordHitWithIndexMotionNV",
+ .opcode = 5250,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordMissMotionNV",
+ .opcode = 5251,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetWorldToObjectNV",
+ .opcode = 5252,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetObjectToWorldNV",
+ .opcode = 5253,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetObjectRayDirectionNV",
+ .opcode = 5254,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetObjectRayOriginNV",
+ .opcode = 5255,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectTraceRayMotionNV",
+ .opcode = 5256,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetShaderRecordBufferHandleNV",
+ .opcode = 5257,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetShaderBindingTableRecordIndexNV",
+ .opcode = 5258,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordEmptyNV",
+ .opcode = 5259,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectTraceRayNV",
+ .opcode = 5260,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordHitNV",
+ .opcode = 5261,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordHitWithIndexNV",
+ .opcode = 5262,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectRecordMissNV",
+ .opcode = 5263,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectExecuteShaderNV",
+ .opcode = 5264,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetCurrentTimeNV",
+ .opcode = 5265,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetAttributesNV",
+ .opcode = 5266,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetHitKindNV",
+ .opcode = 5267,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetPrimitiveIndexNV",
+ .opcode = 5268,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetGeometryIndexNV",
+ .opcode = 5269,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetInstanceIdNV",
+ .opcode = 5270,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetInstanceCustomIndexNV",
+ .opcode = 5271,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetWorldRayDirectionNV",
+ .opcode = 5272,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetWorldRayOriginNV",
+ .opcode = 5273,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetRayTMaxNV",
+ .opcode = 5274,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetRayTMinNV",
+ .opcode = 5275,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectIsEmptyNV",
+ .opcode = 5276,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectIsHitNV",
+ .opcode = 5277,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectIsMissNV",
+ .opcode = 5278,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReorderThreadWithHitObjectNV",
+ .opcode = 5279,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpReorderThreadWithHintNV",
+ .opcode = 5280,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeHitObjectNV",
+ .opcode = 5281,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpImageSampleFootprintNV",
+ .opcode = 5283,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .image_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpTypeCooperativeVectorNV",
+ .opcode = 5288,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeVectorMatrixMulNV",
+ .opcode = 5289,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .cooperative_matrix_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeVectorOuterProductAccumulateNV",
+ .opcode = 5290,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeVectorReduceSumAccumulateNV",
+ .opcode = 5291,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeVectorMatrixMulAddNV",
+ .opcode = 5292,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .cooperative_matrix_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixConvertNV",
+ .opcode = 5293,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpEmitMeshTasksEXT",
+ .opcode = 5294,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSetMeshOutputsEXT",
+ .opcode = 5295,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupNonUniformPartitionNV",
+ .opcode = 5296,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpWritePackedPrimitiveIndices4x8NV",
+ .opcode = 5299,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFetchMicroTriangleVertexPositionNV",
+ .opcode = 5300,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFetchMicroTriangleVertexBarycentricNV",
+ .opcode = 5301,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeVectorLoadNV",
+ .opcode = 5302,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeVectorStoreNV",
+ .opcode = 5303,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpReportIntersectionKHR",
+ .opcode = 5334,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIgnoreIntersectionNV",
+ .opcode = 5335,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpTerminateRayNV",
+ .opcode = 5336,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpTraceNV",
+ .opcode = 5337,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTraceMotionNV",
+ .opcode = 5338,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTraceRayMotionNV",
+ .opcode = 5339,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionTriangleVertexPositionsKHR",
+ .opcode = 5340,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAccelerationStructureKHR",
+ .opcode = 5341,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExecuteCallableNV",
+ .opcode = 5344,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetClusterIdNV",
+ .opcode = 5345,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetClusterIdNV",
+ .opcode = 5346,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeCooperativeMatrixNV",
+ .opcode = 5358,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixLoadNV",
+ .opcode = 5359,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixStoreNV",
+ .opcode = 5360,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixMulAddNV",
+ .opcode = 5361,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixLengthNV",
+ .opcode = 5362,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpBeginInvocationInterlockEXT",
+ .opcode = 5364,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpEndInvocationInterlockEXT",
+ .opcode = 5365,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpCooperativeMatrixReduceNV",
+ .opcode = 5366,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .cooperative_matrix_reduce, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixLoadTensorNV",
+ .opcode = 5367,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .required },
+ .{ .kind = .tensor_addressing_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixStoreTensorNV",
+ .opcode = 5368,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .required },
+ .{ .kind = .tensor_addressing_operands, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixPerElementOpNV",
+ .opcode = 5369,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTypeTensorLayoutNV",
+ .opcode = 5370,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeTensorViewNV",
+ .opcode = 5371,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCreateTensorLayoutNV",
+ .opcode = 5372,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTensorLayoutSetDimensionNV",
+ .opcode = 5373,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTensorLayoutSetStrideNV",
+ .opcode = 5374,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTensorLayoutSliceNV",
+ .opcode = 5375,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTensorLayoutSetClampValueNV",
+ .opcode = 5376,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCreateTensorViewNV",
+ .opcode = 5377,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTensorViewSetDimensionNV",
+ .opcode = 5378,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTensorViewSetStrideNV",
+ .opcode = 5379,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpDemoteToHelperInvocation",
+ .opcode = 5380,
+ .operands = &.{},
+ },
+ .{
+ .name = "OpIsHelperInvocationEXT",
+ .opcode = 5381,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTensorViewSetClipNV",
+ .opcode = 5382,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTensorLayoutSetBlockSizeNV",
+ .opcode = 5384,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCooperativeMatrixTransposeNV",
+ .opcode = 5390,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertUToImageNV",
+ .opcode = 5391,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertUToSamplerNV",
+ .opcode = 5392,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertImageToUNV",
+ .opcode = 5393,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertSamplerToUNV",
+ .opcode = 5394,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertUToSampledImageNV",
+ .opcode = 5395,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertSampledImageToUNV",
+ .opcode = 5396,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSamplerImageAddressingModeNV",
+ .opcode = 5397,
+ .operands = &.{
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRawAccessChainNV",
+ .opcode = 5398,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .raw_access_chain_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionSpherePositionNV",
+ .opcode = 5427,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionSphereRadiusNV",
+ .opcode = 5428,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionLSSPositionsNV",
+ .opcode = 5429,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionLSSRadiiNV",
+ .opcode = 5430,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionLSSHitValueNV",
+ .opcode = 5431,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetSpherePositionNV",
+ .opcode = 5432,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetSphereRadiusNV",
+ .opcode = 5433,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetLSSPositionsNV",
+ .opcode = 5434,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectGetLSSRadiiNV",
+ .opcode = 5435,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectIsSphereHitNV",
+ .opcode = 5436,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpHitObjectIsLSSHitNV",
+ .opcode = 5437,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryIsSphereHitNV",
+ .opcode = 5438,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryIsLSSHitNV",
+ .opcode = 5439,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupShuffleINTEL",
+ .opcode = 5571,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupShuffleDownINTEL",
+ .opcode = 5572,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupShuffleUpINTEL",
+ .opcode = 5573,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupShuffleXorINTEL",
+ .opcode = 5574,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupBlockReadINTEL",
+ .opcode = 5575,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupBlockWriteINTEL",
+ .opcode = 5576,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupImageBlockReadINTEL",
+ .opcode = 5577,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupImageBlockWriteINTEL",
+ .opcode = 5578,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupImageMediaBlockReadINTEL",
+ .opcode = 5580,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupImageMediaBlockWriteINTEL",
+ .opcode = 5581,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUCountLeadingZerosINTEL",
+ .opcode = 5585,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUCountTrailingZerosINTEL",
+ .opcode = 5586,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAbsISubINTEL",
+ .opcode = 5587,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAbsUSubINTEL",
+ .opcode = 5588,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIAddSatINTEL",
+ .opcode = 5589,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUAddSatINTEL",
+ .opcode = 5590,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIAverageINTEL",
+ .opcode = 5591,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUAverageINTEL",
+ .opcode = 5592,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIAverageRoundedINTEL",
+ .opcode = 5593,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUAverageRoundedINTEL",
+ .opcode = 5594,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpISubSatINTEL",
+ .opcode = 5595,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUSubSatINTEL",
+ .opcode = 5596,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpIMul32x16INTEL",
+ .opcode = 5597,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpUMul32x16INTEL",
+ .opcode = 5598,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConstantFunctionPointerINTEL",
+ .opcode = 5600,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFunctionPointerCallINTEL",
+ .opcode = 5601,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpAsmTargetINTEL",
+ .opcode = 5609,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAsmINTEL",
+ .opcode = 5610,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ .{ .kind = .literal_string, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAsmCallINTEL",
+ .opcode = 5611,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpAtomicFMinEXT",
+ .opcode = 5614,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicFMaxEXT",
+ .opcode = 5615,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAssumeTrueKHR",
+ .opcode = 5630,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpExpectKHR",
+ .opcode = 5631,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpDecorateString",
+ .opcode = 5632,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .decoration, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMemberDecorateString",
+ .opcode = 5633,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .decoration, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpVmeImageINTEL",
+ .opcode = 5699,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeVmeImageINTEL",
+ .opcode = 5700,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcImePayloadINTEL",
+ .opcode = 5701,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcRefPayloadINTEL",
+ .opcode = 5702,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcSicPayloadINTEL",
+ .opcode = 5703,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcMcePayloadINTEL",
+ .opcode = 5704,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcMceResultINTEL",
+ .opcode = 5705,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcImeResultINTEL",
+ .opcode = 5706,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcImeResultSingleReferenceStreamoutINTEL",
+ .opcode = 5707,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcImeResultDualReferenceStreamoutINTEL",
+ .opcode = 5708,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcImeSingleReferenceStreaminINTEL",
+ .opcode = 5709,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcImeDualReferenceStreaminINTEL",
+ .opcode = 5710,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcRefResultINTEL",
+ .opcode = 5711,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeAvcSicResultINTEL",
+ .opcode = 5712,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL",
+ .opcode = 5713,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL",
+ .opcode = 5714,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL",
+ .opcode = 5715,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetInterShapePenaltyINTEL",
+ .opcode = 5716,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL",
+ .opcode = 5717,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetInterDirectionPenaltyINTEL",
+ .opcode = 5718,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL",
+ .opcode = 5719,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL",
+ .opcode = 5720,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL",
+ .opcode = 5721,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL",
+ .opcode = 5722,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL",
+ .opcode = 5723,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL",
+ .opcode = 5724,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL",
+ .opcode = 5725,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL",
+ .opcode = 5726,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL",
+ .opcode = 5727,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetAcOnlyHaarINTEL",
+ .opcode = 5728,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL",
+ .opcode = 5729,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL",
+ .opcode = 5730,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL",
+ .opcode = 5731,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceConvertToImePayloadINTEL",
+ .opcode = 5732,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceConvertToImeResultINTEL",
+ .opcode = 5733,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceConvertToRefPayloadINTEL",
+ .opcode = 5734,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceConvertToRefResultINTEL",
+ .opcode = 5735,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceConvertToSicPayloadINTEL",
+ .opcode = 5736,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceConvertToSicResultINTEL",
+ .opcode = 5737,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetMotionVectorsINTEL",
+ .opcode = 5738,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterDistortionsINTEL",
+ .opcode = 5739,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetBestInterDistortionsINTEL",
+ .opcode = 5740,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterMajorShapeINTEL",
+ .opcode = 5741,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterMinorShapeINTEL",
+ .opcode = 5742,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterDirectionsINTEL",
+ .opcode = 5743,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterMotionVectorCountINTEL",
+ .opcode = 5744,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterReferenceIdsINTEL",
+ .opcode = 5745,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL",
+ .opcode = 5746,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeInitializeINTEL",
+ .opcode = 5747,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeSetSingleReferenceINTEL",
+ .opcode = 5748,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeSetDualReferenceINTEL",
+ .opcode = 5749,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeRefWindowSizeINTEL",
+ .opcode = 5750,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeAdjustRefOffsetINTEL",
+ .opcode = 5751,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeConvertToMcePayloadINTEL",
+ .opcode = 5752,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeSetMaxMotionVectorCountINTEL",
+ .opcode = 5753,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL",
+ .opcode = 5754,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL",
+ .opcode = 5755,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeSetWeightedSadINTEL",
+ .opcode = 5756,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL",
+ .opcode = 5757,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithDualReferenceINTEL",
+ .opcode = 5758,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL",
+ .opcode = 5759,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL",
+ .opcode = 5760,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL",
+ .opcode = 5761,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL",
+ .opcode = 5762,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL",
+ .opcode = 5763,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL",
+ .opcode = 5764,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeConvertToMceResultINTEL",
+ .opcode = 5765,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetSingleReferenceStreaminINTEL",
+ .opcode = 5766,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetDualReferenceStreaminINTEL",
+ .opcode = 5767,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL",
+ .opcode = 5768,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeStripDualReferenceStreamoutINTEL",
+ .opcode = 5769,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL",
+ .opcode = 5770,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL",
+ .opcode = 5771,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL",
+ .opcode = 5772,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL",
+ .opcode = 5773,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL",
+ .opcode = 5774,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL",
+ .opcode = 5775,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetBorderReachedINTEL",
+ .opcode = 5776,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL",
+ .opcode = 5777,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL",
+ .opcode = 5778,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL",
+ .opcode = 5779,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL",
+ .opcode = 5780,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcFmeInitializeINTEL",
+ .opcode = 5781,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcBmeInitializeINTEL",
+ .opcode = 5782,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefConvertToMcePayloadINTEL",
+ .opcode = 5783,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefSetBidirectionalMixDisableINTEL",
+ .opcode = 5784,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefSetBilinearFilterEnableINTEL",
+ .opcode = 5785,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL",
+ .opcode = 5786,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefEvaluateWithDualReferenceINTEL",
+ .opcode = 5787,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL",
+ .opcode = 5788,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL",
+ .opcode = 5789,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcRefConvertToMceResultINTEL",
+ .opcode = 5790,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicInitializeINTEL",
+ .opcode = 5791,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicConfigureSkcINTEL",
+ .opcode = 5792,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicConfigureIpeLumaINTEL",
+ .opcode = 5793,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicConfigureIpeLumaChromaINTEL",
+ .opcode = 5794,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetMotionVectorMaskINTEL",
+ .opcode = 5795,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicConvertToMcePayloadINTEL",
+ .opcode = 5796,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL",
+ .opcode = 5797,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL",
+ .opcode = 5798,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL",
+ .opcode = 5799,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicSetBilinearFilterEnableINTEL",
+ .opcode = 5800,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL",
+ .opcode = 5801,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL",
+ .opcode = 5802,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicEvaluateIpeINTEL",
+ .opcode = 5803,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL",
+ .opcode = 5804,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicEvaluateWithDualReferenceINTEL",
+ .opcode = 5805,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL",
+ .opcode = 5806,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL",
+ .opcode = 5807,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicConvertToMceResultINTEL",
+ .opcode = 5808,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetIpeLumaShapeINTEL",
+ .opcode = 5809,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL",
+ .opcode = 5810,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL",
+ .opcode = 5811,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetPackedIpeLumaModesINTEL",
+ .opcode = 5812,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetIpeChromaModeINTEL",
+ .opcode = 5813,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL",
+ .opcode = 5814,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL",
+ .opcode = 5815,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupAvcSicGetInterRawSadsINTEL",
+ .opcode = 5816,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpVariableLengthArrayINTEL",
+ .opcode = 5818,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSaveMemoryINTEL",
+ .opcode = 5819,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRestoreMemoryINTEL",
+ .opcode = 5820,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatSinCosPiINTEL",
+ .opcode = 5840,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatCastINTEL",
+ .opcode = 5841,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatCastFromIntINTEL",
+ .opcode = 5842,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatCastToIntINTEL",
+ .opcode = 5843,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatAddINTEL",
+ .opcode = 5846,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatSubINTEL",
+ .opcode = 5847,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatMulINTEL",
+ .opcode = 5848,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatDivINTEL",
+ .opcode = 5849,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatGTINTEL",
+ .opcode = 5850,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatGEINTEL",
+ .opcode = 5851,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatLTINTEL",
+ .opcode = 5852,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatLEINTEL",
+ .opcode = 5853,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatEQINTEL",
+ .opcode = 5854,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatRecipINTEL",
+ .opcode = 5855,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatRSqrtINTEL",
+ .opcode = 5856,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatCbrtINTEL",
+ .opcode = 5857,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatHypotINTEL",
+ .opcode = 5858,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatSqrtINTEL",
+ .opcode = 5859,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatLogINTEL",
+ .opcode = 5860,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatLog2INTEL",
+ .opcode = 5861,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatLog10INTEL",
+ .opcode = 5862,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatLog1pINTEL",
+ .opcode = 5863,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatExpINTEL",
+ .opcode = 5864,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatExp2INTEL",
+ .opcode = 5865,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatExp10INTEL",
+ .opcode = 5866,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatExpm1INTEL",
+ .opcode = 5867,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatSinINTEL",
+ .opcode = 5868,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatCosINTEL",
+ .opcode = 5869,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatSinCosINTEL",
+ .opcode = 5870,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatSinPiINTEL",
+ .opcode = 5871,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatCosPiINTEL",
+ .opcode = 5872,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatASinINTEL",
+ .opcode = 5873,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatASinPiINTEL",
+ .opcode = 5874,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatACosINTEL",
+ .opcode = 5875,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatACosPiINTEL",
+ .opcode = 5876,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatATanINTEL",
+ .opcode = 5877,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatATanPiINTEL",
+ .opcode = 5878,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatATan2INTEL",
+ .opcode = 5879,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatPowINTEL",
+ .opcode = 5880,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatPowRINTEL",
+ .opcode = 5881,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArbitraryFloatPowNINTEL",
+ .opcode = 5882,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpLoopControlINTEL",
+ .opcode = 5887,
+ .operands = &.{
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpAliasDomainDeclINTEL",
+ .opcode = 5911,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpAliasScopeDeclINTEL",
+ .opcode = 5912,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpAliasScopeListDeclINTEL",
+ .opcode = 5913,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpFixedSqrtINTEL",
+ .opcode = 5923,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedRecipINTEL",
+ .opcode = 5924,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedRsqrtINTEL",
+ .opcode = 5925,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedSinINTEL",
+ .opcode = 5926,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedCosINTEL",
+ .opcode = 5927,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedSinCosINTEL",
+ .opcode = 5928,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedSinPiINTEL",
+ .opcode = 5929,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedCosPiINTEL",
+ .opcode = 5930,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedSinCosPiINTEL",
+ .opcode = 5931,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedLogINTEL",
+ .opcode = 5932,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFixedExpINTEL",
+ .opcode = 5933,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpPtrCastToCrossWorkgroupINTEL",
+ .opcode = 5934,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpCrossWorkgroupCastToPtrINTEL",
+ .opcode = 5938,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpReadPipeBlockingINTEL",
+ .opcode = 5946,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpWritePipeBlockingINTEL",
+ .opcode = 5947,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpFPGARegINTEL",
+ .opcode = 5949,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetRayTMinKHR",
+ .opcode = 6016,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetRayFlagsKHR",
+ .opcode = 6017,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionTKHR",
+ .opcode = 6018,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionInstanceCustomIndexKHR",
+ .opcode = 6019,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionInstanceIdKHR",
+ .opcode = 6020,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR",
+ .opcode = 6021,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionGeometryIndexKHR",
+ .opcode = 6022,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionPrimitiveIndexKHR",
+ .opcode = 6023,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionBarycentricsKHR",
+ .opcode = 6024,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionFrontFaceKHR",
+ .opcode = 6025,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionCandidateAABBOpaqueKHR",
+ .opcode = 6026,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionObjectRayDirectionKHR",
+ .opcode = 6027,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionObjectRayOriginKHR",
+ .opcode = 6028,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetWorldRayDirectionKHR",
+ .opcode = 6029,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetWorldRayOriginKHR",
+ .opcode = 6030,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionObjectToWorldKHR",
+ .opcode = 6031,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRayQueryGetIntersectionWorldToObjectKHR",
+ .opcode = 6032,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpAtomicFAddEXT",
+ .opcode = 6035,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeBufferSurfaceINTEL",
+ .opcode = 6086,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .access_qualifier, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeStructContinuedINTEL",
+ .opcode = 6090,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpConstantCompositeContinuedINTEL",
+ .opcode = 6091,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpSpecConstantCompositeContinuedINTEL",
+ .opcode = 6092,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpCompositeConstructContinuedINTEL",
+ .opcode = 6096,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpConvertFToBF16INTEL",
+ .opcode = 6116,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertBF16ToFINTEL",
+ .opcode = 6117,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpControlBarrierArriveINTEL",
+ .opcode = 6142,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpControlBarrierWaitINTEL",
+ .opcode = 6143,
+ .operands = &.{
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .id_memory_semantics, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpArithmeticFenceEXT",
+ .opcode = 6145,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTaskSequenceCreateINTEL",
+ .opcode = 6163,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTaskSequenceAsyncINTEL",
+ .opcode = 6164,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "OpTaskSequenceGetINTEL",
+ .opcode = 6165,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTaskSequenceReleaseINTEL",
+ .opcode = 6166,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpTypeTaskSequenceINTEL",
+ .opcode = 6199,
+ .operands = &.{
+ .{ .kind = .id_result, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupBlockPrefetchINTEL",
+ .opcode = 6221,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .memory_access, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpSubgroup2DBlockLoadINTEL",
+ .opcode = 6231,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroup2DBlockLoadTransformINTEL",
+ .opcode = 6232,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroup2DBlockLoadTransposeINTEL",
+ .opcode = 6233,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroup2DBlockPrefetchINTEL",
+ .opcode = 6234,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroup2DBlockStoreINTEL",
+ .opcode = 6235,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpSubgroupMatrixMultiplyAccumulateINTEL",
+ .opcode = 6237,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .matrix_multiply_accumulate_operands, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "OpBitwiseFunctionINTEL",
+ .opcode = 6242,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupIMulKHR",
+ .opcode = 6401,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupFMulKHR",
+ .opcode = 6402,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupBitwiseAndKHR",
+ .opcode = 6403,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupBitwiseOrKHR",
+ .opcode = 6404,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupBitwiseXorKHR",
+ .opcode = 6405,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupLogicalAndKHR",
+ .opcode = 6406,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupLogicalOrKHR",
+ .opcode = 6407,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpGroupLogicalXorKHR",
+ .opcode = 6408,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_scope, .quantifier = .required },
+ .{ .kind = .group_operation, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpRoundFToTF32INTEL",
+ .opcode = 6426,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMaskedGatherINTEL",
+ .opcode = 6428,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpMaskedScatterINTEL",
+ .opcode = 6429,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertHandleToImageINTEL",
+ .opcode = 6529,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertHandleToSamplerINTEL",
+ .opcode = 6530,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "OpConvertHandleToSampledImageINTEL",
+ .opcode = 6531,
+ .operands = &.{
+ .{ .kind = .id_result_type, .quantifier = .required },
+ .{ .kind = .id_result, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .SPV_AMD_shader_trinary_minmax => &.{
+ .{
+ .name = "FMin3AMD",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UMin3AMD",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SMin3AMD",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FMax3AMD",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UMax3AMD",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SMax3AMD",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FMid3AMD",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UMid3AMD",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SMid3AMD",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .SPV_EXT_INST_TYPE_TOSA_001000_1 => &.{
+ .{
+ .name = "ARGMAX",
+ .opcode = 0,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "AVG_POOL2D",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CONV2D",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CONV3D",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DEPTHWISE_CONV2D",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FFT2D",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MATMUL",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MAX_POOL2D",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RFFT2D",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "TRANSPOSE_CONV2D",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CLAMP",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ERF",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SIGMOID",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "TANH",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ADD",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ARITHMETIC_RIGHT_SHIFT",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "BITWISE_AND",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "BITWISE_OR",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "BITWISE_XOR",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "INTDIV",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOGICAL_AND",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOGICAL_LEFT_SHIFT",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOGICAL_RIGHT_SHIFT",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOGICAL_OR",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOGICAL_XOR",
+ .opcode = 24,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MAXIMUM",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MINIMUM",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MUL",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "POW",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SUB",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "TABLE",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ABS",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "BITWISE_NOT",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CEIL",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CLZ",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "COS",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "EXP",
+ .opcode = 36,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FLOOR",
+ .opcode = 37,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOG",
+ .opcode = 38,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LOGICAL_NOT",
+ .opcode = 39,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "NEGATE",
+ .opcode = 40,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RECIPROCAL",
+ .opcode = 41,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RSQRT",
+ .opcode = 42,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SIN",
+ .opcode = 43,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SELECT",
+ .opcode = 44,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "EQUAL",
+ .opcode = 45,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "GREATER",
+ .opcode = 46,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "GREATER_EQUAL",
+ .opcode = 47,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REDUCE_ALL",
+ .opcode = 48,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REDUCE_ANY",
+ .opcode = 49,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REDUCE_MAX",
+ .opcode = 50,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REDUCE_MIN",
+ .opcode = 51,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REDUCE_PRODUCT",
+ .opcode = 52,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REDUCE_SUM",
+ .opcode = 53,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CONCAT",
+ .opcode = 54,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "PAD",
+ .opcode = 55,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RESHAPE",
+ .opcode = 56,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "REVERSE",
+ .opcode = 57,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SLICE",
+ .opcode = 58,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "TILE",
+ .opcode = 59,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "TRANSPOSE",
+ .opcode = 60,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "GATHER",
+ .opcode = 61,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SCATTER",
+ .opcode = 62,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RESIZE",
+ .opcode = 63,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CAST",
+ .opcode = 64,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RESCALE",
+ .opcode = 65,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .@"NonSemantic.VkspReflection" => &.{
+ .{
+ .name = "Configuration",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "StartCounter",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "StopCounter",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstants",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SpecializationMapEntry",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DescriptorSetBuffer",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DescriptorSetImage",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DescriptorSetSampler",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .SPV_AMD_shader_explicit_vertex_parameter => &.{
+ .{
+ .name = "InterpolateAtVertexAMD",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .DebugInfo => &.{
+ .{
+ .name = "DebugInfoNone",
+ .opcode = 0,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugCompilationUnit",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeBasic",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_base_type_attribute_encoding, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypePointer",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeQualifier",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_type_qualifier, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeArray",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeVector",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypedef",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeFunction",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeEnum",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ .{ .kind = .pair_id_ref_id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeComposite",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_composite_type, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeMember",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugTypeInheritance",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypePtrToMember",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplate",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateParameter",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateTemplateParameter",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateParameterPack",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugGlobalVariable",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugFunctionDeclaration",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugFunction",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .debug_info_debug_info_flags, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLexicalBlock",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLexicalBlockDiscriminator",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugScope",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugNoScope",
+ .opcode = 24,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugInlinedAt",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLocalVariable",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugInlinedVariable",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugDeclare",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugValue",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugOperation",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .debug_info_debug_operation, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugExpression",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugMacroDef",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugMacroUndef",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .@"NonSemantic.DebugBreak" => &.{
+ .{
+ .name = "DebugBreak",
+ .opcode = 1,
+ .operands = &.{},
+ },
+ },
+ .@"OpenCL.DebugInfo.100" => &.{
+ .{
+ .name = "DebugInfoNone",
+ .opcode = 0,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugCompilationUnit",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .source_language, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeBasic",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_base_type_attribute_encoding, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypePointer",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .storage_class, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeQualifier",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_type_qualifier, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeArray",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeVector",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypedef",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeFunction",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeEnum",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .pair_id_ref_id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeComposite",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_composite_type, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeMember",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugTypeInheritance",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypePtrToMember",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplate",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateParameter",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateTemplateParameter",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateParameterPack",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugGlobalVariable",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugFunctionDeclaration",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugFunction",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLexicalBlock",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLexicalBlockDiscriminator",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugScope",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugNoScope",
+ .opcode = 24,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugInlinedAt",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLocalVariable",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_info_flags, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugInlinedVariable",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugDeclare",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugValue",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugOperation",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .open_cl_debug_info_100_debug_operation, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugExpression",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugMacroDef",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugMacroUndef",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugImportedEntity",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .open_cl_debug_info_100_debug_imported_entity, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugSource",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugModuleINTEL",
+ .opcode = 36,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ },
+ .@"NonSemantic.ClspvReflection.6" => &.{
+ .{
+ .name = "Kernel",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentInfo",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentStorageBuffer",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentUniform",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentPodStorageBuffer",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentPodUniform",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentPodPushConstant",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentSampledImage",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentStorageImage",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentSampler",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentWorkgroup",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "SpecConstantWorkgroupSize",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SpecConstantGlobalOffset",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SpecConstantWorkDim",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstantGlobalOffset",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstantEnqueuedLocalSize",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstantGlobalSize",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstantRegionOffset",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstantNumWorkgroups",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PushConstantRegionGroupOffset",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ConstantDataStorageBuffer",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ConstantDataUniform",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "LiteralSampler",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PropertyRequiredWorkgroupSize",
+ .opcode = 24,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SpecConstantSubgroupMaxSize",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ArgumentPointerPushConstant",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentPointerUniform",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ProgramScopeVariablesStorageBuffer",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ProgramScopeVariablePointerRelocation",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ImageArgumentInfoChannelOrderPushConstant",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ImageArgumentInfoChannelDataTypePushConstant",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ImageArgumentInfoChannelOrderUniform",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ImageArgumentInfoChannelDataTypeUniform",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ArgumentStorageTexelBuffer",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ArgumentUniformTexelBuffer",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "ConstantDataPointerPushConstant",
+ .opcode = 36,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ProgramScopeVariablePointerPushConstant",
+ .opcode = 37,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PrintfInfo",
+ .opcode = 38,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "PrintfBufferStorageBuffer",
+ .opcode = 39,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PrintfBufferPointerPushConstant",
+ .opcode = 40,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "NormalizedSamplerMaskPushConstant",
+ .opcode = 41,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "WorkgroupVariableSize",
+ .opcode = 42,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .@"GLSL.std.450" => &.{
+ .{
+ .name = "Round",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "RoundEven",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Trunc",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FAbs",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SAbs",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FSign",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SSign",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Floor",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Ceil",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Fract",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Radians",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Degrees",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Sin",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Cos",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Tan",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Asin",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Acos",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Atan",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Sinh",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Cosh",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Tanh",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Asinh",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Acosh",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Atanh",
+ .opcode = 24,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Atan2",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Pow",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Exp",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Log",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Exp2",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Log2",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Sqrt",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "InverseSqrt",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Determinant",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MatrixInverse",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Modf",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ModfStruct",
+ .opcode = 36,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FMin",
+ .opcode = 37,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UMin",
+ .opcode = 38,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SMin",
+ .opcode = 39,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FMax",
+ .opcode = 40,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UMax",
+ .opcode = 41,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SMax",
+ .opcode = 42,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FClamp",
+ .opcode = 43,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UClamp",
+ .opcode = 44,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SClamp",
+ .opcode = 45,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FMix",
+ .opcode = 46,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "IMix",
+ .opcode = 47,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Step",
+ .opcode = 48,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SmoothStep",
+ .opcode = 49,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Fma",
+ .opcode = 50,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Frexp",
+ .opcode = 51,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FrexpStruct",
+ .opcode = 52,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Ldexp",
+ .opcode = 53,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PackSnorm4x8",
+ .opcode = 54,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PackUnorm4x8",
+ .opcode = 55,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PackSnorm2x16",
+ .opcode = 56,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PackUnorm2x16",
+ .opcode = 57,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PackHalf2x16",
+ .opcode = 58,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "PackDouble2x32",
+ .opcode = 59,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UnpackSnorm2x16",
+ .opcode = 60,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UnpackUnorm2x16",
+ .opcode = 61,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UnpackHalf2x16",
+ .opcode = 62,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UnpackSnorm4x8",
+ .opcode = 63,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UnpackUnorm4x8",
+ .opcode = 64,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "UnpackDouble2x32",
+ .opcode = 65,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Length",
+ .opcode = 66,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Distance",
+ .opcode = 67,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Cross",
+ .opcode = 68,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Normalize",
+ .opcode = 69,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FaceForward",
+ .opcode = 70,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Reflect",
+ .opcode = 71,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "Refract",
+ .opcode = 72,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FindILsb",
+ .opcode = 73,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FindSMsb",
+ .opcode = 74,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "FindUMsb",
+ .opcode = 75,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "InterpolateAtCentroid",
+ .opcode = 76,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "InterpolateAtSample",
+ .opcode = 77,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "InterpolateAtOffset",
+ .opcode = 78,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "NMin",
+ .opcode = 79,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "NMax",
+ .opcode = 80,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "NClamp",
+ .opcode = 81,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .SPV_AMD_shader_ballot => &.{
+ .{
+ .name = "SwizzleInvocationsAMD",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "SwizzleInvocationsMaskedAMD",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "WriteInvocationAMD",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "MbcntAMD",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .@"NonSemantic.DebugPrintf" => &.{
+ .{
+ .name = "DebugPrintf",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ },
+ .SPV_AMD_gcn_shader => &.{
+ .{
+ .name = "CubeFaceIndexAMD",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "CubeFaceCoordAMD",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "TimeAMD",
+ .opcode = 3,
+ .operands = &.{},
+ },
+ },
+ .@"OpenCL.std" => &.{
+ .{
+ .name = "acos",
+ .opcode = 0,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "acosh",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "acospi",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "asin",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "asinh",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "asinpi",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "atan",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "atan2",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "atanh",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "atanpi",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "atan2pi",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "cbrt",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ceil",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "copysign",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "cos",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "cosh",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "cospi",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "erfc",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "erf",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "exp",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "exp2",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "exp10",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "expm1",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fabs",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fdim",
+ .opcode = 24,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "floor",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fma",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fmax",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fmin",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fmod",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fract",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "frexp",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "hypot",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ilogb",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ldexp",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "lgamma",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "lgamma_r",
+ .opcode = 36,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "log",
+ .opcode = 37,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "log2",
+ .opcode = 38,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "log10",
+ .opcode = 39,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "log1p",
+ .opcode = 40,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "logb",
+ .opcode = 41,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "mad",
+ .opcode = 42,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "maxmag",
+ .opcode = 43,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "minmag",
+ .opcode = 44,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "modf",
+ .opcode = 45,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "nan",
+ .opcode = 46,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "nextafter",
+ .opcode = 47,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "pow",
+ .opcode = 48,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "pown",
+ .opcode = 49,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "powr",
+ .opcode = 50,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "remainder",
+ .opcode = 51,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "remquo",
+ .opcode = 52,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "rint",
+ .opcode = 53,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "rootn",
+ .opcode = 54,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "round",
+ .opcode = 55,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "rsqrt",
+ .opcode = 56,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "sin",
+ .opcode = 57,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "sincos",
+ .opcode = 58,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "sinh",
+ .opcode = 59,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "sinpi",
+ .opcode = 60,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "sqrt",
+ .opcode = 61,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "tan",
+ .opcode = 62,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "tanh",
+ .opcode = 63,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "tanpi",
+ .opcode = 64,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "tgamma",
+ .opcode = 65,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "trunc",
+ .opcode = 66,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_cos",
+ .opcode = 67,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_divide",
+ .opcode = 68,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_exp",
+ .opcode = 69,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_exp2",
+ .opcode = 70,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_exp10",
+ .opcode = 71,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_log",
+ .opcode = 72,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_log2",
+ .opcode = 73,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_log10",
+ .opcode = 74,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_powr",
+ .opcode = 75,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_recip",
+ .opcode = 76,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_rsqrt",
+ .opcode = 77,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_sin",
+ .opcode = 78,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_sqrt",
+ .opcode = 79,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "half_tan",
+ .opcode = 80,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_cos",
+ .opcode = 81,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_divide",
+ .opcode = 82,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_exp",
+ .opcode = 83,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_exp2",
+ .opcode = 84,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_exp10",
+ .opcode = 85,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_log",
+ .opcode = 86,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_log2",
+ .opcode = 87,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_log10",
+ .opcode = 88,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_powr",
+ .opcode = 89,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_recip",
+ .opcode = 90,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_rsqrt",
+ .opcode = 91,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_sin",
+ .opcode = 92,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_sqrt",
+ .opcode = 93,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "native_tan",
+ .opcode = 94,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fclamp",
+ .opcode = 95,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "degrees",
+ .opcode = 96,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fmax_common",
+ .opcode = 97,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fmin_common",
+ .opcode = 98,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "mix",
+ .opcode = 99,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "radians",
+ .opcode = 100,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "step",
+ .opcode = 101,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "smoothstep",
+ .opcode = 102,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "sign",
+ .opcode = 103,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "cross",
+ .opcode = 104,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "distance",
+ .opcode = 105,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "length",
+ .opcode = 106,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "normalize",
+ .opcode = 107,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fast_distance",
+ .opcode = 108,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fast_length",
+ .opcode = 109,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "fast_normalize",
+ .opcode = 110,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_abs",
+ .opcode = 141,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_abs_diff",
+ .opcode = 142,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_add_sat",
+ .opcode = 143,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_add_sat",
+ .opcode = 144,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_hadd",
+ .opcode = 145,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_hadd",
+ .opcode = 146,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_rhadd",
+ .opcode = 147,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_rhadd",
+ .opcode = 148,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_clamp",
+ .opcode = 149,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_clamp",
+ .opcode = 150,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "clz",
+ .opcode = 151,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "ctz",
+ .opcode = 152,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_mad_hi",
+ .opcode = 153,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_mad_sat",
+ .opcode = 154,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_mad_sat",
+ .opcode = 155,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_max",
+ .opcode = 156,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_max",
+ .opcode = 157,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_min",
+ .opcode = 158,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_min",
+ .opcode = 159,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_mul_hi",
+ .opcode = 160,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "rotate",
+ .opcode = 161,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_sub_sat",
+ .opcode = 162,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_sub_sat",
+ .opcode = 163,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_upsample",
+ .opcode = 164,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_upsample",
+ .opcode = 165,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "popcount",
+ .opcode = 166,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_mad24",
+ .opcode = 167,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_mad24",
+ .opcode = 168,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "s_mul24",
+ .opcode = 169,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_mul24",
+ .opcode = 170,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vloadn",
+ .opcode = 171,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstoren",
+ .opcode = 172,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vload_half",
+ .opcode = 173,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vload_halfn",
+ .opcode = 174,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstore_half",
+ .opcode = 175,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstore_half_r",
+ .opcode = 176,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .fp_rounding_mode, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstore_halfn",
+ .opcode = 177,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstore_halfn_r",
+ .opcode = 178,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .fp_rounding_mode, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vloada_halfn",
+ .opcode = 179,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .literal_integer, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstorea_halfn",
+ .opcode = 180,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "vstorea_halfn_r",
+ .opcode = 181,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .fp_rounding_mode, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "shuffle",
+ .opcode = 182,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "shuffle2",
+ .opcode = 183,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "printf",
+ .opcode = 184,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "prefetch",
+ .opcode = 185,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "bitselect",
+ .opcode = 186,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "select",
+ .opcode = 187,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_abs",
+ .opcode = 201,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_abs_diff",
+ .opcode = 202,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_mul_hi",
+ .opcode = 203,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "u_mad_hi",
+ .opcode = 204,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .@"NonSemantic.Shader.DebugInfo.100" => &.{
+ .{
+ .name = "DebugInfoNone",
+ .opcode = 0,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugCompilationUnit",
+ .opcode = 1,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeBasic",
+ .opcode = 2,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypePointer",
+ .opcode = 3,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeQualifier",
+ .opcode = 4,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeArray",
+ .opcode = 5,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeVector",
+ .opcode = 6,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypedef",
+ .opcode = 7,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeFunction",
+ .opcode = 8,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeEnum",
+ .opcode = 9,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .pair_id_ref_id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeComposite",
+ .opcode = 10,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeMember",
+ .opcode = 11,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugTypeInheritance",
+ .opcode = 12,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypePtrToMember",
+ .opcode = 13,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplate",
+ .opcode = 14,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateParameter",
+ .opcode = 15,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateTemplateParameter",
+ .opcode = 16,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeTemplateParameterPack",
+ .opcode = 17,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugGlobalVariable",
+ .opcode = 18,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugFunctionDeclaration",
+ .opcode = 19,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugFunction",
+ .opcode = 20,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLexicalBlock",
+ .opcode = 21,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLexicalBlockDiscriminator",
+ .opcode = 22,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugScope",
+ .opcode = 23,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugNoScope",
+ .opcode = 24,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugInlinedAt",
+ .opcode = 25,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugLocalVariable",
+ .opcode = 26,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugInlinedVariable",
+ .opcode = 27,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugDeclare",
+ .opcode = 28,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugValue",
+ .opcode = 29,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugOperation",
+ .opcode = 30,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugExpression",
+ .opcode = 31,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .variadic },
+ },
+ },
+ .{
+ .name = "DebugMacroDef",
+ .opcode = 32,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugMacroUndef",
+ .opcode = 33,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugImportedEntity",
+ .opcode = 34,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugSource",
+ .opcode = 35,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .optional },
+ },
+ },
+ .{
+ .name = "DebugFunctionDefinition",
+ .opcode = 101,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugSourceContinued",
+ .opcode = 102,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugLine",
+ .opcode = 103,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugNoLine",
+ .opcode = 104,
+ .operands = &.{},
+ },
+ .{
+ .name = "DebugBuildIdentifier",
+ .opcode = 105,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugStoragePath",
+ .opcode = 106,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugEntryPoint",
+ .opcode = 107,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ .{
+ .name = "DebugTypeMatrix",
+ .opcode = 108,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ .zig => &.{
+ .{
+ .name = "InvocationGlobal",
+ .opcode = 0,
+ .operands = &.{
+ .{ .kind = .id_ref, .quantifier = .required },
+ },
+ },
+ },
+ };
+ }
+};
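+
+// Illustrative sketch only, not part of the generated grammar above: a lookup like
+// the one below could resolve an extended instruction by name from these tables.
+// The helper name is hypothetical; it assumes `std` is imported at the top of this
+// file and that `InstructionSet.instructions()` yields entries with the `.name` and
+// `.opcode` fields laid out above.
+fn extInstOpcodeByName(set: InstructionSet, name: []const u8) ?u32 {
+    for (set.instructions()) |inst| {
+        if (std.mem.eql(u8, inst.name, name)) return inst.opcode;
+    }
+    return null;
+}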