36 files changed, 7011 insertions, 878 deletions
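The user-visible change to std.fs.path in this diff: resolve, resolvePosix, and resolveWindows no longer call process.getCwdAlloc(); they now assert paths.len > 0, keep relative inputs relative, and return "." when everything cancels out. A minimal sketch of the new behavior, written as a self-contained test; the expected values come from the updated tests in lib/std/fs/path.zig further down:

const std = @import("std");

test "resolve keeps relative paths relative" {
    const gpa = std.testing.allocator;

    // "a/b/c" followed by three ".." now nets out to ".." instead of
    // being joined onto the current working directory.
    const up = try std.fs.path.resolvePosix(gpa, &[_][]const u8{ "a/b/c/", "../../.." });
    defer gpa.free(up);
    try std.testing.expectEqualStrings("..", up);

    // "." resolves to "." rather than expanding to the cwd.
    const dot = try std.fs.path.resolvePosix(gpa, &[_][]const u8{"."});
    defer gpa.free(dot);
    try std.testing.expectEqualStrings(".", dot);
}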
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 59ae03e8b7..7a278e070e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -4,22 +4,20 @@ on: push: branches: - master +concurrency: + # Cancels pending runs when a PR gets updated. + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true jobs: x86_64-linux-debug: - outputs: - version: ${{ steps.version.outputs.version }} runs-on: [self-hosted, Linux, x86_64] steps: - name: Checkout uses: actions/checkout@v3 - name: Build and Test run: sh ./ci/linux/build-x86_64-debug.sh - # The following step is required by the build-tarballs job. - # If this job is being deleted / commented out, make sure - # to have another job provide this information. - - name: Get Version - id: version - run: echo "version=$(stage3/bin/zig version)" >> $GITHUB_OUTPUT + - name: Print Version + run: echo "$(build-debug/stage3-debug/bin/zig version)" x86_64-linux-release: runs-on: [self-hosted, Linux, x86_64] steps: diff --git a/ci/linux/build-aarch64.sh b/ci/linux/build-aarch64.sh index 07e3d3cf09..2f57e06f05 100644 --- a/ci/linux/build-aarch64.sh +++ b/ci/linux/build-aarch64.sh @@ -63,7 +63,4 @@ stage3-release/bin/zig build test docs \ tidy --drop-empty-elements no -qe ../zig-cache/langref.html # Produce the experimental std lib documentation. -stage3-release/bin/zig test ../lib/std/std.zig \ - -femit-docs \ - -fno-emit-bin \ - --zig-lib-dir "$(pwd)/../lib" +stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib diff --git a/ci/linux/build-x86_64-debug.sh b/ci/linux/build-x86_64-debug.sh index a1eb26c962..68bb3e42d0 100755 --- a/ci/linux/build-x86_64-debug.sh +++ b/ci/linux/build-x86_64-debug.sh @@ -67,7 +67,4 @@ stage3-debug/bin/zig build test \ #tidy --drop-empty-elements no -qe ../zig-cache/langref.html # Produce the experimental std lib documentation. -stage3-debug/bin/zig test ../lib/std/std.zig \ - -femit-docs \ - -fno-emit-bin \ - --zig-lib-dir "$(pwd)/../lib" +stage3-debug/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib diff --git a/ci/linux/build-x86_64-release.sh b/ci/linux/build-x86_64-release.sh index 1155b3c8f7..fbcf86e418 100755 --- a/ci/linux/build-x86_64-release.sh +++ b/ci/linux/build-x86_64-release.sh @@ -63,10 +63,7 @@ stage3-release/bin/zig build test docs \ tidy --drop-empty-elements no -qe ../zig-cache/langref.html # Produce the experimental std lib documentation. -stage3-release/bin/zig test ../lib/std/std.zig \ - -femit-docs \ - -fno-emit-bin \ - --zig-lib-dir "$(pwd)/../lib" +stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib stage3-release/bin/zig build \ --prefix stage4-release \ diff --git a/ci/macos/build-aarch64.sh b/ci/macos/build-aarch64.sh index e8fe795b42..52f3aad696 100755 --- a/ci/macos/build-aarch64.sh +++ b/ci/macos/build-aarch64.sh @@ -44,8 +44,4 @@ stage3-release/bin/zig build test docs \ --search-prefix "$PREFIX" # Produce the experimental std lib documentation. 
-mkdir -p "stage3-release/doc/std" -stage3-release/bin/zig test "$(pwd)/../lib/std/std.zig" \ - --zig-lib-dir "$(pwd)/../lib" \ - -femit-docs="$(pwd)/stage3-release/doc/std" \ - -fno-emit-bin +stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib diff --git a/ci/macos/build-x86_64.sh b/ci/macos/build-x86_64.sh index ea19abce45..1a30b10d3a 100755 --- a/ci/macos/build-x86_64.sh +++ b/ci/macos/build-x86_64.sh @@ -51,8 +51,4 @@ stage3-release/bin/zig build test docs \ --search-prefix "$PREFIX" # Produce the experimental std lib documentation. -mkdir -p "stage3-release/doc/std" -stage3-release/bin/zig test "$(pwd)/../lib/std/std.zig" \ - --zig-lib-dir "$(pwd)/../lib" \ - -femit-docs="$(pwd)/stage3-release/doc/std" \ - -fno-emit-bin +stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib diff --git a/ci/windows/build.ps1 b/ci/windows/build.ps1 index 954784bbca..9a63b2559b 100644 --- a/ci/windows/build.ps1 +++ b/ci/windows/build.ps1 @@ -53,11 +53,10 @@ Write-Output " zig build test docs..." CheckLastExitCode # Produce the experimental std lib documentation. -mkdir "$ZIGINSTALLDIR\doc\std" -force - Write-Output "zig test std/std.zig..." & "$ZIGINSTALLDIR\bin\zig.exe" test "$ZIGLIBDIR\std\std.zig" ` --zig-lib-dir "$ZIGLIBDIR" ` - -femit-docs="$ZIGINSTALLDIR\doc\std" ` + -femit-docs ` -fno-emit-bin +CheckLastExitCode diff --git a/lib/libc/glibc/sysdeps/arm/arm-features.h b/lib/libc/glibc/sysdeps/arm/arm-features.h new file mode 100644 index 0000000000..80a1e2272b --- /dev/null +++ b/lib/libc/glibc/sysdeps/arm/arm-features.h @@ -0,0 +1,59 @@ +/* Macros to test for CPU features on ARM. Generic ARM version. + Copyright (C) 2012-2022 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <https://www.gnu.org/licenses/>. */ + +#ifndef _ARM_ARM_FEATURES_H +#define _ARM_ARM_FEATURES_H 1 + +/* An OS-specific arm-features.h file should define ARM_HAVE_VFP to + an appropriate expression for testing at runtime whether the VFP + hardware is present. We'll then redefine it to a constant if we + know at compile time that we can assume VFP. */ + +#ifndef __SOFTFP__ +/* The compiler is generating VFP instructions, so we're already + assuming the hardware exists. */ +# undef ARM_HAVE_VFP +# define ARM_HAVE_VFP 1 +#endif + +/* An OS-specific arm-features.h file may define ARM_ASSUME_NO_IWMMXT + to indicate at compile time that iWMMXt hardware is never present + at runtime (or that we never care about its state) and so need not + be checked for. */ + +/* A more-specific arm-features.h file may define ARM_ALWAYS_BX to indicate + that instructions using pc as a destination register must never be used, + so a "bx" (or "blx") instruction is always required. */ + +/* The log2 of the minimum alignment required for an address that + is the target of a computed branch (i.e. a "bx" instruction). 
+ A more-specific arm-features.h file may define this to set a more + stringent requirement. + + Using this only makes sense for code in ARM mode (where instructions + always have a fixed size of four bytes), or for Thumb-mode code that is + specifically aligning all the related branch targets to match (since + Thumb instructions might be either two or four bytes). */ +#ifndef ARM_BX_ALIGN_LOG2 +# define ARM_BX_ALIGN_LOG2 2 +#endif + +/* An OS-specific arm-features.h file may define ARM_NO_INDEX_REGISTER to + indicate that the two-register addressing modes must never be used. */ + +#endif /* arm-features.h */ diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig index 1482c8166d..3625aab576 100644 --- a/lib/std/Thread/Condition.zig +++ b/lib/std/Thread/Condition.zig @@ -194,42 +194,27 @@ const FutexImpl = struct { const signal_mask = 0xffff << 16; fn wait(self: *Impl, mutex: *Mutex, timeout: ?u64) error{Timeout}!void { - // Register that we're waiting on the state by incrementing the wait count. - // This assumes that there can be at most ((1<<16)-1) or 65,355 threads concurrently waiting on the same Condvar. - // If this is hit in practice, then this condvar not working is the least of your concerns. + // Observe the epoch, then check the state again to see if we should wake up. + // The epoch must be observed before we check the state or we could potentially miss a wake() and deadlock: + // + // - T1: s = LOAD(&state) + // - T2: UPDATE(&s, signal) + // - T2: UPDATE(&epoch, 1) + FUTEX_WAKE(&epoch) + // - T1: e = LOAD(&epoch) (was reordered after the state load) + // - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change) + // + // Acquire barrier to ensure the epoch load happens before the state load. + var epoch = self.epoch.load(.Acquire); var state = self.state.fetchAdd(one_waiter, .Monotonic); assert(state & waiter_mask != waiter_mask); state += one_waiter; - // Temporarily release the mutex in order to block on the condition variable. mutex.unlock(); defer mutex.lock(); var futex_deadline = Futex.Deadline.init(timeout); - while (true) { - // Try to wake up by consuming a signal and decremented the waiter we added previously. - // Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return. - while (state & signal_mask != 0) { - const new_state = state - one_waiter - one_signal; - state = self.state.tryCompareAndSwap(state, new_state, .Acquire, .Monotonic) orelse return; - } - - // Observe the epoch, then check the state again to see if we should wake up. - // The epoch must be observed before we check the state or we could potentially miss a wake() and deadlock: - // - // - T1: s = LOAD(&state) - // - T2: UPDATE(&s, signal) - // - T2: UPDATE(&epoch, 1) + FUTEX_WAKE(&epoch) - // - T1: e = LOAD(&epoch) (was reordered after the state load) - // - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change) - // - // Acquire barrier to ensure the epoch load happens before the state load. - const epoch = self.epoch.load(.Acquire); - state = self.state.load(.Monotonic); - if (state & signal_mask != 0) { - continue; - } + while (true) { futex_deadline.wait(&self.epoch, epoch) catch |err| switch (err) { // On timeout, we must decrement the waiter we added above. 
error.Timeout => { @@ -247,6 +232,16 @@ const FutexImpl = struct { } }, }; + + epoch = self.epoch.load(.Acquire); + state = self.state.load(.Monotonic); + + // Try to wake up by consuming a signal and decrementing the waiter we added previously. + // Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return. + while (state & signal_mask != 0) { + const new_state = state - one_waiter - one_signal; + state = self.state.tryCompareAndSwap(state, new_state, .Acquire, .Monotonic) orelse return; + } } } @@ -536,3 +531,150 @@ test "Condition - broadcasting" { t.join(); } } + +test "Condition - broadcasting - wake all threads" { + // Tests issue #12877 + // This test requires spawning threads + if (builtin.single_threaded) { + return error.SkipZigTest; + } + + var num_runs: usize = 1; + const num_threads = 10; + + while (num_runs > 0) : (num_runs -= 1) { + const BroadcastTest = struct { + mutex: Mutex = .{}, + cond: Condition = .{}, + completed: Condition = .{}, + count: usize = 0, + thread_id_to_wake: usize = 0, + threads: [num_threads]std.Thread = undefined, + wakeups: usize = 0, + + fn run(self: *@This(), thread_id: usize) void { + self.mutex.lock(); + defer self.mutex.unlock(); + + // The last broadcast thread to start tells the main test thread it's completed. + self.count += 1; + if (self.count == num_threads) { + self.completed.signal(); + } + + while (self.thread_id_to_wake != thread_id) { + self.cond.timedWait(&self.mutex, 1 * std.time.ns_per_s) catch std.debug.panic("thread_id {d} timeout {d}", .{ thread_id, self.thread_id_to_wake }); + self.wakeups += 1; + } + if (self.thread_id_to_wake <= num_threads) { + // Signal the next thread to wake up. + self.thread_id_to_wake += 1; + self.cond.broadcast(); + } + } + }; + + var broadcast_test = BroadcastTest{}; + var thread_id: usize = 1; + for (broadcast_test.threads) |*t| { + t.* = try std.Thread.spawn(.{}, BroadcastTest.run, .{ &broadcast_test, thread_id }); + thread_id += 1; + } + + { + broadcast_test.mutex.lock(); + defer broadcast_test.mutex.unlock(); + + // Wait for all the broadcast threads to spawn. + // Use timedWait() to detect any potential deadlocks. + while (broadcast_test.count != num_threads) { + try broadcast_test.completed.timedWait( + &broadcast_test.mutex, + 1 * std.time.ns_per_s, + ); + } + + // Signal thread 1 to wake up. + broadcast_test.thread_id_to_wake = 1; + broadcast_test.cond.broadcast(); + } + + for (broadcast_test.threads) |t| { + t.join(); + } + } +} + +test "Condition - signal wakes one" { + // This test requires spawning threads + if (builtin.single_threaded) { + return error.SkipZigTest; + } + + var num_runs: usize = 1; + const num_threads = 3; + const timeoutDelay = 10 * std.time.ns_per_ms; + + while (num_runs > 0) : (num_runs -= 1) { + + // Start multiple runner threads, wait for them to start, and then send the + // signal. Expect that one thread wakes up and all the others time out. + // + // The test depends on the delay in timedWait! If it is too small, all threads + // can time out before any one gets woken up. + + const Runner = struct { + mutex: Mutex = .{}, + cond: Condition = .{}, + completed: Condition = .{}, + count: usize = 0, + threads: [num_threads]std.Thread = undefined, + wakeups: usize = 0, + timeouts: usize = 0, + + fn run(self: *@This()) void { + self.mutex.lock(); + defer self.mutex.unlock(); + + // The last started thread tells the main test thread it's completed.
+ self.count += 1; + if (self.count == num_threads) { + self.completed.signal(); + } + + self.cond.timedWait(&self.mutex, timeoutDelay) catch { + self.timeouts += 1; + return; + }; + self.wakeups += 1; + } + }; + + // Start the threads. + var runner = Runner{}; + for (runner.threads) |*t| { + t.* = try std.Thread.spawn(.{}, Runner.run, .{&runner}); + } + + { + runner.mutex.lock(); + defer runner.mutex.unlock(); + + // Wait for all the threads to spawn. + // Use timedWait() to detect any potential deadlocks. + while (runner.count != num_threads) { + try runner.completed.timedWait(&runner.mutex, 1 * std.time.ns_per_s); + } + // Signal one thread; the others should time out. + runner.cond.signal(); + } + + for (runner.threads) |t| { + t.join(); + } + + // Expect that only one got the signal. + try std.testing.expectEqual(runner.wakeups, 1); + try std.testing.expectEqual(runner.timeouts, num_threads - 1); + } +} diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 33604dfee7..1f7d48ccb9 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -168,7 +168,7 @@ pub const AddressSpace = enum { gs, fs, ss, - // GPU address spaces + // GPU address spaces. global, constant, param, @@ -869,6 +869,26 @@ pub noinline fn returnError(st: *StackTrace) void { addErrRetTraceAddr(st, @returnAddress()); } +pub const panic_messages = struct { + pub const unreach = "reached unreachable code"; + pub const unwrap_null = "attempt to use null value"; + pub const cast_to_null = "cast causes pointer to be null"; + pub const incorrect_alignment = "incorrect alignment"; + pub const invalid_error_code = "invalid error code"; + pub const cast_truncated_data = "integer cast truncated bits"; + pub const negative_to_unsigned = "attempt to cast negative value to unsigned integer"; + pub const integer_overflow = "integer overflow"; + pub const shl_overflow = "left shift overflowed bits"; + pub const shr_overflow = "right shift overflowed bits"; + pub const divide_by_zero = "division by zero"; + pub const exact_division_remainder = "exact division produced remainder"; + pub const inactive_union_field = "access of inactive union field"; + pub const integer_part_out_of_bounds = "integer part of floating point value out of bounds"; + pub const corrupt_switch = "switch on corrupt value"; + pub const shift_rhs_too_big = "shift amount is greater than the type size"; + pub const invalid_enum_value = "invalid enum value"; +}; + pub inline fn addErrRetTraceAddr(st: *StackTrace, addr: usize) void { if (st.index < st.instruction_addresses.len) st.instruction_addresses[st.index] = addr; diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig index 552bd114a1..149191040a 100644 --- a/lib/std/crypto/25519/ed25519.zig +++ b/lib/std/crypto/25519/ed25519.zig @@ -318,6 +318,7 @@ pub const Ed25519 = struct { h.update(&scalar_and_prefix.prefix); var noise2: [noise_length]u8 = undefined; crypto.random.bytes(&noise2); + h.update(&noise2); if (noise) |*z| { h.update(z); } diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig index 0d102493cf..feacf38daf 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -467,55 +467,49 @@ pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 { /// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters. /// Note: all usage of this function should be audited due to the existence of symlinks. /// Without performing actual syscalls, resolving `..` could be incorrect.
+/// This API may break in the future: https://github.com/ziglang/zig/issues/13613 pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { - if (paths.len == 0) { - assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd - return process.getCwdAlloc(allocator); - } + assert(paths.len > 0); // determine which disk designator we will result with, if any var result_drive_buf = "_:".*; - var result_disk_designator: []const u8 = ""; - var have_drive_kind = WindowsPath.Kind.None; + var disk_designator: []const u8 = ""; + var drive_kind = WindowsPath.Kind.None; var have_abs_path = false; var first_index: usize = 0; - var max_size: usize = 0; for (paths) |p, i| { const parsed = windowsParsePath(p); if (parsed.is_abs) { have_abs_path = true; first_index = i; - max_size = result_disk_designator.len; } switch (parsed.kind) { - WindowsPath.Kind.Drive => { + .Drive => { result_drive_buf[0] = ascii.toUpper(parsed.disk_designator[0]); - result_disk_designator = result_drive_buf[0..]; - have_drive_kind = WindowsPath.Kind.Drive; + disk_designator = result_drive_buf[0..]; + drive_kind = WindowsPath.Kind.Drive; }, - WindowsPath.Kind.NetworkShare => { - result_disk_designator = parsed.disk_designator; - have_drive_kind = WindowsPath.Kind.NetworkShare; + .NetworkShare => { + disk_designator = parsed.disk_designator; + drive_kind = WindowsPath.Kind.NetworkShare; }, - WindowsPath.Kind.None => {}, + .None => {}, } - max_size += p.len + 1; } // if we will result with a disk designator, loop again to determine // which is the last time the disk designator is absolutely specified, if any // and count up the max bytes for paths related to this disk designator - if (have_drive_kind != WindowsPath.Kind.None) { + if (drive_kind != WindowsPath.Kind.None) { have_abs_path = false; first_index = 0; - max_size = result_disk_designator.len; var correct_disk_designator = false; for (paths) |p, i| { const parsed = windowsParsePath(p); if (parsed.kind != WindowsPath.Kind.None) { - if (parsed.kind == have_drive_kind) { - correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator); + if (parsed.kind == drive_kind) { + correct_disk_designator = compareDiskDesignators(drive_kind, disk_designator, parsed.disk_designator); } else { continue; } @@ -525,92 +519,51 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { } if (parsed.is_abs) { first_index = i; - max_size = result_disk_designator.len; have_abs_path = true; } - max_size += p.len + 1; } } - // Allocate result and fill in the disk designator, calling getCwd if we have to. - var result: []u8 = undefined; - var result_index: usize = 0; - - if (have_abs_path) { - switch (have_drive_kind) { - WindowsPath.Kind.Drive => { - result = try allocator.alloc(u8, max_size); + // Allocate result and fill in the disk designator. 
+ var result = std.ArrayList(u8).init(allocator); + defer result.deinit(); - mem.copy(u8, result, result_disk_designator); - result_index += result_disk_designator.len; + const disk_designator_len: usize = l: { + if (!have_abs_path) break :l 0; + switch (drive_kind) { + .Drive => { + try result.appendSlice(disk_designator); + break :l disk_designator.len; }, - WindowsPath.Kind.NetworkShare => { - result = try allocator.alloc(u8, max_size); + .NetworkShare => { var it = mem.tokenize(u8, paths[first_index], "/\\"); const server_name = it.next().?; const other_name = it.next().?; - result[result_index] = '\\'; - result_index += 1; - result[result_index] = '\\'; - result_index += 1; - mem.copy(u8, result[result_index..], server_name); - result_index += server_name.len; - result[result_index] = '\\'; - result_index += 1; - mem.copy(u8, result[result_index..], other_name); - result_index += other_name.len; - - result_disk_designator = result[0..result_index]; + try result.ensureUnusedCapacity(2 + 1 + server_name.len + other_name.len); + result.appendSliceAssumeCapacity("\\\\"); + result.appendSliceAssumeCapacity(server_name); + result.appendAssumeCapacity('\\'); + result.appendSliceAssumeCapacity(other_name); + + break :l result.items.len; }, - WindowsPath.Kind.None => { - assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd - const cwd = try process.getCwdAlloc(allocator); - defer allocator.free(cwd); - const parsed_cwd = windowsParsePath(cwd); - result = try allocator.alloc(u8, max_size + parsed_cwd.disk_designator.len + 1); - mem.copy(u8, result, parsed_cwd.disk_designator); - result_index += parsed_cwd.disk_designator.len; - result_disk_designator = result[0..parsed_cwd.disk_designator.len]; - if (parsed_cwd.kind == WindowsPath.Kind.Drive) { - result[0] = ascii.toUpper(result[0]); - } - have_drive_kind = parsed_cwd.kind; + .None => { + break :l 1; }, } - } else { - assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd - // TODO call get cwd for the result_disk_designator instead of the global one - const cwd = try process.getCwdAlloc(allocator); - defer allocator.free(cwd); - - result = try allocator.alloc(u8, max_size + cwd.len + 1); - - mem.copy(u8, result, cwd); - result_index += cwd.len; - const parsed_cwd = windowsParsePath(result[0..result_index]); - result_disk_designator = parsed_cwd.disk_designator; - if (parsed_cwd.kind == WindowsPath.Kind.Drive) { - result[0] = ascii.toUpper(result[0]); - // Remove the trailing slash if present, eg. if the cwd is a root - // directory. - if (cwd.len > 0 and cwd[cwd.len - 1] == sep_windows) { - result_index -= 1; - } - } - have_drive_kind = parsed_cwd.kind; - } - errdefer allocator.free(result); + }; - // Now we know the disk designator to use, if any, and what kind it is. And our result - // is big enough to append all the paths to. 
var correct_disk_designator = true; + var negative_count: usize = 0; + for (paths[first_index..]) |p| { const parsed = windowsParsePath(p); - if (parsed.kind != WindowsPath.Kind.None) { - if (parsed.kind == have_drive_kind) { - correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator); + if (parsed.kind != .None) { + if (parsed.kind == drive_kind) { + const dd = result.items[0..disk_designator_len]; + correct_disk_designator = compareDiskDesignators(drive_kind, dd, parsed.disk_designator); } else { continue; } @@ -619,154 +572,167 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { continue; } var it = mem.tokenize(u8, p[parsed.disk_designator.len..], "/\\"); - while (it.next()) |component| { + component: while (it.next()) |component| { if (mem.eql(u8, component, ".")) { continue; } else if (mem.eql(u8, component, "..")) { while (true) { - if (result_index == 0 or result_index == result_disk_designator.len) - break; - result_index -= 1; - if (result[result_index] == '\\' or result[result_index] == '/') + if (result.items.len == 0) { + negative_count += 1; + continue :component; + } + if (result.items.len == disk_designator_len) { break; + } + const end_with_sep = switch (result.items[result.items.len - 1]) { + '\\', '/' => true, + else => false, + }; + result.items.len -= 1; + if (end_with_sep) break; } + } else if (!have_abs_path and result.items.len == 0) { + try result.appendSlice(component); } else { - result[result_index] = sep_windows; - result_index += 1; - mem.copy(u8, result[result_index..], component); - result_index += component.len; + try result.ensureUnusedCapacity(1 + component.len); + result.appendAssumeCapacity('\\'); + result.appendSliceAssumeCapacity(component); } } } - if (result_index == result_disk_designator.len) { - result[result_index] = '\\'; - result_index += 1; + if (disk_designator_len != 0 and result.items.len == disk_designator_len) { + try result.append('\\'); + return result.toOwnedSlice(); + } + + if (result.items.len == 0) { + if (negative_count == 0) { + return allocator.dupe(u8, "."); + } else { + const real_result = try allocator.alloc(u8, 3 * negative_count - 1); + var count = negative_count - 1; + var i: usize = 0; + while (count > 0) : (count -= 1) { + real_result[i..][0..3].* = "..\\".*; + i += 3; + } + real_result[i..][0..2].* = "..".*; + return real_result; + } } - return allocator.shrink(result, result_index); + if (negative_count == 0) { + return result.toOwnedSlice(); + } else { + const real_result = try allocator.alloc(u8, 3 * negative_count + result.items.len); + var count = negative_count; + var i: usize = 0; + while (count > 0) : (count -= 1) { + real_result[i..][0..3].* = "..\\".*; + i += 3; + } + mem.copy(u8, real_result[i..], result.items); + return real_result; + } } /// This function is like a series of `cd` statements executed one after another. /// It resolves "." and "..". /// The result does not have a trailing path separator. -/// If all paths are relative it uses the current working directory as a starting point. -/// Note: all usage of this function should be audited due to the existence of symlinks. -/// Without performing actual syscalls, resolving `..` could be incorrect. 
-pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 { - if (paths.len == 0) { - assert(native_os != .windows); // resolvePosix called on windows can't use getCwd - return process.getCwdAlloc(allocator); - } +/// This function does not perform any syscalls. Executing this series of path +/// lookups on the actual filesystem may produce different results due to +/// symlinks. +pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) Allocator.Error![]u8 { + assert(paths.len > 0); - var first_index: usize = 0; - var have_abs = false; - var max_size: usize = 0; - for (paths) |p, i| { - if (isAbsolutePosix(p)) { - first_index = i; - have_abs = true; - max_size = 0; - } - max_size += p.len + 1; - } - - var result: []u8 = undefined; - var result_index: usize = 0; + var result = std.ArrayList(u8).init(allocator); + defer result.deinit(); - if (have_abs) { - result = try allocator.alloc(u8, max_size); - } else { - assert(native_os != .windows); // resolvePosix called on windows can't use getCwd - const cwd = try process.getCwdAlloc(allocator); - defer allocator.free(cwd); - result = try allocator.alloc(u8, max_size + cwd.len + 1); - mem.copy(u8, result, cwd); - result_index += cwd.len; - } - errdefer allocator.free(result); + var negative_count: usize = 0; + var is_abs = false; - for (paths[first_index..]) |p| { + for (paths) |p| { + if (isAbsolutePosix(p)) { + is_abs = true; + negative_count = 0; + result.clearRetainingCapacity(); + } var it = mem.tokenize(u8, p, "/"); - while (it.next()) |component| { + component: while (it.next()) |component| { if (mem.eql(u8, component, ".")) { continue; } else if (mem.eql(u8, component, "..")) { while (true) { - if (result_index == 0) - break; - result_index -= 1; - if (result[result_index] == '/') - break; + if (result.items.len == 0) { + negative_count += @boolToInt(!is_abs); + continue :component; + } + const ends_with_slash = result.items[result.items.len - 1] == '/'; + result.items.len -= 1; + if (ends_with_slash) break; } + } else if (result.items.len > 0 or is_abs) { + try result.ensureUnusedCapacity(1 + component.len); + result.appendAssumeCapacity('/'); + result.appendSliceAssumeCapacity(component); } else { - result[result_index] = '/'; - result_index += 1; - mem.copy(u8, result[result_index..], component); - result_index += component.len; + try result.appendSlice(component); } } } - if (result_index == 0) { - result[0] = '/'; - result_index += 1; + if (result.items.len == 0) { + if (is_abs) { + return allocator.dupe(u8, "/"); + } + if (negative_count == 0) { + return allocator.dupe(u8, "."); + } else { + const real_result = try allocator.alloc(u8, 3 * negative_count - 1); + var count = negative_count - 1; + var i: usize = 0; + while (count > 0) : (count -= 1) { + real_result[i..][0..3].* = "../".*; + i += 3; + } + real_result[i..][0..2].* = "..".*; + return real_result; + } } - return allocator.shrink(result, result_index); + if (negative_count == 0) { + return result.toOwnedSlice(); + } else { + const real_result = try allocator.alloc(u8, 3 * negative_count + result.items.len); + var count = negative_count; + var i: usize = 0; + while (count > 0) : (count -= 1) { + real_result[i..][0..3].* = "../".*; + i += 3; + } + mem.copy(u8, real_result[i..], result.items); + return real_result; + } } test "resolve" { - if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest; - if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/"); + try 
testResolveWindows(&[_][]const u8{ "a\\b\\c\\", "..\\..\\.." }, ".."); + try testResolveWindows(&[_][]const u8{"."}, "."); - const cwd = try process.getCwdAlloc(testing.allocator); - defer testing.allocator.free(cwd); - if (native_os == .windows) { - if (windowsParsePath(cwd).kind == WindowsPath.Kind.Drive) { - cwd[0] = ascii.toUpper(cwd[0]); - } - try testResolveWindows(&[_][]const u8{"."}, cwd); - } else { - try testResolvePosix(&[_][]const u8{ "a/b/c/", "../../.." }, cwd); - try testResolvePosix(&[_][]const u8{"."}, cwd); - } + try testResolvePosix(&[_][]const u8{ "a/b/c/", "../../.." }, ".."); + try testResolvePosix(&[_][]const u8{"."}, "."); } test "resolveWindows" { - if (builtin.target.cpu.arch == .aarch64) { - // TODO https://github.com/ziglang/zig/issues/3288 - return error.SkipZigTest; - } - if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest; - if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/"); - if (native_os == .windows) { - const cwd = try process.getCwdAlloc(testing.allocator); - defer testing.allocator.free(cwd); - const parsed_cwd = windowsParsePath(cwd); - { - const expected = try join(testing.allocator, &[_][]const u8{ - parsed_cwd.disk_designator, - "usr\\local\\lib\\zig\\std\\array_list.zig", - }); - defer testing.allocator.free(expected); - if (parsed_cwd.kind == WindowsPath.Kind.Drive) { - expected[0] = ascii.toUpper(parsed_cwd.disk_designator[0]); - } - try testResolveWindows(&[_][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" }, expected); - } - { - const expected = try join(testing.allocator, &[_][]const u8{ - cwd, - "usr\\local\\lib\\zig", - }); - defer testing.allocator.free(expected); - if (parsed_cwd.kind == WindowsPath.Kind.Drive) { - expected[0] = ascii.toUpper(parsed_cwd.disk_designator[0]); - } - try testResolveWindows(&[_][]const u8{ "usr/local", "lib\\zig" }, expected); - } - } + try testResolveWindows( + &[_][]const u8{ "Z:\\", "/usr/local", "lib\\zig\\std\\array_list.zig" }, + "Z:\\usr\\local\\lib\\zig\\std\\array_list.zig", + ); + try testResolveWindows( + &[_][]const u8{ "z:\\", "usr/local", "lib\\zig" }, + "Z:\\usr\\local\\lib\\zig", + ); try testResolveWindows(&[_][]const u8{ "c:\\a\\b\\c", "/hi", "ok" }, "C:\\hi\\ok"); try testResolveWindows(&[_][]const u8{ "c:/blah\\blah", "d:/games", "c:../a" }, "C:\\blah\\a"); @@ -781,12 +747,12 @@ test "resolveWindows" { try testResolveWindows(&[_][]const u8{ "c:/", "//server//share" }, "\\\\server\\share\\"); try testResolveWindows(&[_][]const u8{ "c:/", "///some//dir" }, "C:\\some\\dir"); try testResolveWindows(&[_][]const u8{ "C:\\foo\\tmp.3\\", "..\\tmp.3\\cycles\\root.js" }, "C:\\foo\\tmp.3\\cycles\\root.js"); + + // Keep relative paths relative. 
+ try testResolveWindows(&[_][]const u8{"a/b"}, "a\\b"); } test "resolvePosix" { - if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest; - if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/"); - try testResolvePosix(&[_][]const u8{ "/a/b", "c" }, "/a/b/c"); try testResolvePosix(&[_][]const u8{ "/a/b", "c", "//d", "e///" }, "/d/e"); try testResolvePosix(&[_][]const u8{ "/a/b/c", "..", "../" }, "/a"); @@ -797,18 +763,21 @@ test "resolvePosix" { try testResolvePosix(&[_][]const u8{ "/var/lib", "/../", "file/" }, "/file"); try testResolvePosix(&[_][]const u8{ "/some/dir", ".", "/absolute/" }, "/absolute"); try testResolvePosix(&[_][]const u8{ "/foo/tmp.3/", "../tmp.3/cycles/root.js" }, "/foo/tmp.3/cycles/root.js"); + + // Keep relative paths relative. + try testResolvePosix(&[_][]const u8{"a/b"}, "a/b"); } fn testResolveWindows(paths: []const []const u8, expected: []const u8) !void { const actual = try resolveWindows(testing.allocator, paths); defer testing.allocator.free(actual); - try testing.expect(mem.eql(u8, actual, expected)); + try testing.expectEqualStrings(expected, actual); } fn testResolvePosix(paths: []const []const u8, expected: []const u8) !void { const actual = try resolvePosix(testing.allocator, paths); defer testing.allocator.free(actual); - try testing.expect(mem.eql(u8, actual, expected)); + try testing.expectEqualStrings(expected, actual); } /// Strip the last component from a file path. @@ -1089,13 +1058,15 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ! if (parsed_from.kind != parsed_to.kind) { break :x true; } else switch (parsed_from.kind) { - WindowsPath.Kind.NetworkShare => { + .NetworkShare => { break :x !networkShareServersEql(parsed_to.disk_designator, parsed_from.disk_designator); }, - WindowsPath.Kind.Drive => { + .Drive => { break :x ascii.toUpper(parsed_from.disk_designator[0]) != ascii.toUpper(parsed_to.disk_designator[0]); }, - else => unreachable, + .None => { + break :x false; + }, } }; @@ -1194,13 +1165,6 @@ pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![] } test "relative" { - if (builtin.target.cpu.arch == .aarch64) { - // TODO https://github.com/ziglang/zig/issues/3288 - return error.SkipZigTest; - } - if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest; - if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/"); - try testRelativeWindows("c:/blah\\blah", "d:/games", "D:\\games"); try testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa", ".."); try testRelativeWindows("c:/aaaa/bbbb", "c:/cccc", "..\\..\\cccc"); @@ -1226,6 +1190,10 @@ test "relative" { try testRelativeWindows("C:\\baz", "\\\\foo\\bar\\baz", "\\\\foo\\bar\\baz"); try testRelativeWindows("\\\\foo\\bar\\baz", "C:\\baz", "C:\\baz"); + try testRelativeWindows("a/b/c", "a\\b", ".."); + try testRelativeWindows("a/b/c", "a", "..\\.."); + try testRelativeWindows("a/b/c", "a\\b\\c\\d", "d"); + try testRelativePosix("/var/lib", "/var", ".."); try testRelativePosix("/var/lib", "/bin", "../../bin"); try testRelativePosix("/var/lib", "/var/lib", ""); @@ -1243,13 +1211,13 @@ test "relative" { fn testRelativePosix(from: []const u8, to: []const u8, expected_output: []const u8) !void { const result = try relativePosix(testing.allocator, from, to); defer testing.allocator.free(result); - try testing.expectEqualSlices(u8, expected_output, result); + try testing.expectEqualStrings(expected_output, result); } fn 
testRelativeWindows(from: []const u8, to: []const u8, expected_output: []const u8) !void { const result = try relativeWindows(testing.allocator, from, to); defer testing.allocator.free(result); - try testing.expectEqualSlices(u8, expected_output, result); + try testing.expectEqualStrings(expected_output, result); } /// Returns the extension of the file name (if any). diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index f6168054b6..00e42b6417 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -1095,7 +1095,9 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" { const allocator = testing.allocator; - const file_paths: [1][]const u8 = .{"zig-test-absolute-paths.txt"}; + const cwd = try std.process.getCwdAlloc(allocator); + defer allocator.free(cwd); + const file_paths: [2][]const u8 = .{ cwd, "zig-test-absolute-paths.txt" }; const filename = try fs.path.resolve(allocator, &file_paths); defer allocator.free(filename); diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index 2051215dfe..6358873ede 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -202,10 +202,7 @@ pub const PreopenList = struct { // POSIX paths, relative to "/" or `cwd_root` depending on whether they start with "." const path = if (cwd_root) |cwd| blk: { const resolve_paths: []const []const u8 = if (raw_path[0] == '.') &.{ cwd, raw_path } else &.{ "/", raw_path }; - break :blk fs.path.resolve(self.buffer.allocator, resolve_paths) catch |err| switch (err) { - error.CurrentWorkingDirectoryUnlinked => unreachable, // root is absolute, so CWD not queried - else => |e| return e, - }; + break :blk try fs.path.resolve(self.buffer.allocator, resolve_paths); } else blk: { // If we were provided no CWD root, we preserve the preopen dir without resolving break :blk try self.buffer.allocator.dupe(u8, raw_path); diff --git a/lib/std/target.zig b/lib/std/target.zig index 745be7dbff..49a7bd1c7d 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1179,10 +1179,12 @@ pub const Target = struct { /// Returns whether this architecture supports the address space pub fn supportsAddressSpace(arch: Arch, address_space: std.builtin.AddressSpace) bool { const is_nvptx = arch == .nvptx or arch == .nvptx64; + const is_spirv = arch == .spirv32 or arch == .spirv64; + const is_gpu = is_nvptx or is_spirv or arch == .amdgcn; return switch (address_space) { .generic => true, .fs, .gs, .ss => arch == .x86_64 or arch == .x86, - .global, .constant, .local, .shared => arch == .amdgcn or is_nvptx, + .global, .constant, .local, .shared => is_gpu, .param => is_nvptx, }; } @@ -344,6 +344,12 @@ static inline zig_bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_ #endif } +static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits); +} + zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) @@ -358,6 +364,12 @@ static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_ return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); } +static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits); +} + static inline 
zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u64 full_res; @@ -370,6 +382,12 @@ static inline zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_ #endif } +static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits); +} + zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) @@ -384,6 +402,12 @@ static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_ return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); } +static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u8 full_res; @@ -395,6 +419,12 @@ static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 b #endif } +static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i8 full_res; @@ -406,6 +436,12 @@ static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 b #endif } +static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u16 full_res; @@ -417,6 +453,12 @@ static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_ #endif } +static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i16 full_res; @@ -428,6 +470,12 @@ static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_ #endif } +static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u32 full_res; @@ -440,6 +488,12 @@ static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_ #endif } +static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits); +} + zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { #if 
zig_has_builtin(sub_overflow) @@ -454,6 +508,12 @@ static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_ return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); } +static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u64 full_res; @@ -466,6 +526,12 @@ static inline zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_ #endif } +static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits); +} + zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) @@ -480,6 +546,12 @@ static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_ return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); } +static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u8 full_res; @@ -491,6 +563,12 @@ static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 b #endif } +static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i8 full_res; @@ -502,6 +580,13 @@ static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 b #endif } +static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits); +} + + static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u16 full_res; @@ -513,6 +598,13 @@ static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_ #endif } +static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits); +} + + static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i16 full_res; @@ -524,6 +616,12 @@ static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_ #endif } +static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u32 full_res; @@ -536,6 +634,12 @@ static inline zig_bool 
zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_ #endif } +static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n, + const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits); +} + zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) @@ -550,6 +654,12 @@ static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_ return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); } +static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n, + const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u64 full_res; @@ -562,6 +672,12 @@ static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_ #endif } +static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n, + const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits); +} + zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) @@ -576,6 +692,12 @@ static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_ return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); } +static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n, + const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u8 full_res; @@ -587,6 +709,12 @@ static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 b #endif } +static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n, + const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i8 full_res; @@ -598,6 +726,12 @@ static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 b #endif } +static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n, + const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u16 full_res; @@ -609,6 +743,12 @@ static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_ #endif } +static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n, + const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits); +} + static inline zig_bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i16 full_res; @@ -620,6 +760,12 @@ static inline zig_bool zig_mulo_i16(zig_i16 
*res, zig_i16 lhs, zig_i16 rhs, zig_ #endif } +static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n, + const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits) +{ + for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits); +} + #define zig_int_builtins(w) \ static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \ @@ -846,10 +992,8 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) { full_res = __builtin_bitreverse8(val); #else static zig_u8 const lut[0x10] = { - 0b0000, 0b1000, 0b0100, 0b1100, - 0b0010, 0b1010, 0b0110, 0b1110, - 0b0001, 0b1001, 0b0101, 0b1101, - 0b0011, 0b1011, 0b0111, 0b1111, + 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, + 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf }; full_res = lut[val >> 0 & 0xF] << 4 | lut[val >> 4 & 0xF] << 0; #endif diff --git a/src/Cache.zig b/src/Cache.zig index da1e056644..a645e06594 100644 --- a/src/Cache.zig +++ b/src/Cache.zig @@ -1,3 +1,7 @@ +//! Manages `zig-cache` directories. +//! This is not a general-purpose cache. It is designed to be fast and simple, +//! not to withstand attacks using specially-crafted input. + gpa: Allocator, manifest_dir: fs.Dir, hash: HashHelper = .{}, @@ -5,6 +9,14 @@ hash: HashHelper = .{}, recent_problematic_timestamp: i128 = 0, mutex: std.Thread.Mutex = .{}, +/// A set of strings such as the zig library directory or project source root, which +/// are stripped from the file paths before putting into the cache. They +/// are replaced with single-character indicators. This is not to save +/// space but to eliminate absolute file paths. This improves portability +/// and usefulness of the cache for advanced use cases. +prefixes_buffer: [3]Compilation.Directory = undefined, +prefixes_len: usize = 0, + const Cache = @This(); const std = @import("std"); const builtin = @import("builtin"); @@ -18,6 +30,14 @@ const Allocator = std.mem.Allocator; const Compilation = @import("Compilation.zig"); const log = std.log.scoped(.cache); +pub fn addPrefix(cache: *Cache, directory: Compilation.Directory) void { + if (directory.path) |p| { + log.debug("Cache.addPrefix {d} {s}", .{ cache.prefixes_len, p }); + } + cache.prefixes_buffer[cache.prefixes_len] = directory; + cache.prefixes_len += 1; +} + /// Be sure to call `Manifest.deinit` after successful initialization. pub fn obtain(cache: *Cache) Manifest { return Manifest{ @@ -29,6 +49,48 @@ pub fn obtain(cache: *Cache) Manifest { }; } +pub fn prefixes(cache: *const Cache) []const Compilation.Directory { + return cache.prefixes_buffer[0..cache.prefixes_len]; +} + +const PrefixedPath = struct { + prefix: u8, + sub_path: []u8, +}; + +fn findPrefix(cache: *const Cache, file_path: []const u8) !PrefixedPath { + const gpa = cache.gpa; + const resolved_path = try fs.path.resolve(gpa, &[_][]const u8{file_path}); + errdefer gpa.free(resolved_path); + return findPrefixResolved(cache, resolved_path); +} + +/// Takes ownership of `resolved_path` on success. +fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath { + const gpa = cache.gpa; + const prefixes_slice = cache.prefixes(); + var i: u8 = 1; // Start at 1 to skip over checking the null prefix. 
+ while (i < prefixes_slice.len) : (i += 1) { + const p = prefixes_slice[i].path.?; + if (mem.startsWith(u8, resolved_path, p)) { + // +1 to skip over the path separator here + const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]); + gpa.free(resolved_path); + return PrefixedPath{ + .prefix = @intCast(u8, i), + .sub_path = sub_path, + }; + } else { + log.debug("'{s}' does not start with '{s}'", .{ resolved_path, p }); + } + } + + return PrefixedPath{ + .prefix = 0, + .sub_path = resolved_path, + }; +} + /// This is 128 bits - Even with 2^54 cache entries, the probability of a collision would be under 10^-6 pub const bin_digest_len = 16; pub const hex_digest_len = bin_digest_len * 2; @@ -45,7 +107,7 @@ pub const Hasher = crypto.auth.siphash.SipHash128(1, 3); pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length); pub const File = struct { - path: ?[]const u8, + prefixed_path: ?PrefixedPath, max_file_size: ?usize, stat: Stat, bin_digest: BinDigest, @@ -57,13 +119,13 @@ pub const File = struct { mtime: i128, }; - pub fn deinit(self: *File, allocator: Allocator) void { - if (self.path) |owned_slice| { - allocator.free(owned_slice); - self.path = null; + pub fn deinit(self: *File, gpa: Allocator) void { + if (self.prefixed_path) |pp| { + gpa.free(pp.sub_path); + self.prefixed_path = null; } if (self.contents) |contents| { - allocator.free(contents); + gpa.free(contents); self.contents = null; } self.* = undefined; @@ -175,9 +237,6 @@ pub const Lock = struct { } }; -/// Manifest manages project-local `zig-cache` directories. -/// This is not a general-purpose cache. -/// It is designed to be fast and simple, not to withstand attacks using specially-crafted input. pub const Manifest = struct { cache: *Cache, /// Current state for incremental hashing. @@ -220,21 +279,27 @@ pub const Manifest = struct { pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize { assert(self.manifest_file == null); - try self.files.ensureUnusedCapacity(self.cache.gpa, 1); - const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path}); + const gpa = self.cache.gpa; + try self.files.ensureUnusedCapacity(gpa, 1); + const prefixed_path = try self.cache.findPrefix(file_path); + errdefer gpa.free(prefixed_path.sub_path); + + log.debug("Manifest.addFile {s} -> {d} {s}", .{ + file_path, prefixed_path.prefix, prefixed_path.sub_path, + }); - const idx = self.files.items.len; self.files.addOneAssumeCapacity().* = .{ - .path = resolved_path, + .prefixed_path = prefixed_path, .contents = null, .max_file_size = max_file_size, .stat = undefined, .bin_digest = undefined, }; - self.hash.addBytes(resolved_path); + self.hash.add(prefixed_path.prefix); + self.hash.addBytes(prefixed_path.sub_path); - return idx; + return self.files.items.len - 1; } pub fn hashCSource(self: *Manifest, c_source: Compilation.CSourceFile) !void { @@ -281,6 +346,7 @@ pub const Manifest = struct { /// option, one may call `toOwnedLock` to obtain a smaller object which can represent /// the lock. `deinit` is safe to call whether or not `toOwnedLock` has been called.
pub fn hit(self: *Manifest) !bool { + const gpa = self.cache.gpa; assert(self.manifest_file == null); self.failed_file_index = null; @@ -362,8 +428,8 @@ pub const Manifest = struct { self.want_refresh_timestamp = true; - const file_contents = try self.manifest_file.?.reader().readAllAlloc(self.cache.gpa, manifest_file_size_max); - defer self.cache.gpa.free(file_contents); + const file_contents = try self.manifest_file.?.reader().readAllAlloc(gpa, manifest_file_size_max); + defer gpa.free(file_contents); const input_file_count = self.files.items.len; var any_file_changed = false; @@ -373,9 +439,9 @@ pub const Manifest = struct { defer idx += 1; const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: { - const new = try self.files.addOne(self.cache.gpa); + const new = try self.files.addOne(gpa); new.* = .{ - .path = null, + .prefixed_path = null, .contents = null, .max_file_size = null, .stat = undefined, @@ -389,27 +455,35 @@ pub const Manifest = struct { const inode = iter.next() orelse return error.InvalidFormat; const mtime_nsec_str = iter.next() orelse return error.InvalidFormat; const digest_str = iter.next() orelse return error.InvalidFormat; + const prefix_str = iter.next() orelse return error.InvalidFormat; const file_path = iter.rest(); cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat; cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat; cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat; _ = std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat; + const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat; + if (prefix >= self.cache.prefixes_len) return error.InvalidFormat; if (file_path.len == 0) { return error.InvalidFormat; } - if (cache_hash_file.path) |p| { - if (!mem.eql(u8, file_path, p)) { + if (cache_hash_file.prefixed_path) |pp| { + if (pp.prefix != prefix or !mem.eql(u8, file_path, pp.sub_path)) { return error.InvalidFormat; } } - if (cache_hash_file.path == null) { - cache_hash_file.path = try self.cache.gpa.dupe(u8, file_path); + if (cache_hash_file.prefixed_path == null) { + cache_hash_file.prefixed_path = .{ + .prefix = prefix, + .sub_path = try gpa.dupe(u8, file_path), + }; } - const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .mode = .read_only }) catch |err| switch (err) { + const pp = cache_hash_file.prefixed_path.?; + const dir = self.cache.prefixes()[pp.prefix].handle; + const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) { error.FileNotFound => { try self.upgradeToExclusiveLock(); return false; @@ -535,8 +609,9 @@ pub const Manifest = struct { } fn populateFileHash(self: *Manifest, ch_file: *File) !void { - log.debug("populateFileHash {s}", .{ch_file.path.?}); - const file = try fs.cwd().openFile(ch_file.path.?, .{}); + const pp = ch_file.prefixed_path.?; + const dir = self.cache.prefixes()[pp.prefix].handle; + const file = try dir.openFile(pp.sub_path, .{}); defer file.close(); const actual_stat = try file.stat(); @@ -588,12 +663,17 @@ pub const Manifest = struct { pub fn addFilePostFetch(self: *Manifest, file_path: []const u8, max_file_size: usize) ![]const u8 { assert(self.manifest_file != null); - const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path}); - errdefer self.cache.gpa.free(resolved_path); + const gpa = self.cache.gpa; + const 
prefixed_path = try self.cache.findPrefix(file_path); + errdefer gpa.free(prefixed_path.sub_path); + + log.debug("Manifest.addFilePostFetch {s} -> {d} {s}", .{ + file_path, prefixed_path.prefix, prefixed_path.sub_path, + }); - const new_ch_file = try self.files.addOne(self.cache.gpa); + const new_ch_file = try self.files.addOne(gpa); new_ch_file.* = .{ - .path = resolved_path, + .prefixed_path = prefixed_path, .max_file_size = max_file_size, .stat = undefined, .bin_digest = undefined, @@ -613,12 +693,17 @@ pub const Manifest = struct { pub fn addFilePost(self: *Manifest, file_path: []const u8) !void { assert(self.manifest_file != null); - const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path}); - errdefer self.cache.gpa.free(resolved_path); + const gpa = self.cache.gpa; + const prefixed_path = try self.cache.findPrefix(file_path); + errdefer gpa.free(prefixed_path.sub_path); + + log.debug("Manifest.addFilePost {s} -> {d} {s}", .{ + file_path, prefixed_path.prefix, prefixed_path.sub_path, + }); - const new_ch_file = try self.files.addOne(self.cache.gpa); + const new_ch_file = try self.files.addOne(gpa); new_ch_file.* = .{ - .path = resolved_path, + .prefixed_path = prefixed_path, .max_file_size = null, .stat = undefined, .bin_digest = undefined, @@ -633,17 +718,27 @@ pub const Manifest = struct { /// On success, cache takes ownership of `resolved_path`. pub fn addFilePostContents( self: *Manifest, - resolved_path: []const u8, + resolved_path: []u8, bytes: []const u8, stat: File.Stat, ) error{OutOfMemory}!void { assert(self.manifest_file != null); + const gpa = self.cache.gpa; - const ch_file = try self.files.addOne(self.cache.gpa); + const ch_file = try self.files.addOne(gpa); errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1); + log.debug("Manifest.addFilePostContents resolved_path={s}", .{resolved_path}); + + const prefixed_path = try self.cache.findPrefixResolved(resolved_path); + errdefer gpa.free(prefixed_path.sub_path); + + log.debug("Manifest.addFilePostContents -> {d} {s}", .{ + prefixed_path.prefix, prefixed_path.sub_path, + }); + ch_file.* = .{ - .path = resolved_path, + .prefixed_path = prefixed_path, .max_file_size = null, .stat = stat, .bin_digest = undefined, @@ -742,12 +837,13 @@ pub const Manifest = struct { "{s}", .{std.fmt.fmtSliceHexLower(&file.bin_digest)}, ) catch unreachable; - try writer.print("{d} {d} {d} {s} {s}\n", .{ + try writer.print("{d} {d} {d} {s} {d} {s}\n", .{ file.stat.size, file.stat.inode, file.stat.mtime, &encoded_digest, - file.path.?, + file.prefixed_path.?.prefix, + file.prefixed_path.?.sub_path, }); } @@ -889,6 +985,7 @@ test "cache file and then recall it" { .gpa = testing.allocator, .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}), }; + cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); defer cache.manifest_dir.close(); { @@ -960,6 +1057,7 @@ test "check that changing a file makes cache fail" { .gpa = testing.allocator, .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}), }; + cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); defer cache.manifest_dir.close(); { @@ -1022,6 +1120,7 @@ test "no file inputs" { .gpa = testing.allocator, .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}), }; + cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); defer cache.manifest_dir.close(); { @@ -1080,6 +1179,7 @@ test "Manifest with files added after initial hash work" { .gpa = testing.allocator, .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}), }; + 
cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); defer cache.manifest_dir.close(); { diff --git a/src/Compilation.zig b/src/Compilation.zig index 60064fefd1..795eb493e2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -201,7 +201,9 @@ pub const CRTFile = struct { /// For passing to a C compiler. pub const CSourceFile = struct { src_path: []const u8, - extra_flags: []const []const u8 = &[0][]const u8{}, + extra_flags: []const []const u8 = &.{}, + /// Same as extra_flags except they are not added to the Cache hash. + cache_exempt_flags: []const []const u8 = &.{}, }; const Job = union(enum) { @@ -1456,23 +1458,27 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { else => @as(u8, 3), }; - // We put everything into the cache hash that *cannot be modified during an incremental update*. - // For example, one cannot change the target between updates, but one can change source files, - // so the target goes into the cache hash, but source files do not. This is so that we can - // find the same binary and incrementally update it even if there are modified source files. - // We do this even if outputting to the current directory because we need somewhere to store - // incremental compilation metadata. + // We put everything into the cache hash that *cannot be modified + // during an incremental update*. For example, one cannot change the + // target between updates, but one can change source files, so the + // target goes into the cache hash, but source files do not. This is so + // that we can find the same binary and incrementally update it even if + // there are modified source files. We do this even if outputting to + // the current directory because we need somewhere to store incremental + // compilation metadata. const cache = try arena.create(Cache); cache.* = .{ .gpa = gpa, .manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}), }; + cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(options.zig_lib_directory); + cache.addPrefix(options.local_cache_directory); errdefer cache.manifest_dir.close(); // This is shared hasher state common to zig source and all C source files. 
    cache.hash.addBytes(build_options.version);
    cache.hash.add(builtin.zig_backend);
-    cache.hash.addBytes(options.zig_lib_directory.path orelse ".");
    cache.hash.add(options.optimize_mode);
    cache.hash.add(options.target.cpu.arch);
    cache.hash.addBytes(options.target.cpu.model.name);
@@ -2265,8 +2271,9 @@ pub fn update(comp: *Compilation) !void {
        const is_hit = man.hit() catch |err| {
            // TODO properly bubble these up instead of emitting a warning
            const i = man.failed_file_index orelse return err;
-            const file_path = man.files.items[i].path orelse return err;
-            std.log.warn("{s}: {s}", .{ @errorName(err), file_path });
+            const pp = man.files.items[i].prefixed_path orelse return err;
+            const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
+            std.log.warn("{s}: {s}{s}", .{ @errorName(err), prefix, pp.sub_path });
            return err;
        };
        if (is_hit) {
@@ -3246,13 +3253,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
            const module = comp.bin_file.options.module.?;
            module.semaPkg(pkg) catch |err| switch (err) {
-                error.CurrentWorkingDirectoryUnlinked,
-                error.Unexpected,
-                => comp.lockAndSetMiscFailure(
-                    .analyze_pkg,
-                    "unexpected problem analyzing package '{s}'",
-                    .{pkg.root_src_path},
-                ),
                error.OutOfMemory => return error.OutOfMemory,
                error.AnalysisFail => return,
            };
@@ -3557,7 +3557,14 @@ pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest {
    man.hash.add(comp.sanitize_c);
    man.hash.addListOfBytes(comp.clang_argv);
    man.hash.add(comp.bin_file.options.link_libcpp);
-    man.hash.addListOfBytes(comp.libc_include_dir_list);
+
+    // When libc_installation is null it means that Zig generated this dir list
+    // based on the zig library directory alone. The zig lib directory path is
+    // deliberately either included in or excluded from the cache hash elsewhere;
+    // that decision should not be overridden here.
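+    // (As a sketch: with the zig lib dir at, say, lib/, the generated list
+    // contains include paths such as lib/libc/include/generic-musl, all of
+    // which live under the zig lib directory prefix registered above.)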
+ if (comp.bin_file.options.libc_installation != null) { + man.hash.addListOfBytes(comp.libc_include_dir_list); + } return man; } @@ -3944,6 +3951,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P { try comp.addCCArgs(arena, &argv, ext, null); try argv.appendSlice(c_object.src.extra_flags); + try argv.appendSlice(c_object.src.cache_exempt_flags); const out_obj_path = if (comp.bin_file.options.emit) |emit| try emit.directory.join(arena, &.{emit.sub_path}) @@ -3985,6 +3993,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path}); try comp.addCCArgs(arena, &argv, ext, out_dep_path); try argv.appendSlice(c_object.src.extra_flags); + try argv.appendSlice(c_object.src.cache_exempt_flags); try argv.ensureUnusedCapacity(5); switch (comp.clang_preprocessor_mode) { diff --git a/src/Sema.zig b/src/Sema.zig index a756f8be81..ad78d81800 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -22308,40 +22308,16 @@ fn safetyPanic( src: LazySrcLoc, panic_id: PanicId, ) CompileError!Zir.Inst.Index { - const msg = switch (panic_id) { - .unreach => "reached unreachable code", - .unwrap_null => "attempt to use null value", - .cast_to_null => "cast causes pointer to be null", - .incorrect_alignment => "incorrect alignment", - .invalid_error_code => "invalid error code", - .cast_truncated_data => "integer cast truncated bits", - .negative_to_unsigned => "attempt to cast negative value to unsigned integer", - .integer_overflow => "integer overflow", - .shl_overflow => "left shift overflowed bits", - .shr_overflow => "right shift overflowed bits", - .divide_by_zero => "division by zero", - .exact_division_remainder => "exact division produced remainder", - .inactive_union_field => "access of inactive union field", - .integer_part_out_of_bounds => "integer part of floating point value out of bounds", - .corrupt_switch => "switch on corrupt value", - .shift_rhs_too_big => "shift amount is greater than the type size", - .invalid_enum_value => "invalid enum value", - }; - - const msg_inst = msg_inst: { - // TODO instead of making a new decl for every panic in the entire compilation, - // introduce the concept of a reference-counted decl for these - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - break :msg_inst try sema.analyzeDeclRef(try anon_decl.finish( - try Type.Tag.array_u8.create(anon_decl.arena(), msg.len), - try Value.Tag.bytes.create(anon_decl.arena(), msg), - 0, // default alignment - )); - }; + const panic_messages_ty = try sema.getBuiltinType("panic_messages"); + const msg_decl_index = (try sema.namespaceLookup( + block, + src, + panic_messages_ty.getNamespace().?, + @tagName(panic_id), + )).?; - const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src); - return sema.panicWithMsg(block, src, casted_msg_inst); + const msg_inst = try sema.analyzeDeclVal(block, src, msg_decl_index); + return sema.panicWithMsg(block, src, msg_inst); } fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void { @@ -30926,10 +30902,14 @@ pub fn analyzeAddressSpace( const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); const target = sema.mod.getTarget(); const arch = target.cpu.arch; + const is_nv = arch == .nvptx or arch == .nvptx64; - const is_gpu = is_nv or arch == .amdgcn; + const is_amd = arch == .amdgcn; + const is_spirv = arch == .spirv32 or arch == .spirv64; + const is_gpu = is_nv or is_amd or is_spirv; const 
supported = switch (address_space) { + // TODO: on spir-v only when os is opencl. .generic => true, .gs, .fs, .ss => (arch == .x86 or arch == .x86_64) and ctx == .pointer, // TODO: check that .shared and .local are left uninitialized diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 5c459e7d9b..91e9b5f939 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -18,6 +18,12 @@ const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const CType = @import("../type.zig").CType; +const target_util = @import("../target.zig"); +const libcFloatPrefix = target_util.libcFloatPrefix; +const libcFloatSuffix = target_util.libcFloatSuffix; +const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; +const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; + const Mutability = enum { Const, ConstArgument, Mut }; const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -733,7 +739,7 @@ pub const DeclGen = struct { try dg.fmtIntLiteral(ty.errorUnionSet(), val), }); }, - .Array => { + .Array, .Vector => { if (location != .Initializer) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); @@ -770,10 +776,10 @@ pub const DeclGen = struct { .BoundFn, .Opaque, => unreachable, + .Fn, .Frame, .AnyFrame, - .Vector, => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ @tagName(tag), }), @@ -922,7 +928,7 @@ pub const DeclGen = struct { => try dg.renderParentPtr(writer, val, ty), else => unreachable, }, - .Array => { + .Array, .Vector => { if (location == .FunctionArgument) { try writer.writeByte('('); try dg.renderTypecast(writer, ty); @@ -1200,7 +1206,6 @@ pub const DeclGen = struct { .Frame, .AnyFrame, - .Vector, => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ @tagName(tag), }), @@ -1746,7 +1751,7 @@ pub const DeclGen = struct { if (t.isVolatilePtr()) try w.writeAll(" volatile"); return w.writeAll(" *"); }, - .Array => { + .Array, .Vector => { var array_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ .len = t.arrayLenIncludingSentinel(), .elem_type = t.childType(), @@ -1859,7 +1864,6 @@ pub const DeclGen = struct { .Frame, .AnyFrame, - .Vector, => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ @tagName(tag), }), @@ -3180,25 +3184,43 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const rhs = try f.resolveInst(bin_op.rhs); const inst_ty = f.air.typeOfIndex(inst); - const scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(); + const vector_ty = f.air.typeOf(bin_op.lhs); + const scalar_ty = vector_ty.scalarType(); const w = f.object.writer(); const local = try f.allocLocal(inst_ty, .Mut); try w.writeAll(";\n"); - try f.writeCValueMember(w, local, .{ .field = 1 }); - try w.writeAll(" = zig_"); - try w.writeAll(operation); - try w.writeAll("o_"); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); - try w.writeAll("(&"); - try f.writeCValueMember(w, local, .{ .field = 0 }); - try w.writeAll(", "); + switch (vector_ty.zigTypeTag()) { + .Vector => { + try w.writeAll("zig_v"); + try w.writeAll(operation); + try w.writeAll("o_"); + try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try w.writeAll("("); + try f.writeCValueMember(w, local, .{ .field = 1 }); + try w.writeAll(", "); + try f.writeCValueMember(w, local, .{ .field = 0 }); + try w.print(", {d}, ", .{vector_ty.vectorLen()}); + }, + else => { + try f.writeCValueMember(w, local, .{ .field = 1 }); + try w.writeAll(" = zig_"); + try w.writeAll(operation); + try 
w.writeAll("o_"); + try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try w.writeAll("(&"); + try f.writeCValueMember(w, local, .{ .field = 0 }); + try w.writeAll(", "); + }, + } + try f.writeCValue(w, lhs, .FunctionArgument); try w.writeAll(", "); try f.writeCValue(w, rhs, .FunctionArgument); try f.object.dg.renderBuiltinInfo(w, scalar_ty, info); try w.writeAll(");\n"); + return local; } @@ -5206,16 +5228,153 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; - const inst_ty = f.air.typeOfIndex(inst); + const target = f.object.dg.module.getTarget(); + const scalar_ty = f.air.typeOfIndex(inst); const reduce = f.air.instructions.items(.data)[inst].reduce; const operand = try f.resolveInst(reduce.operand); + const operand_ty = f.air.typeOf(reduce.operand); + const vector_len = operand_ty.vectorLen(); const writer = f.object.writer(); - const local = try f.allocLocal(inst_ty, .Const); + + const Op = union(enum) { + call_fn: []const u8, + infix: []const u8, + ternary: []const u8, + }; + var fn_name_buf: [64]u8 = undefined; + const op: Op = switch (reduce.operation) { + .And => .{ .infix = " &= " }, + .Or => .{ .infix = " |= " }, + .Xor => .{ .infix = " ^= " }, + .Min => switch (scalar_ty.zigTypeTag()) { + .Int => Op{ .ternary = " < " }, + .Float => op: { + const float_bits = scalar_ty.floatBits(target); + break :op Op{ + .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{ + libcFloatPrefix(float_bits), libcFloatSuffix(float_bits), + }) catch unreachable, + }; + }, + else => unreachable, + }, + .Max => switch (scalar_ty.zigTypeTag()) { + .Int => Op{ .ternary = " > " }, + .Float => op: { + const float_bits = scalar_ty.floatBits(target); + break :op Op{ + .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{ + libcFloatPrefix(float_bits), libcFloatSuffix(float_bits), + }) catch unreachable, + }; + }, + else => unreachable, + }, + .Add => switch (scalar_ty.zigTypeTag()) { + .Int => Op{ .infix = " += " }, + .Float => op: { + const float_bits = scalar_ty.floatBits(target); + break :op Op{ + .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{ + compilerRtFloatAbbrev(float_bits), + }) catch unreachable, + }; + }, + else => unreachable, + }, + .Mul => switch (scalar_ty.zigTypeTag()) { + .Int => Op{ .infix = " *= " }, + .Float => op: { + const float_bits = scalar_ty.floatBits(target); + break :op Op{ + .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{ + compilerRtFloatAbbrev(float_bits), + }) catch unreachable, + }; + }, + else => unreachable, + }, + }; + + // Reduce a vector by repeatedly applying a function to produce an + // accumulated result. 
+ // + // Equivalent to: + // reduce: { + // var i: usize = 0; + // var accum: T = init; + // while (i < vec.len) : (i += 1) { + // accum = func(accum, vec[i]); + // } + // break :reduce accum; + // } + const it = try f.allocLocal(Type.usize, .Mut); + try writer.writeAll(" = 0;\n"); + + const accum = try f.allocLocal(scalar_ty, .Mut); try writer.writeAll(" = "); - _ = operand; - _ = local; - return f.fail("TODO: C backend: implement airReduce", .{}); + const init_val = switch (reduce.operation) { + .And, .Or, .Xor, .Add => "0", + .Min => switch (scalar_ty.zigTypeTag()) { + .Int => "TODO_intmax", + .Float => "TODO_nan", + else => unreachable, + }, + .Max => switch (scalar_ty.zigTypeTag()) { + .Int => "TODO_intmin", + .Float => "TODO_nan", + else => unreachable, + }, + .Mul => "1", + }; + try writer.writeAll(init_val); + try writer.writeAll(";"); + try f.object.indent_writer.insertNewline(); + try writer.writeAll("for(;"); + try f.writeCValue(writer, it, .Other); + try writer.print("<{d};++", .{vector_len}); + try f.writeCValue(writer, it, .Other); + try writer.writeAll(") "); + try f.writeCValue(writer, accum, .Other); + + switch (op) { + .call_fn => |fn_name| { + try writer.print(" = {s}(", .{fn_name}); + try f.writeCValue(writer, accum, .FunctionArgument); + try writer.writeAll(", "); + try f.writeCValue(writer, operand, .Other); + try writer.writeAll("["); + try f.writeCValue(writer, it, .Other); + try writer.writeAll("])"); + }, + .infix => |ass| { + try writer.writeAll(ass); + try f.writeCValue(writer, operand, .Other); + try writer.writeAll("["); + try f.writeCValue(writer, it, .Other); + try writer.writeAll("]"); + }, + .ternary => |cmp| { + try writer.writeAll(" = "); + try f.writeCValue(writer, accum, .Other); + try writer.writeAll(cmp); + try f.writeCValue(writer, operand, .Other); + try writer.writeAll("["); + try f.writeCValue(writer, it, .Other); + try writer.writeAll("] ? 
"); + try f.writeCValue(writer, accum, .Other); + try writer.writeAll(" : "); + try f.writeCValue(writer, operand, .Other); + try writer.writeAll("["); + try f.writeCValue(writer, it, .Other); + try writer.writeAll("]"); + }, + } + + try writer.writeAll(";\n"); + + return accum; } fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { @@ -5234,7 +5393,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst_ty, mutability); try writer.writeAll(" = "); switch (inst_ty.zigTypeTag()) { - .Array => { + .Array, .Vector => { const elem_ty = inst_ty.childType(); try writer.writeByte('{'); var empty = true; @@ -5354,7 +5513,6 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); }, }, - .Vector => return f.fail("TODO: C backend: implement airAggregateInit for vectors", .{}), else => unreachable, } @@ -5868,7 +6026,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) T fn lowersToArray(ty: Type, target: std.Target) bool { return switch (ty.zigTypeTag()) { - .Array => return true, + .Array, .Vector => return true, else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null, }; } @@ -5877,7 +6035,7 @@ fn loweredArrayInfo(ty: Type, target: std.Target) ?Type.ArrayInfo { if (!lowersToArray(ty, target)) return null; switch (ty.zigTypeTag()) { - .Array => return ty.arrayInfo(), + .Array, .Vector => return ty.arrayInfo(), else => { const abi_size = ty.abiSize(target); const abi_align = ty.abiAlignment(target); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 67073ac56e..a78b201a0a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -16,7 +16,6 @@ const Package = @import("../Package.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -const target_util = @import("../target.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const LazySrcLoc = Module.LazySrcLoc; @@ -27,6 +26,12 @@ const aarch64_c_abi = @import("../arch/aarch64/abi.zig"); const arm_c_abi = @import("../arch/arm/abi.zig"); const riscv_c_abi = @import("../arch/riscv64/abi.zig"); +const target_util = @import("../target.zig"); +const libcFloatPrefix = target_util.libcFloatPrefix; +const libcFloatSuffix = target_util.libcFloatSuffix; +const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev; +const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; + const Error = error{ OutOfMemory, CodegenFail }; pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 { @@ -7328,46 +7333,6 @@ pub const FuncGen = struct { }; } - fn libcFloatPrefix(float_bits: u16) []const u8 { - return switch (float_bits) { - 16, 80 => "__", - 32, 64, 128 => "", - else => unreachable, - }; - } - - fn libcFloatSuffix(float_bits: u16) []const u8 { - return switch (float_bits) { - 16 => "h", // Non-standard - 32 => "f", - 64 => "", - 80 => "x", // Non-standard - 128 => "q", // Non-standard (mimics convention in GCC libquadmath) - else => unreachable, - }; - } - - fn compilerRtFloatAbbrev(float_bits: u16) []const u8 { - return switch (float_bits) { - 16 => "h", - 32 => "s", - 64 => "d", - 80 => "x", - 128 => "t", - else => unreachable, - }; - } - - fn compilerRtIntAbbrev(bits: u16) []const u8 { - return switch (bits) { - 16 => "h", - 32 => "s", - 64 => "d", - 128 => "t", - else => "o", // Non-standard - }; - } - /// Creates a floating point comparison 
by lowering to the appropriate /// hardware instruction or softfloat routine for the target fn buildFloatCmp( @@ -9034,12 +8999,10 @@ pub const FuncGen = struct { const target = self.dg.module.getTarget(); const reduce = self.air.instructions.items(.data)[inst].reduce; - var operand = try self.resolveInst(reduce.operand); + const operand = try self.resolveInst(reduce.operand); const operand_ty = self.air.typeOf(reduce.operand); const scalar_ty = self.air.typeOfIndex(inst); - // TODO handle the fast math setting - switch (reduce.operation) { .And => return self.builder.buildAndReduce(operand), .Or => return self.builder.buildOrReduce(operand), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 9879bc7f35..ada3918baf 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -10,6 +10,7 @@ const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); +const Zir = @import("../Zir.zig"); const Liveness = @import("../Liveness.zig"); const spec = @import("spirv/spec.zig"); @@ -22,6 +23,7 @@ const IdResultType = spec.IdResultType; const SpvModule = @import("spirv/Module.zig"); const SpvSection = @import("spirv/Section.zig"); const SpvType = @import("spirv/type.zig").Type; +const SpvAssembler = @import("spirv/Assembler.zig"); const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef); @@ -37,10 +39,13 @@ pub const BlockMap = std.AutoHashMapUnmanaged(Air.Inst.Index, struct { /// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that. pub const DeclGen = struct { + /// A general-purpose allocator that can be used for any allocations for this DeclGen. + gpa: Allocator, + /// The Zig module that we are generating decls for. module: *Module, - /// The SPIR-V module code should be put in. + /// The SPIR-V module that instructions should be emitted into. spv: *SpvModule, /// The decl we are currently generating code for. @@ -71,18 +76,14 @@ pub const DeclGen = struct { /// The label of the SPIR-V block we are currently generating. current_block_label_id: IdRef, - /// The actual instructions for this function. We need to declare all locals in - /// the first block, and because we don't know which locals there are going to be, - /// we're just going to generate everything after the locals-section in this array. - /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the - /// initial OpLabel. These will be generated into spv.sections.functions directly. - code: SpvSection = .{}, + /// The code (prologue and body) for the function we are currently generating code for. + func: SpvModule.Fn = .{}, /// If `gen` returned `Error.CodegenFail`, this contains an explanatory message. /// Memory is owned by `module.gpa`. error_msg: ?*Module.ErrorMsg, - /// Possible errors the `gen` function may return. + /// Possible errors the `genDecl` function may return. const Error = error{ CodegenFail, OutOfMemory }; /// This structure is used to return information about a type typically used for @@ -132,8 +133,9 @@ pub const DeclGen = struct { /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, /// only set when `gen` is called. 
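    // A call-site sketch of the new three-argument form (variable names
    // illustrative):
    //
    //     var decl_gen = DeclGen.init(gpa, module, spv_module);
    //     defer decl_gen.deinit();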
- pub fn init(module: *Module, spv: *SpvModule) DeclGen { + pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen { return .{ + .gpa = allocator, .module = module, .spv = spv, .decl = undefined, @@ -158,12 +160,19 @@ pub const DeclGen = struct { self.inst_results.clearRetainingCapacity(); self.blocks.clearRetainingCapacity(); self.current_block_label_id = undefined; - self.code.reset(); + self.func.reset(); self.error_msg = null; self.genDecl() catch |err| switch (err) { error.CodegenFail => return self.error_msg, - else => |others| return others, + else => |others| { + // There might be an error that happened *after* self.error_msg + // was already allocated, so be sure to free it. + if (self.error_msg) |error_msg| { + error_msg.deinit(self.module.gpa); + } + return others; + }, }; return null; @@ -171,18 +180,18 @@ pub const DeclGen = struct { /// Free resources owned by the DeclGen. pub fn deinit(self: *DeclGen) void { - self.args.deinit(self.spv.gpa); - self.inst_results.deinit(self.spv.gpa); - self.blocks.deinit(self.spv.gpa); - self.code.deinit(self.spv.gpa); + self.args.deinit(self.gpa); + self.inst_results.deinit(self.gpa); + self.blocks.deinit(self.gpa); + self.func.deinit(self.gpa); } /// Return the target which we are currently compiling for. - fn getTarget(self: *DeclGen) std.Target { + pub fn getTarget(self: *DeclGen) std.Target { return self.module.getTarget(); } - fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { + pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); const src = LazySrcLoc.nodeOffset(0); const src_loc = src.toSrcLoc(self.decl); @@ -191,13 +200,8 @@ pub const DeclGen = struct { return error.CodegenFail; } - fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { - @setCold(true); - const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(self.decl); - assert(self.error_msg == null); - self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "TODO (SPIR-V): " ++ format, args); - return error.CodegenFail; + pub fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { + return self.fail("TODO (SPIR-V): " ++ format, args); } /// Fetch the result-id for a previously generated instruction or constant. @@ -214,7 +218,7 @@ pub const DeclGen = struct { /// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to /// keep track of the previous block. fn beginSpvBlock(self: *DeclGen, label_id: IdResult) !void { - try self.code.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id }); + try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id }); self.current_block_label_id = label_id.toRef(); } @@ -320,6 +324,17 @@ pub const DeclGen = struct { /// Generate a constant representing `val`. /// TODO: Deduplication? 
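    // For instance (a sketch): a `@Vector(4, u32)` constant lowers to an
    // OpConstantComposite whose constituents are four scalar constants, and a
    // function value resolves to the result-id of the decl that owns it.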
fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!IdRef { + if (ty.zigTypeTag() == .Fn) { + const fn_decl_index = switch (val.tag()) { + .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, + .function => val.castTag(.function).?.data.owner_decl, + else => unreachable, + }; + const decl = self.module.declPtr(fn_decl_index); + self.module.markDeclAlive(decl); + return decl.fn_link.spirv.id.toRef(); + } + const target = self.getTarget(); const section = &self.spv.sections.types_globals_constants; const result_id = self.spv.allocId(); @@ -387,7 +402,27 @@ pub const DeclGen = struct { .value = value, }); }, + .Vector => switch (val.tag()) { + .aggregate => { + const elem_vals = val.castTag(.aggregate).?.data; + const vector_len = @intCast(usize, ty.vectorLen()); + const elem_ty = ty.elemType(); + + const elem_refs = try self.gpa.alloc(IdRef, vector_len); + defer self.gpa.free(elem_refs); + for (elem_refs) |*elem, i| { + elem.* = try self.genConstant(elem_ty, elem_vals[i]); + } + try section.emit(self.spv.gpa, .OpConstantComposite, .{ + .id_result_type = result_type_id, + .id_result = result_id, + .constituents = elem_refs, + }); + }, + else => unreachable, // TODO + }, .Void => unreachable, + .Fn => unreachable, else => return self.todo("constant generation of type {}", .{ty.fmtDebug()}), } @@ -396,7 +431,8 @@ pub const DeclGen = struct { /// Turn a Zig type into a SPIR-V Type, and return its type result-id. fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType { - return self.spv.typeResultId(try self.resolveType(ty)); + const type_ref = try self.resolveType(ty); + return self.spv.typeResultId(type_ref); } /// Turn a Zig type into a SPIR-V Type, and return a reference to it. @@ -447,8 +483,8 @@ pub const DeclGen = struct { break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base)); }, .Fn => blk: { - // We only support zig-calling-convention functions, no varargs. - if (ty.fnCallingConvention() != .Unspecified) + // We only support C-calling-convention functions for now, no varargs. + if (ty.fnCallingConvention() != .C) return self.fail("Unsupported calling convention for SPIR-V", .{}); if (ty.fnIsVarArgs()) return self.fail("VarArgs functions are unsupported for SPIR-V", .{}); @@ -464,11 +500,19 @@ pub const DeclGen = struct { payload.* = .{ .return_type = return_type, .parameters = param_types }; break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base)); }, - .Pointer => { - // This type can now be properly implemented, but we still need to implement the storage classes as proper address spaces. - return self.todo("Implement type Pointer properly", .{}); + .Pointer => blk: { + const payload = try self.spv.arena.create(SpvType.Payload.Pointer); + payload.* = .{ + .storage_class = spirvStorageClass(ty.ptrAddressSpace()), + .child_type = try self.resolveType(ty.elemType()), + .array_stride = 0, + // Note: only available in Kernels! + .alignment = null, + .max_byte_offset = null, + }; + break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base)); }, - .Vector => { + .Vector => blk: { // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations // which work on them), so simply use those. // Note: SPIR-V vectors only support bools, ints and floats, so pointer vectors need to be supported another way. @@ -476,8 +520,14 @@ pub const DeclGen = struct { // TODO: The SPIR-V spec mentions that vector sizes may be quite restricted! 
look into which we can use, and whether OpTypeVector // is adequate at all for this. - // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. - return self.todo("Implement type Vector", .{}); + // TODO: Properly verify sizes and child type. + + const payload = try self.spv.arena.create(SpvType.Payload.Vector); + payload.* = .{ + .component_type = try self.resolveType(ty.elemType()), + .component_count = @intCast(u32, ty.vectorLen()), + }; + break :blk try self.spv.resolveType(SpvType.initPayload(&payload.base)); }, .Null, @@ -494,25 +544,14 @@ pub const DeclGen = struct { }; } - /// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that. - /// TODO: The result of this needs to be cached. - fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !IdResultType { - assert(ty.zigTypeTag() == .Pointer); - - const result_id = self.spv.allocId(); - - // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types - // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled. - // These also relates to the pointer's address space. - const child_id = try self.resolveTypeId(ty.elemType()); - - try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{ - .id_result = result_id, - .storage_class = storage_class, - .type = child_id.toRef(), - }); - - return result_id.toResultType(); + fn spirvStorageClass(as: std.builtin.AddressSpace) spec.StorageClass { + return switch (as) { + .generic => .Generic, // TODO: Disallow? + .gs, .fs, .ss => unreachable, + .shared => .Workgroup, + .local => .Private, + .global, .param, .constant => unreachable, + }; } fn genDecl(self: *DeclGen) !void { @@ -522,7 +561,7 @@ pub const DeclGen = struct { if (decl.val.castTag(.function)) |_| { assert(decl.ty.zigTypeTag() == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); - try self.spv.sections.functions.emit(self.spv.gpa, .OpFunction, .{ + try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), .id_result = result_id, .function_control = .{}, // TODO: We can set inline here if the type requires it. @@ -532,11 +571,11 @@ pub const DeclGen = struct { const params = decl.ty.fnParamLen(); var i: usize = 0; - try self.args.ensureUnusedCapacity(self.spv.gpa, params); + try self.args.ensureUnusedCapacity(self.gpa, params); while (i < params) : (i += 1) { const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i)); const arg_result_id = self.spv.allocId(); - try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionParameter, .{ + try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{ .id_result_type = param_type_id, .id_result = arg_result_id, }); @@ -546,9 +585,9 @@ pub const DeclGen = struct { // TODO: This could probably be done in a better way... const root_block_id = self.spv.allocId(); - // We need to generate the label directly in the functions section here because we're going to write the local variables after - // here. Since we're not generating in self.code, we're just going to bypass self.beginSpvBlock here. - try self.spv.sections.functions.emit(self.spv.gpa, .OpLabel, .{ + // The root block of a function declaration should appear before OpVariable instructions, + // so it is generated into the function's prologue. 
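+            // Illustrative layout of an emitted function:
+            //   prologue: OpFunction, OpFunctionParameter..., OpLabel, OpVariable...
+            //   body:     the remaining code, terminated by OpFunctionEnd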
+ try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{ .id_result = root_block_id, }); self.current_block_label_id = root_block_id.toRef(); @@ -557,8 +596,8 @@ pub const DeclGen = struct { try self.genBody(main_body); // Append the actual code into the functions section. - try self.spv.sections.functions.append(self.spv.gpa, self.code); - try self.spv.sections.functions.emit(self.spv.gpa, .OpFunctionEnd, {}); + try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); + try self.spv.addFunction(self.func); } else { // TODO // return self.todo("generate decl type {}", .{decl.ty.zigTypeTag()}); @@ -579,6 +618,8 @@ pub const DeclGen = struct { .sub, .subwrap => try self.airArithOp(inst, .OpFSub, .OpISub, .OpISub), .mul, .mulwrap => try self.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul), + .shuffle => try self.airShuffle(inst), + .bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd), .bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr), .xor => try self.airBinOpSimple(inst, .OpBitwiseXor), @@ -608,14 +649,18 @@ pub const DeclGen = struct { .ret => return self.airRet(inst), .store => return self.airStore(inst), .unreach => return self.airUnreach(), + .assembly => (try self.airAssembly(inst)) orelse return, + + .dbg_var_ptr => return, + .dbg_var_val => return, + .dbg_block_begin => return, + .dbg_block_end => return, // zig fmt: on - else => |tag| return self.todo("implement AIR tag {s}", .{ - @tagName(tag), - }), + else => |tag| return self.todo("implement AIR tag {s}", .{@tagName(tag)}), }; - try self.inst_results.putNoClobber(self.spv.gpa, inst, result_id); + try self.inst_results.putNoClobber(self.gpa, inst, result_id); } fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, comptime opcode: Opcode) !IdRef { @@ -624,7 +669,7 @@ pub const DeclGen = struct { const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); - try self.code.emit(self.spv.gpa, opcode, .{ + try self.func.body.emit(self.spv.gpa, opcode, .{ .id_result_type = result_type_id, .id_result = result_id, .operand_1 = lhs_id, @@ -680,9 +725,9 @@ pub const DeclGen = struct { }; switch (opcode_index) { - 0 => try self.code.emit(self.spv.gpa, fop, operands), - 1 => try self.code.emit(self.spv.gpa, sop, operands), - 2 => try self.code.emit(self.spv.gpa, uop, operands), + 0 => try self.func.body.emit(self.spv.gpa, fop, operands), + 1 => try self.func.body.emit(self.spv.gpa, sop, operands), + 2 => try self.func.body.emit(self.spv.gpa, uop, operands), else => unreachable, } // TODO: Trap on overflow? Probably going to be annoying. @@ -691,6 +736,41 @@ pub const DeclGen = struct { return result_id.toRef(); } + fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !IdRef { + const ty = self.air.typeOfIndex(inst); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; + const a = try self.resolve(extra.a); + const b = try self.resolve(extra.b); + const mask = self.air.values[extra.mask]; + const mask_len = extra.mask_len; + const a_len = self.air.typeOf(extra.a).vectorLen(); + + const result_id = self.spv.allocId(); + const result_type_id = try self.resolveTypeId(ty); + // Similar to LLVM, SPIR-V uses indices larger than the length of the first vector + // to index into the second vector. 
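+        // For example (a sketch): with `a` of length 4, the AIR mask element
+        // -2 (~-2 == 1) refers to b[1] and is encoded as 4 + 1 = 5; undef
+        // mask elements are encoded as 0xFFFF_FFFF below.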
+ try self.func.body.emitRaw(self.spv.gpa, .OpVectorShuffle, 4 + mask_len); + self.func.body.writeOperand(spec.IdResultType, result_type_id); + self.func.body.writeOperand(spec.IdResult, result_id); + self.func.body.writeOperand(spec.IdRef, a); + self.func.body.writeOperand(spec.IdRef, b); + + var i: usize = 0; + while (i < mask_len) : (i += 1) { + var buf: Value.ElemValueBuffer = undefined; + const elem = mask.elemValueBuffer(self.module, i, &buf); + if (elem.isUndef()) { + self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); + } else { + const int = elem.toSignedInt(); + const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); + self.func.body.writeOperand(spec.LiteralInteger, unsigned); + } + } + return result_id.toRef(); + } + fn airCmp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); @@ -727,9 +807,9 @@ pub const DeclGen = struct { }; switch (opcode_index) { - 0 => try self.code.emit(self.spv.gpa, fop, operands), - 1 => try self.code.emit(self.spv.gpa, sop, operands), - 2 => try self.code.emit(self.spv.gpa, uop, operands), + 0 => try self.func.body.emit(self.spv.gpa, fop, operands), + 1 => try self.func.body.emit(self.spv.gpa, sop, operands), + 2 => try self.func.body.emit(self.spv.gpa, uop, operands), else => unreachable, } @@ -741,7 +821,7 @@ pub const DeclGen = struct { const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(Type.initTag(.bool)); - try self.code.emit(self.spv.gpa, .OpLogicalNot, .{ + try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{ .id_result_type = result_type_id, .id_result = result_id, .operand = operand_id, @@ -751,13 +831,18 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !IdRef { const ty = self.air.typeOfIndex(inst); - const storage_class = spec.StorageClass.Function; - const result_type_id = try self.genPointerType(ty, storage_class); + const result_type_id = try self.resolveTypeId(ty); const result_id = self.spv.allocId(); // Rather than generating into code here, we're just going to generate directly into the functions section so that // variable declarations appear in the first block of the function. - try self.spv.sections.functions.emit(self.spv.gpa, .OpVariable, .{ + const storage_class = spirvStorageClass(ty.ptrAddressSpace()); + const section = if (storage_class == .Function) + &self.func.prologue + else + &self.spv.sections.types_globals_constants; + + try section.emit(self.spv.gpa, .OpVariable, .{ .id_result_type = result_type_id, .id_result = result_id, .storage_class = storage_class, @@ -779,15 +864,15 @@ pub const DeclGen = struct { const label_id = self.spv.allocId(); // 4 chosen as arbitrary initial capacity. 
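        // Each `br` that targets this block appends a (break value, source
        // label) pair here; airPhi later turns those pairs into the operands
        // of an OpPhi instruction.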
-        var incoming_blocks = try std.ArrayListUnmanaged(IncomingBlock).initCapacity(self.spv.gpa, 4);
+        var incoming_blocks = try std.ArrayListUnmanaged(IncomingBlock).initCapacity(self.gpa, 4);

-        try self.blocks.putNoClobber(self.spv.gpa, inst, .{
+        try self.blocks.putNoClobber(self.gpa, inst, .{
            .label_id = label_id.toRef(),
            .incoming_blocks = &incoming_blocks,
        });
        defer {
            assert(self.blocks.remove(inst));
-            incoming_blocks.deinit(self.spv.gpa);
+            incoming_blocks.deinit(self.gpa);
        }

        const ty = self.air.typeOfIndex(inst);
@@ -807,15 +892,14 @@
        const result_id = self.spv.allocId();

        // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
-        // are not allowed to be created from a phi node, and throw an error for those. For now, resolveTypeId already throws
-        // an error for pointers.
+        // are not allowed to be created from a phi node, and throw an error for those.
        const result_type_id = try self.resolveTypeId(ty);
        _ = result_type_id;

-        try self.code.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
+        try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...

        for (incoming_blocks.items) |incoming| {
-            self.code.writeOperand(spec.PairIdRefIdRef, .{ incoming.break_value_id, incoming.src_label_id });
+            self.func.body.writeOperand(spec.PairIdRefIdRef, .{ incoming.break_value_id, incoming.src_label_id });
        }

        return result_id.toRef();
@@ -829,10 +913,10 @@
        if (operand_ty.hasRuntimeBits()) {
            const operand_id = try self.resolve(br.operand);
            // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
-            try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
+            try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
        }

-        try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id });
+        try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id });
    }

    fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -849,7 +933,7 @@
        // TODO: We can generate OpSelectionMerge here if we know the target block that both of these will resolve to,
        // but I don't know if those will always resolve to the same block.
- try self.code.emit(self.spv.gpa, .OpBranchConditional, .{ + try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{ .condition = condition_id, .true_label = then_label_id.toRef(), .false_label = else_label_id.toRef(), @@ -864,7 +948,7 @@ pub const DeclGen = struct { fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); - try self.code.emit(self.spv.gpa, .OpLine, .{ + try self.func.body.emit(self.spv.gpa, .OpLine, .{ .file = src_fname_id, .line = dbg_stmt.line, .column = dbg_stmt.column, @@ -883,7 +967,7 @@ pub const DeclGen = struct { .Volatile = ty.isVolatilePtr(), }; - try self.code.emit(self.spv.gpa, .OpLoad, .{ + try self.func.body.emit(self.spv.gpa, .OpLoad, .{ .id_result_type = result_type_id, .id_result = result_id, .pointer = operand_id, @@ -900,13 +984,13 @@ pub const DeclGen = struct { const loop_label_id = self.spv.allocId(); // Jump to the loop entry point - try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() }); + try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() }); // TODO: Look into OpLoopMerge. try self.beginSpvBlock(loop_label_id); try self.genBody(body); - try self.code.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() }); + try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id.toRef() }); } fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { @@ -914,9 +998,9 @@ pub const DeclGen = struct { const operand_ty = self.air.typeOf(operand); if (operand_ty.hasRuntimeBits()) { const operand_id = try self.resolve(operand); - try self.code.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); + try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); } else { - try self.code.emit(self.spv.gpa, .OpReturn, {}); + try self.func.body.emit(self.spv.gpa, .OpReturn, {}); } } @@ -930,7 +1014,7 @@ pub const DeclGen = struct { .Volatile = lhs_ty.isVolatilePtr(), }; - try self.code.emit(self.spv.gpa, .OpStore, .{ + try self.func.body.emit(self.spv.gpa, .OpStore, .{ .pointer = dst_ptr_id, .object = src_val_id, .memory_access = access, @@ -938,6 +1022,134 @@ pub const DeclGen = struct { } fn airUnreach(self: *DeclGen) !void { - try self.code.emit(self.spv.gpa, .OpUnreachable, {}); + try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); + } + + fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Asm, ty_pl.payload); + + const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; + const clobbers_len = @truncate(u31, extra.data.flags); + + if (!is_volatile and self.liveness.isUnused(inst)) return null; + + var extra_i: usize = extra.end; + const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + extra_i += outputs.len; + const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + extra_i += inputs.len; + + if (outputs.len > 1) { + return self.todo("implement inline asm with more than 1 output", .{}); + } + + var output_extra_i = extra_i; + for (outputs) |output| { + if (output != .none) { + return self.todo("implement inline asm with non-returned output", .{}); + } + const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); + const constraint = 
std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0); + extra_i += (constraint.len + name.len + (2 + 3)) / 4; + // TODO: Record output and use it somewhere. + } + + var input_extra_i = extra_i; + for (inputs) |input| { + const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); + const constraint = std.mem.sliceTo(extra_bytes, 0); + const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. + extra_i += (constraint.len + name.len + (2 + 3)) / 4; + // TODO: Record input and use it somewhere. + _ = input; + } + + { + var clobber_i: u32 = 0; + while (clobber_i < clobbers_len) : (clobber_i += 1) { + const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + extra_i += clobber.len / 4 + 1; + // TODO: Record clobber and use it somewhere. + } + } + + const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; + + var as = SpvAssembler{ + .gpa = self.gpa, + .src = asm_source, + .spv = self.spv, + .func = &self.func, + }; + defer as.deinit(); + + for (inputs) |input| { + const extra_bytes = std.mem.sliceAsBytes(self.air.extra[input_extra_i..]); + const constraint = std.mem.sliceTo(extra_bytes, 0); + const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. + input_extra_i += (constraint.len + name.len + (2 + 3)) / 4; + + const value = try self.resolve(input); + try as.value_map.put(as.gpa, name, .{ .value = value }); + } + + as.assemble() catch |err| switch (err) { + error.AssembleFail => { + // TODO: For now the compiler only supports a single error message per decl, + // so to translate the possible multiple errors from the assembler, emit + // them as notes here. + // TODO: Translate proper error locations. + assert(as.errors.items.len != 0); + assert(self.error_msg == null); + const loc = LazySrcLoc.nodeOffset(0); + const src_loc = loc.toSrcLoc(self.decl); + self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); + const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); + + // Sub-scope to prevent `return error.CodegenFail` from running the errdefers. 
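+                // If ErrorMsg.init fails partway through, these errdefers free
+                // the notes built so far together with the array itself; on
+                // success, ownership of `notes` moves to self.error_msg below.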
+                {
+                    errdefer self.module.gpa.free(notes);
+                    var i: usize = 0;
+                    errdefer for (notes[0..i]) |*note| {
+                        note.deinit(self.module.gpa);
+                    };
+
+                    while (i < as.errors.items.len) : (i += 1) {
+                        notes[i] = try Module.ErrorMsg.init(self.module.gpa, src_loc, "{s}", .{as.errors.items[i].msg});
+                    }
+                }
+                self.error_msg.?.notes = notes;
+                return error.CodegenFail;
+            },
+            else => |others| return others,
+        };
+
+        for (outputs) |output| {
+            _ = output;
+            const extra_bytes = std.mem.sliceAsBytes(self.air.extra[output_extra_i..]);
+            const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[output_extra_i..]), 0);
+            const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+            output_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
+
+            const result = as.value_map.get(name) orelse return {
+                return self.fail("invalid asm output '{s}'", .{name});
+            };
+
+            switch (result) {
+                .just_declared, .unresolved_forward_reference => unreachable,
+                .ty => return self.fail("cannot return spir-v type as value from assembly", .{}),
+                .value => |ref| return ref,
+            }
+
+            // TODO: Multiple results
+        }
+
+        return null;
+    }
};
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
new file mode 100644
index 0000000000..fc4ab406b9
--- /dev/null
+++ b/src/codegen/spirv/Assembler.zig
@@ -0,0 +1,1017 @@
+const Assembler = @This();
+
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+
+const spec = @import("spec.zig");
+const Opcode = spec.Opcode;
+const Word = spec.Word;
+const IdRef = spec.IdRef;
+const IdResult = spec.IdResult;
+
+const SpvModule = @import("Module.zig");
+const SpvType = @import("type.zig").Type;
+
+/// Represents a token in the assembly template.
+const Token = struct {
+    tag: Tag,
+    start: u32,
+    end: u32,
+
+    const Tag = enum {
+        /// Returned when there was no more input to match.
+        eof,
+        /// %identifier
+        result_id,
+        /// %identifier when appearing on the LHS of an equals sign.
+        /// While not technically a token, it's relatively easy to resolve
+        /// this during lexical analysis and relieves a bunch of headaches
+        /// during parsing.
+        result_id_assign,
+        /// Mask, int, or float. These are grouped together as some
+        /// SPIR-V enumerants look a bit like integers as well (for example
+        /// "3D"), and so it is easier to just interpret them as the expected
+        /// type when resolving an instruction's operands.
+        value,
+        /// An enumerant that looks like an opcode, that is, OpXxxx.
+        /// Not necessarily a *valid* opcode.
+        opcode,
+        /// String literals.
+        /// Note, this token is also returned for unterminated
+        /// strings. In this case the closing " is not present.
+        string,
+        /// |.
+        pipe,
+        /// =.
+        equals,
+
+        fn name(self: Tag) []const u8 {
+            return switch (self) {
+                .eof => "<end of input>",
+                .result_id => "<result-id>",
+                .result_id_assign => "<assigned result-id>",
+                .value => "<value>",
+                .opcode => "<opcode>",
+                .string => "<string literal>",
+                .pipe => "'|'",
+                .equals => "'='",
+            };
+        }
+    };
+};
+
+/// This union represents utility information for a decoded operand.
+/// Note that this union only needs to maintain a minimal amount of
+/// bookkeeping: these values are enough to either decode the operands
+/// into a spec type, or emit them directly in their binary form.
+const Operand = union(enum) {
+    /// Any 'simple' 32-bit value. This could be a mask or
+    /// enumerant, etc, depending on the operands.
+    value: u32,
+
+    /// An int- or float literal encoded as 1 word. This may be
+    /// a 32-bit literal or smaller, already in the proper format:
+    /// the upper bits are 0 for floats and unsigned ints, and sign-extended
+    /// for signed ints.
+    literal32: u32,
+
+    /// An int- or float literal encoded as 2 words. This may be a 33-bit
+    /// to 64-bit literal, already in the proper format:
+    /// the upper bits are 0 for floats and unsigned ints, and sign-extended
+    /// for signed ints.
+    literal64: u64,
+
+    /// A result-id which is assigned to in this instruction. If present,
+    /// this is the first operand of the instruction.
+    result_id: AsmValue.Ref,
+
+    /// A result-id which is referred to (not assigned to) in this instruction.
+    ref_id: AsmValue.Ref,
+
+    /// Offset into `inst.string_bytes`. The string ends at the next zero-terminator.
+    string: u32,
+};
+
+/// A structure representing an error message that the assembler may return when
+/// the assembly source is not syntactically or semantically correct.
+const ErrorMsg = struct {
+    /// The offset in bytes from the start of `src` that this error occurred.
+    byte_offset: u32,
+    /// An explanatory error message.
+    /// Memory is owned by `self.gpa`. TODO: Maybe allocate this with an arena
+    /// allocator if it is needed elsewhere?
+    msg: []const u8,
+};
+
+/// Possible errors the `assemble` function may return.
+const Error = error{ AssembleFail, OutOfMemory };
+
+/// This union is used to keep track of results of SPIR-V instructions. This can either be just a plain
+/// result-id, in the case of most instructions, or for example a type that is constructed from
+/// an OpTypeXxx instruction.
+const AsmValue = union(enum) {
+    /// The results are stored in an array hash map, and can be referred to either by name (without the %),
+    /// or by values of this index type.
+    pub const Ref = u32;
+
+    /// This result-value is the RHS of the current instruction.
+    just_declared,
+
+    /// This is used as a placeholder for ref-ids of which the result-id is not yet known.
+    /// It will be further resolved at a later stage to a more concrete forward reference.
+    unresolved_forward_reference,
+
+    /// This result-value is a normal result produced by a different instruction.
+    value: IdRef,
+
+    /// This result-value represents a type registered into the module's type system.
+    ty: SpvType.Ref,
+
+    /// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
+    /// is of a variant that allows the result to be obtained (not an unresolved
+    /// forward declaration, not in the process of being declared, etc).
+    pub fn resultId(self: AsmValue, spv: *const SpvModule) IdRef {
+        return switch (self) {
+            .just_declared, .unresolved_forward_reference => unreachable,
+            .value => |result| result,
+            .ty => |ref| spv.typeResultId(ref).toRef(),
+        };
+    }
+};
+
+/// This map type maps results to values. Results can be addressed either by name (without the %), or by
+/// AsmValue.Ref in AsmValueMap.keys/.values.
+const AsmValueMap = std.StringArrayHashMapUnmanaged(AsmValue);
+
+/// An allocator used for common allocations.
+gpa: Allocator,
+
+/// A list of errors that occurred while processing the assembly.
+errors: std.ArrayListUnmanaged(ErrorMsg) = .{},
+
+/// The source code that is being assembled.
+src: []const u8,
+
+/// The module that this assembly is associated with.
+/// Instructions like OpType*, OpDecorate, etc. are emitted into this module.
+spv: *SpvModule,
+
+/// The function that the function-specific instructions should be emitted to.
+func: *SpvModule.Fn,
+
+/// `self.src` tokenized.
+/// `self.src`, tokenized.
+tokens: std.ArrayListUnmanaged(Token) = .{},
+
+/// The index of the token that is next during parsing.
+current_token: u32 = 0,
+
+/// This field groups the properties of the instruction that is currently
+/// being parsed or has just been parsed.
+inst: struct {
+    /// The opcode of the current instruction.
+    opcode: Opcode = undefined,
+    /// Operands of the current instruction.
+    operands: std.ArrayListUnmanaged(Operand) = .{},
+    /// This is where string data resides. Strings are zero-terminated.
+    string_bytes: std.ArrayListUnmanaged(u8) = .{},
+
+    /// Return a reference to the result of this instruction, if any.
+    fn result(self: @This()) ?AsmValue.Ref {
+        // The result, if present, is either the first or second
+        // operand of an instruction.
+        for (self.operands.items[0..@min(self.operands.items.len, 2)]) |op| {
+            switch (op) {
+                .result_id => |index| return index,
+                else => {},
+            }
+        }
+        return null;
+    }
+} = .{},
+
+/// This map maps results to their tracked values.
+value_map: AsmValueMap = .{},
+
+/// Free the resources owned by this assembler.
+pub fn deinit(self: *Assembler) void {
+    for (self.errors.items) |err| {
+        self.gpa.free(err.msg);
+    }
+    self.tokens.deinit(self.gpa);
+    self.errors.deinit(self.gpa);
+    self.inst.operands.deinit(self.gpa);
+    self.inst.string_bytes.deinit(self.gpa);
+    self.value_map.deinit(self.gpa);
+}
+
+pub fn assemble(self: *Assembler) Error!void {
+    try self.tokenize();
+    while (!self.testToken(.eof)) {
+        try self.parseInstruction();
+        try self.processInstruction();
+    }
+    if (self.errors.items.len > 0)
+        return error.AssembleFail;
+}
+
+fn addError(self: *Assembler, offset: u32, comptime fmt: []const u8, args: anytype) !void {
+    const msg = try std.fmt.allocPrint(self.gpa, fmt, args);
+    errdefer self.gpa.free(msg);
+    try self.errors.append(self.gpa, .{
+        .byte_offset = offset,
+        .msg = msg,
+    });
+}
+
+fn fail(self: *Assembler, offset: u32, comptime fmt: []const u8, args: anytype) Error {
+    try self.addError(offset, fmt, args);
+    return error.AssembleFail;
+}
+
+fn todo(self: *Assembler, comptime fmt: []const u8, args: anytype) Error {
+    return self.fail(0, "todo: " ++ fmt, args);
+}
+
+/// Attempt to process the instruction currently in `self.inst`.
+/// For example, this emits the instruction into the module or function,
+/// or records a type definition.
+/// If this function returns `error.AssembleFail`, an explanatory
+/// error message has already been emitted into `self.errors`.
+fn processInstruction(self: *Assembler) !void {
+    const result = switch (self.inst.opcode.class()) {
+        .TypeDeclaration => try self.processTypeInstruction(),
+        else => if (try self.processGenericInstruction()) |result|
+            result
+        else
+            return,
+    };
+
+    const result_ref = self.inst.result().?;
+    switch (self.value_map.values()[result_ref]) {
+        .just_declared => self.value_map.values()[result_ref] = result,
+        else => {
+            // TODO: Improve source location.
+            const name = self.value_map.keys()[result_ref];
+            return self.fail(0, "duplicate definition of %{s}", .{name});
+        },
+    }
+}
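+
+// Example (illustrative only): for the input `%u32 = OpTypeInt 32 0`,
+// parseInstruction leaves .opcode = .OpTypeInt and the operand list
+// { result_id, literal32 = 32, literal32 = 0 }. processInstruction then
+// dispatches on the opcode's class (.TypeDeclaration) to
+// processTypeInstruction below and records the resulting type under the
+// name "u32" in value_map.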
+
+/// Record `self.inst` into the module's type system, and return the AsmValue that
+/// refers to the result.
+fn processTypeInstruction(self: *Assembler) !AsmValue {
+    const operands = self.inst.operands.items;
+    const ty = switch (self.inst.opcode) {
+        .OpTypeVoid => SpvType.initTag(.void),
+        .OpTypeBool => SpvType.initTag(.bool),
+        .OpTypeInt => blk: {
+            const payload = try self.spv.arena.create(SpvType.Payload.Int);
+            const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
+                0 => .unsigned,
+                1 => .signed,
+                else => {
+                    // TODO: Improve source location.
+                    return self.fail(0, "'{}' is not a valid signedness (expected 0 or 1)", .{operands[2].literal32});
+                },
+            };
+            payload.* = .{
+                .width = operands[1].literal32,
+                .signedness = signedness,
+            };
+            break :blk SpvType.initPayload(&payload.base);
+        },
+        .OpTypeFloat => blk: {
+            const payload = try self.spv.arena.create(SpvType.Payload.Float);
+            payload.* = .{
+                .width = operands[1].literal32,
+            };
+            break :blk SpvType.initPayload(&payload.base);
+        },
+        .OpTypeVector => blk: {
+            const payload = try self.spv.arena.create(SpvType.Payload.Vector);
+            payload.* = .{
+                .component_type = try self.resolveTypeRef(operands[1].ref_id),
+                .component_count = operands[2].literal32,
+            };
+            break :blk SpvType.initPayload(&payload.base);
+        },
+        .OpTypeMatrix => blk: {
+            const payload = try self.spv.arena.create(SpvType.Payload.Matrix);
+            payload.* = .{
+                .column_type = try self.resolveTypeRef(operands[1].ref_id),
+                .column_count = operands[2].literal32,
+            };
+            break :blk SpvType.initPayload(&payload.base);
+        },
+        .OpTypeImage => blk: {
+            const payload = try self.spv.arena.create(SpvType.Payload.Image);
+            payload.* = .{
+                .sampled_type = try self.resolveTypeRef(operands[1].ref_id),
+                .dim = @intToEnum(spec.Dim, operands[2].value),
+                .depth = switch (operands[3].literal32) {
+                    0 => .no,
+                    1 => .yes,
+                    2 => .maybe,
+                    else => {
+                        return self.fail(0, "'{}' is not a valid image depth (expected 0, 1 or 2)", .{operands[3].literal32});
+                    },
+                },
+                .arrayed = switch (operands[4].literal32) {
+                    0 => false,
+                    1 => true,
+                    else => {
+                        return self.fail(0, "'{}' is not a valid image arrayed-ness (expected 0 or 1)", .{operands[4].literal32});
+                    },
+                },
+                .multisampled = switch (operands[5].literal32) {
+                    0 => false,
+                    1 => true,
+                    else => {
+                        return self.fail(0, "'{}' is not a valid image multisampled-ness (expected 0 or 1)", .{operands[5].literal32});
+                    },
+                },
+                .sampled = switch (operands[6].literal32) {
+                    0 => .known_at_runtime,
+                    1 => .with_sampler,
+                    2 => .without_sampler,
+                    else => {
+                        return self.fail(0, "'{}' is not a valid image sampled-ness (expected 0, 1 or 2)", .{operands[6].literal32});
+                    },
+                },
+                .format = @intToEnum(spec.ImageFormat, operands[7].value),
+                .access_qualifier = if (operands.len > 8)
+                    @intToEnum(spec.AccessQualifier, operands[8].value)
+                else
+                    null,
+            };
+            break :blk SpvType.initPayload(&payload.base);
+        },
+        .OpTypeSampler => SpvType.initTag(.sampler),
+        .OpTypeSampledImage => blk: {
+            const payload = try self.spv.arena.create(SpvType.Payload.SampledImage);
+            payload.* = .{
+                .image_type = try self.resolveTypeRef(operands[1].ref_id),
+            };
+            break :blk SpvType.initPayload(&payload.base);
+        },
+        .OpTypeArray => {
+            // TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant),
+            // and so some care must be taken when entering this into the type system.
+ return self.todo("process OpTypeArray", .{}); + }, + .OpTypeRuntimeArray => blk: { + const payload = try self.spv.arena.create(SpvType.Payload.RuntimeArray); + payload.* = .{ + .element_type = try self.resolveTypeRef(operands[1].ref_id), + // TODO: Fetch array stride from decorations. + .array_stride = 0, + }; + break :blk SpvType.initPayload(&payload.base); + }, + .OpTypeOpaque => blk: { + const payload = try self.spv.arena.create(SpvType.Payload.Opaque); + const name_offset = operands[1].string; + payload.* = .{ + .name = std.mem.sliceTo(self.inst.string_bytes.items[name_offset..], 0), + }; + break :blk SpvType.initPayload(&payload.base); + }, + .OpTypePointer => blk: { + const payload = try self.spv.arena.create(SpvType.Payload.Pointer); + payload.* = .{ + .storage_class = @intToEnum(spec.StorageClass, operands[1].value), + .child_type = try self.resolveTypeRef(operands[2].ref_id), + // TODO: Fetch these values from decorations. + .array_stride = 0, + .alignment = null, + .max_byte_offset = null, + }; + break :blk SpvType.initPayload(&payload.base); + }, + .OpTypeFunction => blk: { + const param_operands = operands[2..]; + const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len); + for (param_types) |*param, i| { + param.* = try self.resolveTypeRef(param_operands[i].ref_id); + } + const payload = try self.spv.arena.create(SpvType.Payload.Function); + payload.* = .{ + .return_type = try self.resolveTypeRef(operands[1].ref_id), + .parameters = param_types, + }; + break :blk SpvType.initPayload(&payload.base); + }, + .OpTypeEvent => SpvType.initTag(.event), + .OpTypeDeviceEvent => SpvType.initTag(.device_event), + .OpTypeReserveId => SpvType.initTag(.reserve_id), + .OpTypeQueue => SpvType.initTag(.queue), + .OpTypePipe => blk: { + const payload = try self.spv.arena.create(SpvType.Payload.Pipe); + payload.* = .{ + .qualifier = @intToEnum(spec.AccessQualifier, operands[1].value), + }; + break :blk SpvType.initPayload(&payload.base); + }, + .OpTypePipeStorage => SpvType.initTag(.pipe_storage), + .OpTypeNamedBarrier => SpvType.initTag(.named_barrier), + else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}), + }; + + const ref = try self.spv.resolveType(ty); + return AsmValue{ .ty = ref }; +} + +/// Emit `self.inst` into `self.spv` and `self.func`, and return the AsmValue +/// that this produces (if any). This function processes common instructions: +/// - No forward references are allowed in operands. +/// - Target section is determined from instruction type. +/// - Function-local instructions are emitted in `self.func`. +fn processGenericInstruction(self: *Assembler) !?AsmValue { + const operands = self.inst.operands.items; + const section = switch (self.inst.opcode.class()) { + .ConstantCreation => &self.spv.sections.types_globals_constants, + .Annotation => &self.spv.sections.annotations, + .TypeDeclaration => unreachable, // Handled elsewhere. + else => switch (self.inst.opcode) { + .OpEntryPoint => &self.spv.sections.entry_points, + .OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes, + .OpVariable => switch (@intToEnum(spec.StorageClass, operands[2].value)) { + .Function => &self.func.prologue, + else => &self.spv.sections.types_globals_constants, + }, + // Default case - to be worked out further. 
+            else => &self.func.body,
+        },
+    };
+
+    var maybe_result_id: ?IdResult = null;
+    const first_word = section.instructions.items.len;
+    // At this point we're not quite sure how many operands this instruction is going to have,
+    // so insert 0 and patch up the actual opcode word later.
+    try section.ensureUnusedCapacity(self.spv.gpa, 1);
+    section.writeWord(0);
+
+    for (operands) |operand| {
+        switch (operand) {
+            .value, .literal32 => |word| {
+                try section.ensureUnusedCapacity(self.spv.gpa, 1);
+                section.writeWord(word);
+            },
+            .literal64 => |dword| {
+                try section.ensureUnusedCapacity(self.spv.gpa, 2);
+                section.writeDoubleWord(dword);
+            },
+            .result_id => {
+                maybe_result_id = self.spv.allocId();
+                try section.ensureUnusedCapacity(self.spv.gpa, 1);
+                section.writeOperand(IdResult, maybe_result_id.?);
+            },
+            .ref_id => |index| {
+                const result = try self.resolveRef(index);
+                try section.ensureUnusedCapacity(self.spv.gpa, 1);
+                section.writeOperand(spec.IdRef, result.resultId(self.spv));
+            },
+            .string => |offset| {
+                const text = std.mem.sliceTo(self.inst.string_bytes.items[offset..], 0);
+                const size = std.math.divCeil(usize, text.len + 1, @sizeOf(Word)) catch unreachable;
+                try section.ensureUnusedCapacity(self.spv.gpa, size);
+                section.writeOperand(spec.LiteralString, text);
+            },
+        }
+    }
+
+    const actual_word_count = section.instructions.items.len - first_word;
+    section.instructions.items[first_word] |= @as(u32, @intCast(u16, actual_word_count)) << 16 | @enumToInt(self.inst.opcode);
+
+    if (maybe_result_id) |result| {
+        return AsmValue{ .value = result.toRef() };
+    }
+    return null;
+}
+
+/// Resolve a value reference. This function makes sure that the reference is
+/// not self-referential, but it does allow the result to be forward declared.
+fn resolveMaybeForwardRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
+    const value = self.value_map.values()[ref];
+    switch (value) {
+        .just_declared => {
+            const name = self.value_map.keys()[ref];
+            // TODO: Improve source location.
+            return self.fail(0, "self-referential parameter %{s}", .{name});
+        },
+        else => return value,
+    }
+}
+
+/// Resolve a value reference. This function makes sure that the result is
+/// neither self-referential nor an unresolved forward declaration.
+fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
+    const value = try self.resolveMaybeForwardRef(ref);
+    switch (value) {
+        .just_declared => unreachable,
+        .unresolved_forward_reference => {
+            const name = self.value_map.keys()[ref];
+            // TODO: Improve source location.
+            return self.fail(0, "reference to undeclared result-id %{s}", .{name});
+        },
+        else => return value,
+    }
+}
+
+/// Resolve a value reference as a type.
+fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !SpvType.Ref {
+    const value = try self.resolveRef(ref);
+    switch (value) {
+        .just_declared, .unresolved_forward_reference => unreachable,
+        .ty => |ty_ref| return ty_ref,
+        else => {
+            const name = self.value_map.keys()[ref];
+            // TODO: Improve source location.
+            return self.fail(0, "expected operand %{s} to refer to a type", .{name});
+        },
+    }
+}
+
+/// Attempt to parse an instruction into `self.inst`.
+/// If this function returns `error.AssembleFail`, an explanatory
+/// error message has been emitted into `self.errors`.
+fn parseInstruction(self: *Assembler) !void { + self.inst.opcode = undefined; + self.inst.operands.shrinkRetainingCapacity(0); + self.inst.string_bytes.shrinkRetainingCapacity(0); + + const lhs_result_tok = self.currentToken(); + const maybe_lhs_result = if (self.eatToken(.result_id_assign)) blk: { + const name = self.tokenText(lhs_result_tok)[1..]; + const entry = try self.value_map.getOrPut(self.gpa, name); + try self.expectToken(.equals); + if (!entry.found_existing) { + entry.value_ptr.* = .just_declared; + } + break :blk @intCast(AsmValue.Ref, entry.index); + } else null; + + const opcode_tok = self.currentToken(); + if (maybe_lhs_result != null) { + try self.expectToken(.opcode); + } else if (!self.eatToken(.opcode)) { + return self.fail(opcode_tok.start, "expected start of instruction, found {s}", .{opcode_tok.tag.name()}); + } + + const opcode_text = self.tokenText(opcode_tok); + @setEvalBranchQuota(10000); + self.inst.opcode = std.meta.stringToEnum(Opcode, opcode_text) orelse { + return self.fail(opcode_tok.start, "invalid opcode '{s}'", .{opcode_text}); + }; + + const expected_operands = self.inst.opcode.operands(); + // This is a loop because the result-id is not always the first operand. + const requires_lhs_result = for (expected_operands) |op| { + if (op.kind == .IdResult) break true; + } else false; + + if (requires_lhs_result and maybe_lhs_result == null) { + return self.fail(opcode_tok.start, "opcode '{s}' expects result on left-hand side", .{@tagName(self.inst.opcode)}); + } else if (!requires_lhs_result and maybe_lhs_result != null) { + return self.fail( + lhs_result_tok.start, + "opcode '{s}' does not expect a result-id on the left-hand side", + .{@tagName(self.inst.opcode)}, + ); + } + + for (expected_operands) |operand| { + if (operand.kind == .IdResult) { + try self.inst.operands.append(self.gpa, .{ .result_id = maybe_lhs_result.? }); + continue; + } + + switch (operand.quantifier) { + .required => if (self.isAtInstructionBoundary()) { + return self.fail( + self.currentToken().start, + "missing required operand", // TODO: Operand name? + .{}, + ); + } else { + try self.parseOperand(operand.kind); + }, + .optional => if (!self.isAtInstructionBoundary()) { + try self.parseOperand(operand.kind); + }, + .variadic => while (!self.isAtInstructionBoundary()) { + try self.parseOperand(operand.kind); + }, + } + } +} + +/// Parse a single operand of a particular type. +fn parseOperand(self: *Assembler, kind: spec.OperandKind) Error!void { + switch (kind.category()) { + .bit_enum => try self.parseBitEnum(kind), + .value_enum => try self.parseValueEnum(kind), + .id => try self.parseRefId(), + else => switch (kind) { + .LiteralInteger => try self.parseLiteralInteger(), + .LiteralString => try self.parseString(), + .LiteralContextDependentNumber => try self.parseContextDependentNumber(), + .PairIdRefIdRef => try self.parsePhiSource(), + else => return self.todo("parse operand of type {s}", .{@tagName(kind)}), + }, + } +} + +/// Also handles parsing any required extra operands. 
+fn parseBitEnum(self: *Assembler, kind: spec.OperandKind) !void {
+    var tok = self.currentToken();
+    try self.expectToken(.value);
+
+    var text = self.tokenText(tok);
+    if (std.mem.eql(u8, text, "None")) {
+        try self.inst.operands.append(self.gpa, .{ .value = 0 });
+        return;
+    }
+
+    const enumerants = kind.enumerants();
+    var mask: u32 = 0;
+    while (true) {
+        const enumerant = for (enumerants) |enumerant| {
+            if (std.mem.eql(u8, enumerant.name, text))
+                break enumerant;
+        } else {
+            return self.fail(tok.start, "'{s}' is not a valid flag for bitmask {s}", .{ text, @tagName(kind) });
+        };
+        mask |= enumerant.value;
+        if (!self.eatToken(.pipe))
+            break;
+
+        tok = self.currentToken();
+        try self.expectToken(.value);
+        text = self.tokenText(tok);
+    }
+
+    try self.inst.operands.append(self.gpa, .{ .value = mask });
+
+    // Assume values are sorted.
+    // TODO: ensure in generator.
+    for (enumerants) |enumerant| {
+        if ((mask & enumerant.value) == 0)
+            continue;
+
+        for (enumerant.parameters) |param_kind| {
+            if (self.isAtInstructionBoundary()) {
+                return self.fail(self.currentToken().start, "missing required parameter for bit flag '{s}'", .{enumerant.name});
+            }
+
+            try self.parseOperand(param_kind);
+        }
+    }
+}
+
+/// Also handles parsing any required extra operands.
+fn parseValueEnum(self: *Assembler, kind: spec.OperandKind) !void {
+    const tok = self.currentToken();
+    try self.expectToken(.value);
+
+    const text = self.tokenText(tok);
+    const enumerant = for (kind.enumerants()) |enumerant| {
+        if (std.mem.eql(u8, enumerant.name, text))
+            break enumerant;
+    } else {
+        return self.fail(tok.start, "'{s}' is not a valid value for enumeration {s}", .{ text, @tagName(kind) });
+    };
+
+    try self.inst.operands.append(self.gpa, .{ .value = enumerant.value });
+
+    for (enumerant.parameters) |param_kind| {
+        if (self.isAtInstructionBoundary()) {
+            return self.fail(self.currentToken().start, "missing required parameter for enum variant '{s}'", .{enumerant.name});
+        }
+
+        try self.parseOperand(param_kind);
+    }
+}
+
+fn parseRefId(self: *Assembler) !void {
+    const tok = self.currentToken();
+    try self.expectToken(.result_id);
+
+    const name = self.tokenText(tok)[1..];
+    const entry = try self.value_map.getOrPut(self.gpa, name);
+    if (!entry.found_existing) {
+        entry.value_ptr.* = .unresolved_forward_reference;
+    }
+
+    const index = @intCast(AsmValue.Ref, entry.index);
+    try self.inst.operands.append(self.gpa, .{ .ref_id = index });
+}
+
+fn parseLiteralInteger(self: *Assembler) !void {
+    const tok = self.currentToken();
+    try self.expectToken(.value);
+    // According to the SPIR-V machine-readable grammar, a LiteralInteger
+    // may consist of one or more words. From the SPIR-V docs it seems like there
+    // is only one instruction where multiple words are allowed: the literals that
+    // make up the switch cases of OpSwitch. That case is handled separately, so we
+    // just assume everything is a 32-bit integer in this function.
+    const text = self.tokenText(tok);
+    const value = std.fmt.parseInt(u32, text, 0) catch {
+        return self.fail(tok.start, "'{s}' is not a valid 32-bit integer literal", .{text});
+    };
+    try self.inst.operands.append(self.gpa, .{ .literal32 = value });
+}
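+
+// Illustrative example (not part of the implementation): in
+//     OpExecutionMode %main LocalSize 8 8 1
+// the value-enumerant LocalSize carries three LiteralInteger parameters, so
+// parseValueEnum above parses "LocalSize" and then hands "8 8 1" to
+// parseOperand/parseLiteralInteger one value at a time.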
+
+fn parseString(self: *Assembler) !void {
+    const tok = self.currentToken();
+    try self.expectToken(.string);
+    // Note that the string might not have a closing quote. In that case an
+    // error has already been emitted, but we try to continue processing
+    // anyway, so this function has to deal with that situation.
+    const text = self.tokenText(tok);
+    assert(text.len > 0 and text[0] == '"');
+    const literal = if (text.len != 1 and text[text.len - 1] == '"')
+        text[1 .. text.len - 1]
+    else
+        text[1..];
+
+    const string_offset = @intCast(u32, self.inst.string_bytes.items.len);
+    try self.inst.string_bytes.ensureUnusedCapacity(self.gpa, literal.len + 1);
+    self.inst.string_bytes.appendSliceAssumeCapacity(literal);
+    self.inst.string_bytes.appendAssumeCapacity(0);
+
+    try self.inst.operands.append(self.gpa, .{ .string = string_offset });
+}
+
+fn parseContextDependentNumber(self: *Assembler) !void {
+    // For context-dependent numbers, the actual type to parse is determined by the instruction.
+    // Currently, this operand appears in OpConstant and OpSpecConstant, where the to-be-parsed type
+    // is determined by the result type. That means that for these instructions we have to resolve the
+    // operand type early and look at the result to see how we need to proceed.
+    assert(self.inst.opcode == .OpConstant or self.inst.opcode == .OpSpecConstant);
+
+    const tok = self.currentToken();
+    const result_type_ref = try self.resolveTypeRef(self.inst.operands.items[0].ref_id);
+    const result_type = self.spv.type_cache.keys()[result_type_ref];
+    switch (result_type.tag()) {
+        .int => {
+            const int = result_type.castTag(.int).?;
+            try self.parseContextDependentInt(int.signedness, int.width);
+        },
+        .float => {
+            const width = result_type.castTag(.float).?.width;
+            switch (width) {
+                16 => try self.parseContextDependentFloat(16),
+                32 => try self.parseContextDependentFloat(32),
+                64 => try self.parseContextDependentFloat(64),
+                else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{width}),
+            }
+        },
+        else => return self.fail(tok.start, "cannot parse literal constant {s}", .{@tagName(result_type.tag())}),
+    }
+}
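+
+// Illustrative example (not part of the implementation): in
+//     %c = OpConstant %u32 42
+// the literal 42 is a LiteralContextDependentNumber. The result type %u32 is
+// resolved first; if it names an unsigned 32-bit integer type, the literal is
+// then parsed by parseContextDependentInt below as an unsigned 32-bit value.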
+
+fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness, width: u32) !void {
+    const tok = self.currentToken();
+    try self.expectToken(.value);
+
+    if (width == 0 or width > 2 * @bitSizeOf(spec.Word)) {
+        return self.fail(tok.start, "cannot parse {}-bit integer literal", .{width});
+    }
+
+    const text = self.tokenText(tok);
+    invalid: {
+        // Just parse the integer as the next larger integer type, and check if it overflows afterwards.
+        const int = std.fmt.parseInt(i128, text, 0) catch break :invalid;
+        const min = switch (signedness) {
+            .unsigned => 0,
+            .signed => -(@as(i128, 1) << (@intCast(u7, width) - 1)),
+        };
+        const max = (@as(i128, 1) << (@intCast(u7, width) - @boolToInt(signedness == .signed))) - 1;
+        if (int < min or int > max) {
+            break :invalid;
+        }
+
+        // Note, we store the sign-extended version here.
+        if (width <= @bitSizeOf(spec.Word)) {
+            try self.inst.operands.append(self.gpa, .{ .literal32 = @truncate(u32, @bitCast(u128, int)) });
+        } else {
+            try self.inst.operands.append(self.gpa, .{ .literal64 = @truncate(u64, @bitCast(u128, int)) });
+        }
+        return;
+    }
+
+    return self.fail(tok.start, "'{s}' is not a valid {s} {}-bit int literal", .{ text, @tagName(signedness), width });
+}
+
+fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void {
+    const Float = std.meta.Float(width);
+    const Int = std.meta.Int(.unsigned, width);
+
+    const tok = self.currentToken();
+    try self.expectToken(.value);
+
+    const text = self.tokenText(tok);
+
+    const value = std.fmt.parseFloat(Float, text) catch {
+        return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width });
+    };
+
+    const float_bits = @bitCast(Int, value);
+    if (width <= @bitSizeOf(spec.Word)) {
+        try self.inst.operands.append(self.gpa, .{ .literal32 = float_bits });
+    } else {
+        assert(width <= 2 * @bitSizeOf(spec.Word));
+        try self.inst.operands.append(self.gpa, .{ .literal64 = float_bits });
+    }
+}
+
+fn parsePhiSource(self: *Assembler) !void {
+    try self.parseRefId();
+    if (self.isAtInstructionBoundary()) {
+        return self.fail(self.currentToken().start, "missing phi block parent", .{});
+    }
+    try self.parseRefId();
+}
+
+/// Returns whether the `current_token` cursor is currently pointing
+/// at the start of a new instruction.
+fn isAtInstructionBoundary(self: Assembler) bool {
+    return switch (self.currentToken().tag) {
+        .opcode, .result_id_assign, .eof => true,
+        else => false,
+    };
+}
+
+fn expectToken(self: *Assembler, tag: Token.Tag) !void {
+    if (self.eatToken(tag))
+        return;
+
+    return self.fail(self.currentToken().start, "unexpected {s}, expected {s}", .{
+        self.currentToken().tag.name(),
+        tag.name(),
+    });
+}
+
+fn eatToken(self: *Assembler, tag: Token.Tag) bool {
+    if (self.testToken(tag)) {
+        self.current_token += 1;
+        return true;
+    }
+    return false;
+}
+
+fn testToken(self: Assembler, tag: Token.Tag) bool {
+    return self.currentToken().tag == tag;
+}
+
+fn currentToken(self: Assembler) Token {
+    return self.tokens.items[self.current_token];
+}
+
+fn tokenText(self: Assembler, tok: Token) []const u8 {
+    return self.src[tok.start..tok.end];
+}
+
+/// Tokenize `self.src` and put the tokens in `self.tokens`.
+/// Any errors encountered are appended to `self.errors`.
+fn tokenize(self: *Assembler) !void {
+    var offset: u32 = 0;
+    while (true) {
+        const tok = try self.nextToken(offset);
+        // Resolve result-id assignment now.
+        // Note: If the previous token wasn't a result-id, just ignore it;
+        // we will catch it while parsing. Also guard against `=` being the
+        // very first token, in which case there is no previous token at all.
+        if (tok.tag == .equals and self.tokens.items.len > 0 and
+            self.tokens.items[self.tokens.items.len - 1].tag == .result_id)
+        {
+            self.tokens.items[self.tokens.items.len - 1].tag = .result_id_assign;
+        }
+        try self.tokens.append(self.gpa, tok);
+        if (tok.tag == .eof)
+            break;
+        offset = tok.end;
+    }
+}
+
+/// Retrieve the next token from the input. This function checks that the
+/// token is properly delimited by whitespace where required, but will not
+/// interpret the token yet.
+/// Note: This function doesn't handle .result_id_assign - this is handled in
+/// tokenize().
+fn nextToken(self: *Assembler, start_offset: u32) !Token {
+    // We generally separate the input into the following types:
+    // - Whitespace. Generally ignored, but also used as delimiter for some
+    //   tokens.
+    // - Values. This entails integers, floats, enums - anything that
+    //   consists of alphanumeric characters, delimited by whitespace.
+    // - Result-IDs. This entails anything that consists of alphanumeric characters and _, and
+    //   starts with a %. In contrast to values, this entity can be checked for complete correctness
+    //   relatively easily here.
+    // - Strings. This entails quote-delimited text such as "abc".
+    //   SPIR-V strings have only two escapes, \" and \\.
+    // - Sigils, = and |. In this assembler, these are not required to have whitespace
+    //   around them (they act as delimiters), unlike in SPIRV-Tools.
+
+    var state: enum {
+        start,
+        value,
+        result_id,
+        string,
+        string_end,
+        escape,
+    } = .start;
+    var token_start = start_offset;
+    var offset = start_offset;
+    var tag = Token.Tag.eof;
+    while (offset < self.src.len) : (offset += 1) {
+        const c = self.src[offset];
+        switch (state) {
+            .start => switch (c) {
+                ' ', '\t', '\r', '\n' => token_start = offset + 1,
+                '"' => {
+                    state = .string;
+                    tag = .string;
+                },
+                '%' => {
+                    state = .result_id;
+                    tag = .result_id;
+                },
+                '|' => {
+                    tag = .pipe;
+                    offset += 1;
+                    break;
+                },
+                '=' => {
+                    tag = .equals;
+                    offset += 1;
+                    break;
+                },
+                else => {
+                    state = .value;
+                    tag = .value;
+                },
+            },
+            .value => switch (c) {
+                '"' => {
+                    try self.addError(offset, "unexpected string literal", .{});
+                    // The user most likely just forgot a delimiter here - keep
+                    // the tag as value.
+                    break;
+                },
+                ' ', '\t', '\r', '\n', '=', '|' => break,
+                else => {},
+            },
+            .result_id => switch (c) {
+                '_', 'a'...'z', 'A'...'Z', '0'...'9' => {},
+                ' ', '\t', '\r', '\n', '=', '|' => break,
+                else => {
+                    try self.addError(offset, "illegal character in result-id", .{});
+                    // Again, probably a forgotten delimiter here.
+                    break;
+                },
+            },
+            .string => switch (c) {
+                '\\' => state = .escape,
+                '"' => state = .string_end,
+                else => {}, // Note, strings may include newlines
+            },
+            .string_end => switch (c) {
+                ' ', '\t', '\r', '\n', '=', '|' => break,
+                else => {
+                    try self.addError(offset, "unexpected character after string literal", .{});
+                    // The token is still unmistakably a string.
+                    break;
+                },
+            },
+            // Escapes simply skip the next char.
+            .escape => state = .string,
+        }
+    }
+
+    var tok = Token{
+        .tag = tag,
+        .start = token_start,
+        .end = offset,
+    };
+
+    switch (state) {
+        .string, .escape => {
+            try self.addError(token_start, "unterminated string", .{});
+        },
+        .result_id => if (offset - token_start == 1) {
+            try self.addError(token_start, "result-id must have at least one name character", .{});
+        },
+        .value => {
+            const text = self.tokenText(tok);
+            const prefix = "Op";
+            const looks_like_opcode = text.len > prefix.len and
+                std.mem.startsWith(u8, text, prefix) and
+                std.ascii.isUpper(text[prefix.len]);
+            if (looks_like_opcode)
+                tok.tag = .opcode;
+        },
+        else => {},
+    }
+
+    return tok;
+}
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 59ed1b9b78..f37b04bff3 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -24,6 +24,37 @@ const Type = @import("type.zig").Type;
 
 const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
 
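+// Sketch of the intended emit-and-merge cycle for `Fn` below (illustrative
+// only; the actual driver lives in the SPIR-V code generator):
+//     var func = Fn{};
+//     defer func.deinit(gpa);
+//     // ...emit OpFunction/OpFunctionParameter/OpLabel/OpVariable into
+//     // func.prologue, and the rest of the body into func.body...
+//     try module.addFunction(func);
+//     func.reset(); // Reuse the allocations for the next function.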
+/// This structure represents a function that is in the process of being emitted.
+/// Commonly, the contents of this structure will be merged with the appropriate
+/// sections of the module and re-used. Note that the SPIR-V module system makes
+/// no attempt at compacting result-ids, so any Fn instance should ultimately be
+/// merged into the module its result-ids are allocated from.
+pub const Fn = struct {
+    /// The prologue of this function; this section contains the function's
+    /// OpFunction, OpFunctionParameter, OpLabel and OpVariable instructions, and
+    /// is separated from the actual function contents as OpVariable instructions
+    /// must appear in the first block of a function definition.
+    prologue: Section = .{},
+    /// The code of the body of this function.
+    /// This section should also contain the OpFunctionEnd instruction marking
+    /// the end of this function definition.
+    body: Section = .{},
+
+    /// Reset this function without deallocating resources, so that
+    /// it may be used to emit code for another function.
+    pub fn reset(self: *Fn) void {
+        self.prologue.reset();
+        self.body.reset();
+    }
+
+    /// Free the resources owned by this function.
+    pub fn deinit(self: *Fn, a: Allocator) void {
+        self.prologue.deinit(a);
+        self.body.deinit(a);
+        self.* = undefined;
+    }
+};
+
 /// A general-purpose allocator which may be used to allocate resources for this module
 gpa: Allocator,
 
@@ -40,7 +71,8 @@ sections: struct {
     // memory model defined by target, not required here.
     /// OpEntryPoint instructions.
     entry_points: Section = .{},
-    // OpExecutionMode and OpExecutionModeId instructions - skip for now.
+    /// OpExecutionMode and OpExecutionModeId instructions.
+    execution_modes: Section = .{},
     /// OpString, OpSourcExtension, OpSource, OpSourceContinued.
     debug_strings: Section = .{},
     // OpName, OpMemberName - skip for now.
@@ -81,6 +113,7 @@ pub fn deinit(self: *Module) void {
     self.sections.capabilities.deinit(self.gpa);
     self.sections.extensions.deinit(self.gpa);
     self.sections.entry_points.deinit(self.gpa);
+    self.sections.execution_modes.deinit(self.gpa);
     self.sections.debug_strings.deinit(self.gpa);
     self.sections.annotations.deinit(self.gpa);
     self.sections.types_globals_constants.deinit(self.gpa);
@@ -107,7 +140,7 @@ pub fn flush(self: Module, file: std.fs.File) !void {
 
     const header = [_]Word{
         spec.magic_number,
-        (spec.version.major << 16) | (spec.version.minor << 8),
+        // Emit a fixed SPIR-V 1.5 version header for now; `spec.version` has
+        // moved on to 1.6 with the regenerated spec.
+        (1 << 16) | (5 << 8),
         0, // TODO: Register Zig compiler magic number.
         self.idBound(),
         0, // Schema (currently reserved for future use)
@@ -119,6 +152,7 @@ pub fn flush(self: Module, file: std.fs.File) !void {
         self.sections.capabilities.toWords(),
         self.sections.extensions.toWords(),
         self.sections.entry_points.toWords(),
+        self.sections.execution_modes.toWords(),
         self.sections.debug_strings.toWords(),
         self.sections.annotations.toWords(),
         self.sections.types_globals_constants.toWords(),
@@ -140,6 +174,12 @@ pub fn flush(self: Module, file: std.fs.File) !void {
     try file.pwritevAll(&iovc_buffers, 0);
 }
 
+/// Merge the sections making up a function declaration into this module.
+pub fn addFunction(self: *Module, func: Fn) !void {
+    try self.sections.functions.append(self.gpa, func.prologue);
+    try self.sections.functions.append(self.gpa, func.body);
+}
+
 /// Fetch the result-id of an OpString instruction that encodes the path of the source
 /// file of the decl. This function may also emit an OpSource with source-level information regarding
 /// the decl.
@@ -175,11 +215,13 @@ pub fn resolveType(self: *Module, ty: Type) !Type.Ref { if (!result.found_existing) { result.value_ptr.* = try self.emitType(ty); } + return result.index; } pub fn resolveTypeId(self: *Module, ty: Type) !IdRef { - return self.typeResultId(try self.resolveType(ty)); + const type_ref = try self.resolveType(ty); + return self.typeResultId(type_ref); } /// Get the result-id of a particular type, by reference. Asserts type_ref is valid. @@ -208,14 +250,18 @@ pub fn emitType(self: *Module, ty: Type) !IdResultType { switch (ty.tag()) { .void => try types.emit(self.gpa, .OpTypeVoid, result_id_operand), .bool => try types.emit(self.gpa, .OpTypeBool, result_id_operand), - .int => try types.emit(self.gpa, .OpTypeInt, .{ - .id_result = result_id, - .width = ty.payload(.int).width, - .signedness = switch (ty.payload(.int).signedness) { - .unsigned => @as(spec.LiteralInteger, 0), + .int => { + const signedness: spec.LiteralInteger = switch (ty.payload(.int).signedness) { + .unsigned => 0, .signed => 1, - }, - }), + }; + + try types.emit(self.gpa, .OpTypeInt, .{ + .id_result = result_id, + .width = ty.payload(.int).width, + .signedness = signedness, + }); + }, .float => try types.emit(self.gpa, .OpTypeFloat, .{ .id_result = result_id, .width = ty.payload(.float).width, diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig index d21c2af8f1..6484272943 100644 --- a/src/codegen/spirv/Section.zig +++ b/src/codegen/spirv/Section.zig @@ -36,14 +36,19 @@ pub fn append(section: *Section, allocator: Allocator, other_section: Section) ! try section.instructions.appendSlice(allocator, other_section.instructions.items); } +/// Ensure capacity of at least `capacity` more words in this section. +pub fn ensureUnusedCapacity(section: *Section, allocator: Allocator, capacity: usize) !void { + try section.instructions.ensureUnusedCapacity(allocator, capacity); +} + /// Write an instruction and size, operands are to be inserted manually. 
pub fn emitRaw( section: *Section, allocator: Allocator, opcode: Opcode, - operands: usize, // opcode itself not included + operand_words: usize, // opcode itself not included ) !void { - const word_count = 1 + operands; + const word_count = 1 + operand_words; try section.instructions.ensureUnusedCapacity(allocator, word_count); section.writeWord((@intCast(Word, word_count << 16)) | @enumToInt(opcode)); } @@ -96,7 +101,7 @@ pub fn writeWords(section: *Section, words: []const Word) void { section.instructions.appendSliceAssumeCapacity(words); } -fn writeDoubleWord(section: *Section, dword: DoubleWord) void { +pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void { section.writeWords(&.{ @truncate(Word, dword), @truncate(Word, dword >> @bitSizeOf(Word)), diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig index 5fbd371331..3978231829 100644 --- a/src/codegen/spirv/spec.zig +++ b/src/codegen/spirv/spec.zig @@ -39,8 +39,1103 @@ pub const PairLiteralIntegerIdRef = struct { value: LiteralInteger, label: IdRef pub const PairIdRefLiteralInteger = struct { target: IdRef, member: LiteralInteger }; pub const PairIdRefIdRef = [2]IdRef; -pub const version = Version{ .major = 1, .minor = 5, .patch = 4 }; +pub const Quantifier = enum { + required, + optional, + variadic, +}; + +pub const Operand = struct { + kind: OperandKind, + quantifier: Quantifier, +}; + +pub const OperandCategory = enum { + bit_enum, + value_enum, + id, + literal, + composite, +}; + +pub const Enumerant = struct { + name: []const u8, + value: Word, + parameters: []const OperandKind, +}; + +pub const version = Version{ .major = 1, .minor = 6, .patch = 1 }; pub const magic_number: Word = 0x07230203; + +pub const Class = enum { + Miscellaneous, + Debug, + Extension, + ModeSetting, + TypeDeclaration, + ConstantCreation, + Function, + Memory, + Annotation, + Composite, + Image, + Conversion, + Arithmetic, + RelationalAndLogical, + Bit, + Derivative, + Primitive, + Barrier, + Atomic, + ControlFlow, + Group, + Pipe, + DeviceSideEnqueue, + NonUniform, + Reserved, +}; +pub const OperandKind = enum { + ImageOperands, + FPFastMathMode, + SelectionControl, + LoopControl, + FunctionControl, + MemorySemantics, + MemoryAccess, + KernelProfilingInfo, + RayFlags, + FragmentShadingRate, + SourceLanguage, + ExecutionModel, + AddressingModel, + MemoryModel, + ExecutionMode, + StorageClass, + Dim, + SamplerAddressingMode, + SamplerFilterMode, + ImageFormat, + ImageChannelOrder, + ImageChannelDataType, + FPRoundingMode, + FPDenormMode, + QuantizationModes, + FPOperationMode, + OverflowModes, + LinkageType, + AccessQualifier, + FunctionParameterAttribute, + Decoration, + BuiltIn, + Scope, + GroupOperation, + KernelEnqueueFlags, + Capability, + RayQueryIntersection, + RayQueryCommittedIntersectionType, + RayQueryCandidateIntersectionType, + PackedVectorFormat, + IdResultType, + IdResult, + IdMemorySemantics, + IdScope, + IdRef, + LiteralInteger, + LiteralString, + LiteralContextDependentNumber, + LiteralExtInstInteger, + LiteralSpecConstantOpInteger, + PairLiteralIntegerIdRef, + PairIdRefLiteralInteger, + PairIdRefIdRef, + + pub fn category(self: OperandKind) OperandCategory { + return switch (self) { + .ImageOperands => .bit_enum, + .FPFastMathMode => .bit_enum, + .SelectionControl => .bit_enum, + .LoopControl => .bit_enum, + .FunctionControl => .bit_enum, + .MemorySemantics => .bit_enum, + .MemoryAccess => .bit_enum, + .KernelProfilingInfo => .bit_enum, + .RayFlags => .bit_enum, + .FragmentShadingRate => .bit_enum, + 
.SourceLanguage => .value_enum, + .ExecutionModel => .value_enum, + .AddressingModel => .value_enum, + .MemoryModel => .value_enum, + .ExecutionMode => .value_enum, + .StorageClass => .value_enum, + .Dim => .value_enum, + .SamplerAddressingMode => .value_enum, + .SamplerFilterMode => .value_enum, + .ImageFormat => .value_enum, + .ImageChannelOrder => .value_enum, + .ImageChannelDataType => .value_enum, + .FPRoundingMode => .value_enum, + .FPDenormMode => .value_enum, + .QuantizationModes => .value_enum, + .FPOperationMode => .value_enum, + .OverflowModes => .value_enum, + .LinkageType => .value_enum, + .AccessQualifier => .value_enum, + .FunctionParameterAttribute => .value_enum, + .Decoration => .value_enum, + .BuiltIn => .value_enum, + .Scope => .value_enum, + .GroupOperation => .value_enum, + .KernelEnqueueFlags => .value_enum, + .Capability => .value_enum, + .RayQueryIntersection => .value_enum, + .RayQueryCommittedIntersectionType => .value_enum, + .RayQueryCandidateIntersectionType => .value_enum, + .PackedVectorFormat => .value_enum, + .IdResultType => .id, + .IdResult => .id, + .IdMemorySemantics => .id, + .IdScope => .id, + .IdRef => .id, + .LiteralInteger => .literal, + .LiteralString => .literal, + .LiteralContextDependentNumber => .literal, + .LiteralExtInstInteger => .literal, + .LiteralSpecConstantOpInteger => .literal, + .PairLiteralIntegerIdRef => .composite, + .PairIdRefLiteralInteger => .composite, + .PairIdRefIdRef => .composite, + }; + } + pub fn enumerants(self: OperandKind) []const Enumerant { + return switch (self) { + .ImageOperands => &[_]Enumerant{ + .{ .name = "Bias", .value = 0x0001, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "Lod", .value = 0x0002, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "Grad", .value = 0x0004, .parameters = &[_]OperandKind{ .IdRef, .IdRef } }, + .{ .name = "ConstOffset", .value = 0x0008, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "Offset", .value = 0x0010, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "ConstOffsets", .value = 0x0020, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "Sample", .value = 0x0040, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "MinLod", .value = 0x0080, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "MakeTexelAvailable", .value = 0x0100, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "MakeTexelAvailableKHR", .value = 0x0100, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "MakeTexelVisible", .value = 0x0200, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "MakeTexelVisibleKHR", .value = 0x0200, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "NonPrivateTexel", .value = 0x0400, .parameters = &[_]OperandKind{} }, + .{ .name = "NonPrivateTexelKHR", .value = 0x0400, .parameters = &[_]OperandKind{} }, + .{ .name = "VolatileTexel", .value = 0x0800, .parameters = &[_]OperandKind{} }, + .{ .name = "VolatileTexelKHR", .value = 0x0800, .parameters = &[_]OperandKind{} }, + .{ .name = "SignExtend", .value = 0x1000, .parameters = &[_]OperandKind{} }, + .{ .name = "ZeroExtend", .value = 0x2000, .parameters = &[_]OperandKind{} }, + .{ .name = "Nontemporal", .value = 0x4000, .parameters = &[_]OperandKind{} }, + .{ .name = "Offsets", .value = 0x10000, .parameters = &[_]OperandKind{.IdRef} }, + }, + .FPFastMathMode => &[_]Enumerant{ + .{ .name = "NotNaN", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "NotInf", .value = 0x0002, .parameters = &[_]OperandKind{} }, + .{ .name = "NSZ", .value = 0x0004, .parameters = 
&[_]OperandKind{} }, + .{ .name = "AllowRecip", .value = 0x0008, .parameters = &[_]OperandKind{} }, + .{ .name = "Fast", .value = 0x0010, .parameters = &[_]OperandKind{} }, + .{ .name = "AllowContractFastINTEL", .value = 0x10000, .parameters = &[_]OperandKind{} }, + .{ .name = "AllowReassocINTEL", .value = 0x20000, .parameters = &[_]OperandKind{} }, + }, + .SelectionControl => &[_]Enumerant{ + .{ .name = "Flatten", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "DontFlatten", .value = 0x0002, .parameters = &[_]OperandKind{} }, + }, + .LoopControl => &[_]Enumerant{ + .{ .name = "Unroll", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "DontUnroll", .value = 0x0002, .parameters = &[_]OperandKind{} }, + .{ .name = "DependencyInfinite", .value = 0x0004, .parameters = &[_]OperandKind{} }, + .{ .name = "DependencyLength", .value = 0x0008, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MinIterations", .value = 0x0010, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MaxIterations", .value = 0x0020, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "IterationMultiple", .value = 0x0040, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "PeelCount", .value = 0x0080, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "PartialCount", .value = 0x0100, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "InitiationIntervalINTEL", .value = 0x10000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MaxConcurrencyINTEL", .value = 0x20000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "DependencyArrayINTEL", .value = 0x40000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "PipelineEnableINTEL", .value = 0x80000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "LoopCoalesceINTEL", .value = 0x100000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MaxInterleavingINTEL", .value = 0x200000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SpeculatedIterationsINTEL", .value = 0x400000, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "NoFusionINTEL", .value = 0x800000, .parameters = &[_]OperandKind{.LiteralInteger} }, + }, + .FunctionControl => &[_]Enumerant{ + .{ .name = "Inline", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "DontInline", .value = 0x0002, .parameters = &[_]OperandKind{} }, + .{ .name = "Pure", .value = 0x0004, .parameters = &[_]OperandKind{} }, + .{ .name = "Const", .value = 0x0008, .parameters = &[_]OperandKind{} }, + .{ .name = "OptNoneINTEL", .value = 0x10000, .parameters = &[_]OperandKind{} }, + }, + .MemorySemantics => &[_]Enumerant{ + .{ .name = "Relaxed", .value = 0x0000, .parameters = &[_]OperandKind{} }, + .{ .name = "Acquire", .value = 0x0002, .parameters = &[_]OperandKind{} }, + .{ .name = "Release", .value = 0x0004, .parameters = &[_]OperandKind{} }, + .{ .name = "AcquireRelease", .value = 0x0008, .parameters = &[_]OperandKind{} }, + .{ .name = "SequentiallyConsistent", .value = 0x0010, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformMemory", .value = 0x0040, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupMemory", .value = 0x0080, .parameters = &[_]OperandKind{} }, + .{ .name = "WorkgroupMemory", .value = 0x0100, .parameters = &[_]OperandKind{} }, + .{ .name = "CrossWorkgroupMemory", .value = 0x0200, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicCounterMemory", .value = 0x0400, .parameters = &[_]OperandKind{} }, + .{ 
.name = "ImageMemory", .value = 0x0800, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputMemory", .value = 0x1000, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputMemoryKHR", .value = 0x1000, .parameters = &[_]OperandKind{} }, + .{ .name = "MakeAvailable", .value = 0x2000, .parameters = &[_]OperandKind{} }, + .{ .name = "MakeAvailableKHR", .value = 0x2000, .parameters = &[_]OperandKind{} }, + .{ .name = "MakeVisible", .value = 0x4000, .parameters = &[_]OperandKind{} }, + .{ .name = "MakeVisibleKHR", .value = 0x4000, .parameters = &[_]OperandKind{} }, + .{ .name = "Volatile", .value = 0x8000, .parameters = &[_]OperandKind{} }, + }, + .MemoryAccess => &[_]Enumerant{ + .{ .name = "Volatile", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "Aligned", .value = 0x0002, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Nontemporal", .value = 0x0004, .parameters = &[_]OperandKind{} }, + .{ .name = "MakePointerAvailable", .value = 0x0008, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "MakePointerAvailableKHR", .value = 0x0008, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "MakePointerVisible", .value = 0x0010, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "MakePointerVisibleKHR", .value = 0x0010, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "NonPrivatePointer", .value = 0x0020, .parameters = &[_]OperandKind{} }, + .{ .name = "NonPrivatePointerKHR", .value = 0x0020, .parameters = &[_]OperandKind{} }, + }, + .KernelProfilingInfo => &[_]Enumerant{ + .{ .name = "CmdExecTime", .value = 0x0001, .parameters = &[_]OperandKind{} }, + }, + .RayFlags => &[_]Enumerant{ + .{ .name = "NoneKHR", .value = 0x0000, .parameters = &[_]OperandKind{} }, + .{ .name = "OpaqueKHR", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "NoOpaqueKHR", .value = 0x0002, .parameters = &[_]OperandKind{} }, + .{ .name = "TerminateOnFirstHitKHR", .value = 0x0004, .parameters = &[_]OperandKind{} }, + .{ .name = "SkipClosestHitShaderKHR", .value = 0x0008, .parameters = &[_]OperandKind{} }, + .{ .name = "CullBackFacingTrianglesKHR", .value = 0x0010, .parameters = &[_]OperandKind{} }, + .{ .name = "CullFrontFacingTrianglesKHR", .value = 0x0020, .parameters = &[_]OperandKind{} }, + .{ .name = "CullOpaqueKHR", .value = 0x0040, .parameters = &[_]OperandKind{} }, + .{ .name = "CullNoOpaqueKHR", .value = 0x0080, .parameters = &[_]OperandKind{} }, + .{ .name = "SkipTrianglesKHR", .value = 0x0100, .parameters = &[_]OperandKind{} }, + .{ .name = "SkipAABBsKHR", .value = 0x0200, .parameters = &[_]OperandKind{} }, + }, + .FragmentShadingRate => &[_]Enumerant{ + .{ .name = "Vertical2Pixels", .value = 0x0001, .parameters = &[_]OperandKind{} }, + .{ .name = "Vertical4Pixels", .value = 0x0002, .parameters = &[_]OperandKind{} }, + .{ .name = "Horizontal2Pixels", .value = 0x0004, .parameters = &[_]OperandKind{} }, + .{ .name = "Horizontal4Pixels", .value = 0x0008, .parameters = &[_]OperandKind{} }, + }, + .SourceLanguage => &[_]Enumerant{ + .{ .name = "Unknown", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "ESSL", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "GLSL", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "OpenCL_C", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "OpenCL_CPP", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "HLSL", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "CPP_for_OpenCL", .value = 6, .parameters = &[_]OperandKind{} }, + }, + .ExecutionModel => 
&[_]Enumerant{ + .{ .name = "Vertex", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "TessellationControl", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "TessellationEvaluation", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Geometry", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "Fragment", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "GLCompute", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "Kernel", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "TaskNV", .value = 5267, .parameters = &[_]OperandKind{} }, + .{ .name = "MeshNV", .value = 5268, .parameters = &[_]OperandKind{} }, + .{ .name = "RayGenerationNV", .value = 5313, .parameters = &[_]OperandKind{} }, + .{ .name = "RayGenerationKHR", .value = 5313, .parameters = &[_]OperandKind{} }, + .{ .name = "IntersectionNV", .value = 5314, .parameters = &[_]OperandKind{} }, + .{ .name = "IntersectionKHR", .value = 5314, .parameters = &[_]OperandKind{} }, + .{ .name = "AnyHitNV", .value = 5315, .parameters = &[_]OperandKind{} }, + .{ .name = "AnyHitKHR", .value = 5315, .parameters = &[_]OperandKind{} }, + .{ .name = "ClosestHitNV", .value = 5316, .parameters = &[_]OperandKind{} }, + .{ .name = "ClosestHitKHR", .value = 5316, .parameters = &[_]OperandKind{} }, + .{ .name = "MissNV", .value = 5317, .parameters = &[_]OperandKind{} }, + .{ .name = "MissKHR", .value = 5317, .parameters = &[_]OperandKind{} }, + .{ .name = "CallableNV", .value = 5318, .parameters = &[_]OperandKind{} }, + .{ .name = "CallableKHR", .value = 5318, .parameters = &[_]OperandKind{} }, + }, + .AddressingModel => &[_]Enumerant{ + .{ .name = "Logical", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Physical32", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "Physical64", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "PhysicalStorageBuffer64", .value = 5348, .parameters = &[_]OperandKind{} }, + .{ .name = "PhysicalStorageBuffer64EXT", .value = 5348, .parameters = &[_]OperandKind{} }, + }, + .MemoryModel => &[_]Enumerant{ + .{ .name = "Simple", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "GLSL450", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "OpenCL", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Vulkan", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "VulkanKHR", .value = 3, .parameters = &[_]OperandKind{} }, + }, + .ExecutionMode => &[_]Enumerant{ + .{ .name = "Invocations", .value = 0, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SpacingEqual", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "SpacingFractionalEven", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "SpacingFractionalOdd", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "VertexOrderCw", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "VertexOrderCcw", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "PixelCenterInteger", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "OriginUpperLeft", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "OriginLowerLeft", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "EarlyFragmentTests", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "PointMode", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "Xfb", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "DepthReplacing", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = 
"DepthGreater", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "DepthLess", .value = 15, .parameters = &[_]OperandKind{} }, + .{ .name = "DepthUnchanged", .value = 16, .parameters = &[_]OperandKind{} }, + .{ .name = "LocalSize", .value = 17, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, + .{ .name = "LocalSizeHint", .value = 18, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, + .{ .name = "InputPoints", .value = 19, .parameters = &[_]OperandKind{} }, + .{ .name = "InputLines", .value = 20, .parameters = &[_]OperandKind{} }, + .{ .name = "InputLinesAdjacency", .value = 21, .parameters = &[_]OperandKind{} }, + .{ .name = "Triangles", .value = 22, .parameters = &[_]OperandKind{} }, + .{ .name = "InputTrianglesAdjacency", .value = 23, .parameters = &[_]OperandKind{} }, + .{ .name = "Quads", .value = 24, .parameters = &[_]OperandKind{} }, + .{ .name = "Isolines", .value = 25, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputVertices", .value = 26, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "OutputPoints", .value = 27, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputLineStrip", .value = 28, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputTriangleStrip", .value = 29, .parameters = &[_]OperandKind{} }, + .{ .name = "VecTypeHint", .value = 30, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "ContractionOff", .value = 31, .parameters = &[_]OperandKind{} }, + .{ .name = "Initializer", .value = 33, .parameters = &[_]OperandKind{} }, + .{ .name = "Finalizer", .value = 34, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupSize", .value = 35, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SubgroupsPerWorkgroup", .value = 36, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SubgroupsPerWorkgroupId", .value = 37, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "LocalSizeId", .value = 38, .parameters = &[_]OperandKind{ .IdRef, .IdRef, .IdRef } }, + .{ .name = "LocalSizeHintId", .value = 39, .parameters = &[_]OperandKind{ .IdRef, .IdRef, .IdRef } }, + .{ .name = "SubgroupUniformControlFlowKHR", .value = 4421, .parameters = &[_]OperandKind{} }, + .{ .name = "PostDepthCoverage", .value = 4446, .parameters = &[_]OperandKind{} }, + .{ .name = "DenormPreserve", .value = 4459, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "DenormFlushToZero", .value = 4460, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SignedZeroInfNanPreserve", .value = 4461, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "RoundingModeRTE", .value = 4462, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "RoundingModeRTZ", .value = 4463, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "StencilRefReplacingEXT", .value = 5027, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputLinesNV", .value = 5269, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputPrimitivesNV", .value = 5270, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "DerivativeGroupQuadsNV", .value = 5289, .parameters = &[_]OperandKind{} }, + .{ .name = "DerivativeGroupLinearNV", .value = 5290, .parameters = &[_]OperandKind{} }, + .{ .name = "OutputTrianglesNV", .value = 5298, .parameters = &[_]OperandKind{} }, + .{ .name = "PixelInterlockOrderedEXT", .value = 5366, .parameters = &[_]OperandKind{} }, + .{ .name = "PixelInterlockUnorderedEXT", .value = 5367, .parameters = &[_]OperandKind{} }, + .{ 
.name = "SampleInterlockOrderedEXT", .value = 5368, .parameters = &[_]OperandKind{} }, + .{ .name = "SampleInterlockUnorderedEXT", .value = 5369, .parameters = &[_]OperandKind{} }, + .{ .name = "ShadingRateInterlockOrderedEXT", .value = 5370, .parameters = &[_]OperandKind{} }, + .{ .name = "ShadingRateInterlockUnorderedEXT", .value = 5371, .parameters = &[_]OperandKind{} }, + .{ .name = "SharedLocalMemorySizeINTEL", .value = 5618, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "RoundingModeRTPINTEL", .value = 5620, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "RoundingModeRTNINTEL", .value = 5621, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "FloatingPointModeALTINTEL", .value = 5622, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "FloatingPointModeIEEEINTEL", .value = 5623, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MaxWorkgroupSizeINTEL", .value = 5893, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, + .{ .name = "MaxWorkDimINTEL", .value = 5894, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "NoGlobalOffsetINTEL", .value = 5895, .parameters = &[_]OperandKind{} }, + .{ .name = "NumSIMDWorkitemsINTEL", .value = 5896, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SchedulerTargetFmaxMhzINTEL", .value = 5903, .parameters = &[_]OperandKind{.LiteralInteger} }, + }, + .StorageClass => &[_]Enumerant{ + .{ .name = "UniformConstant", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Input", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "Uniform", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Output", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "Workgroup", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "CrossWorkgroup", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "Private", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "Function", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "Generic", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "PushConstant", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicCounter", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "Image", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageBuffer", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = "CallableDataNV", .value = 5328, .parameters = &[_]OperandKind{} }, + .{ .name = "CallableDataKHR", .value = 5328, .parameters = &[_]OperandKind{} }, + .{ .name = "IncomingCallableDataNV", .value = 5329, .parameters = &[_]OperandKind{} }, + .{ .name = "IncomingCallableDataKHR", .value = 5329, .parameters = &[_]OperandKind{} }, + .{ .name = "RayPayloadNV", .value = 5338, .parameters = &[_]OperandKind{} }, + .{ .name = "RayPayloadKHR", .value = 5338, .parameters = &[_]OperandKind{} }, + .{ .name = "HitAttributeNV", .value = 5339, .parameters = &[_]OperandKind{} }, + .{ .name = "HitAttributeKHR", .value = 5339, .parameters = &[_]OperandKind{} }, + .{ .name = "IncomingRayPayloadNV", .value = 5342, .parameters = &[_]OperandKind{} }, + .{ .name = "IncomingRayPayloadKHR", .value = 5342, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderRecordBufferNV", .value = 5343, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderRecordBufferKHR", .value = 5343, .parameters = &[_]OperandKind{} }, + .{ .name = "PhysicalStorageBuffer", .value = 5349, .parameters = &[_]OperandKind{} }, + .{ .name = 
"PhysicalStorageBufferEXT", .value = 5349, .parameters = &[_]OperandKind{} }, + .{ .name = "CodeSectionINTEL", .value = 5605, .parameters = &[_]OperandKind{} }, + .{ .name = "DeviceOnlyINTEL", .value = 5936, .parameters = &[_]OperandKind{} }, + .{ .name = "HostOnlyINTEL", .value = 5937, .parameters = &[_]OperandKind{} }, + }, + .Dim => &[_]Enumerant{ + .{ .name = "1D", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "2D", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "3D", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Cube", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "Rect", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "Buffer", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "SubpassData", .value = 6, .parameters = &[_]OperandKind{} }, + }, + .SamplerAddressingMode => &[_]Enumerant{ + .{ .name = "None", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "ClampToEdge", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "Clamp", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Repeat", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "RepeatMirrored", .value = 4, .parameters = &[_]OperandKind{} }, + }, + .SamplerFilterMode => &[_]Enumerant{ + .{ .name = "Nearest", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Linear", .value = 1, .parameters = &[_]OperandKind{} }, + }, + .ImageFormat => &[_]Enumerant{ + .{ .name = "Unknown", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba32f", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba16f", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "R32f", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba8", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba8Snorm", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg32f", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg16f", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "R11fG11fB10f", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "R16f", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba16", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgb10A2", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg16", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg8", .value = 13, .parameters = &[_]OperandKind{} }, + .{ .name = "R16", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "R8", .value = 15, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba16Snorm", .value = 16, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg16Snorm", .value = 17, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg8Snorm", .value = 18, .parameters = &[_]OperandKind{} }, + .{ .name = "R16Snorm", .value = 19, .parameters = &[_]OperandKind{} }, + .{ .name = "R8Snorm", .value = 20, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba32i", .value = 21, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba16i", .value = 22, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba8i", .value = 23, .parameters = &[_]OperandKind{} }, + .{ .name = "R32i", .value = 24, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg32i", .value = 25, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg16i", .value = 26, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg8i", .value = 27, .parameters = &[_]OperandKind{} }, + .{ .name = "R16i", .value = 28, .parameters = &[_]OperandKind{} }, + .{ .name = "R8i", .value = 29, 
.parameters = &[_]OperandKind{} }, + .{ .name = "Rgba32ui", .value = 30, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba16ui", .value = 31, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgba8ui", .value = 32, .parameters = &[_]OperandKind{} }, + .{ .name = "R32ui", .value = 33, .parameters = &[_]OperandKind{} }, + .{ .name = "Rgb10a2ui", .value = 34, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg32ui", .value = 35, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg16ui", .value = 36, .parameters = &[_]OperandKind{} }, + .{ .name = "Rg8ui", .value = 37, .parameters = &[_]OperandKind{} }, + .{ .name = "R16ui", .value = 38, .parameters = &[_]OperandKind{} }, + .{ .name = "R8ui", .value = 39, .parameters = &[_]OperandKind{} }, + .{ .name = "R64ui", .value = 40, .parameters = &[_]OperandKind{} }, + .{ .name = "R64i", .value = 41, .parameters = &[_]OperandKind{} }, + }, + .ImageChannelOrder => &[_]Enumerant{ + .{ .name = "R", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "A", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "RG", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "RA", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "RGB", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "RGBA", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "BGRA", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "ARGB", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "Intensity", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "Luminance", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "Rx", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "RGx", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "RGBx", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = "Depth", .value = 13, .parameters = &[_]OperandKind{} }, + .{ .name = "DepthStencil", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "sRGB", .value = 15, .parameters = &[_]OperandKind{} }, + .{ .name = "sRGBx", .value = 16, .parameters = &[_]OperandKind{} }, + .{ .name = "sRGBA", .value = 17, .parameters = &[_]OperandKind{} }, + .{ .name = "sBGRA", .value = 18, .parameters = &[_]OperandKind{} }, + .{ .name = "ABGR", .value = 19, .parameters = &[_]OperandKind{} }, + }, + .ImageChannelDataType => &[_]Enumerant{ + .{ .name = "SnormInt8", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "SnormInt16", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "UnormInt8", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "UnormInt16", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "UnormShort565", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "UnormShort555", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "UnormInt101010", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "SignedInt8", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "SignedInt16", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "SignedInt32", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "UnsignedInt8", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "UnsignedInt16", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "UnsignedInt32", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = "HalfFloat", .value = 13, .parameters = &[_]OperandKind{} }, + .{ .name = "Float", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "UnormInt24", .value = 15, .parameters = &[_]OperandKind{} 
}, + .{ .name = "UnormInt101010_2", .value = 16, .parameters = &[_]OperandKind{} }, + }, + .FPRoundingMode => &[_]Enumerant{ + .{ .name = "RTE", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "RTZ", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "RTP", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "RTN", .value = 3, .parameters = &[_]OperandKind{} }, + }, + .FPDenormMode => &[_]Enumerant{ + .{ .name = "Preserve", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "FlushToZero", .value = 1, .parameters = &[_]OperandKind{} }, + }, + .QuantizationModes => &[_]Enumerant{ + .{ .name = "TRN", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "TRN_ZERO", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "RND", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "RND_ZERO", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "RND_INF", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "RND_MIN_INF", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "RND_CONV", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "RND_CONV_ODD", .value = 7, .parameters = &[_]OperandKind{} }, + }, + .FPOperationMode => &[_]Enumerant{ + .{ .name = "IEEE", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "ALT", .value = 1, .parameters = &[_]OperandKind{} }, + }, + .OverflowModes => &[_]Enumerant{ + .{ .name = "WRAP", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "SAT", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "SAT_ZERO", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "SAT_SYM", .value = 3, .parameters = &[_]OperandKind{} }, + }, + .LinkageType => &[_]Enumerant{ + .{ .name = "Export", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Import", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "LinkOnceODR", .value = 2, .parameters = &[_]OperandKind{} }, + }, + .AccessQualifier => &[_]Enumerant{ + .{ .name = "ReadOnly", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "WriteOnly", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "ReadWrite", .value = 2, .parameters = &[_]OperandKind{} }, + }, + .FunctionParameterAttribute => &[_]Enumerant{ + .{ .name = "Zext", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Sext", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "ByVal", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Sret", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "NoAlias", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "NoCapture", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "NoWrite", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "NoReadWrite", .value = 7, .parameters = &[_]OperandKind{} }, + }, + .Decoration => &[_]Enumerant{ + .{ .name = "RelaxedPrecision", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "SpecId", .value = 1, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Block", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "BufferBlock", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "RowMajor", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "ColMajor", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "ArrayStride", .value = 6, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MatrixStride", .value = 7, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "GLSLShared", .value = 8, .parameters = 
&[_]OperandKind{} }, + .{ .name = "GLSLPacked", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "CPacked", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "BuiltIn", .value = 11, .parameters = &[_]OperandKind{.BuiltIn} }, + .{ .name = "NoPerspective", .value = 13, .parameters = &[_]OperandKind{} }, + .{ .name = "Flat", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "Patch", .value = 15, .parameters = &[_]OperandKind{} }, + .{ .name = "Centroid", .value = 16, .parameters = &[_]OperandKind{} }, + .{ .name = "Sample", .value = 17, .parameters = &[_]OperandKind{} }, + .{ .name = "Invariant", .value = 18, .parameters = &[_]OperandKind{} }, + .{ .name = "Restrict", .value = 19, .parameters = &[_]OperandKind{} }, + .{ .name = "Aliased", .value = 20, .parameters = &[_]OperandKind{} }, + .{ .name = "Volatile", .value = 21, .parameters = &[_]OperandKind{} }, + .{ .name = "Constant", .value = 22, .parameters = &[_]OperandKind{} }, + .{ .name = "Coherent", .value = 23, .parameters = &[_]OperandKind{} }, + .{ .name = "NonWritable", .value = 24, .parameters = &[_]OperandKind{} }, + .{ .name = "NonReadable", .value = 25, .parameters = &[_]OperandKind{} }, + .{ .name = "Uniform", .value = 26, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformId", .value = 27, .parameters = &[_]OperandKind{.IdScope} }, + .{ .name = "SaturatedConversion", .value = 28, .parameters = &[_]OperandKind{} }, + .{ .name = "Stream", .value = 29, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Location", .value = 30, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Component", .value = 31, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Index", .value = 32, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Binding", .value = 33, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "DescriptorSet", .value = 34, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Offset", .value = 35, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "XfbBuffer", .value = 36, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "XfbStride", .value = 37, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "FuncParamAttr", .value = 38, .parameters = &[_]OperandKind{.FunctionParameterAttribute} }, + .{ .name = "FPRoundingMode", .value = 39, .parameters = &[_]OperandKind{.FPRoundingMode} }, + .{ .name = "FPFastMathMode", .value = 40, .parameters = &[_]OperandKind{.FPFastMathMode} }, + .{ .name = "LinkageAttributes", .value = 41, .parameters = &[_]OperandKind{ .LiteralString, .LinkageType } }, + .{ .name = "NoContraction", .value = 42, .parameters = &[_]OperandKind{} }, + .{ .name = "InputAttachmentIndex", .value = 43, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "Alignment", .value = 44, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MaxByteOffset", .value = 45, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "AlignmentId", .value = 46, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "MaxByteOffsetId", .value = 47, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "NoSignedWrap", .value = 4469, .parameters = &[_]OperandKind{} }, + .{ .name = "NoUnsignedWrap", .value = 4470, .parameters = &[_]OperandKind{} }, + .{ .name = "ExplicitInterpAMD", .value = 4999, .parameters = &[_]OperandKind{} }, + .{ .name = "OverrideCoverageNV", .value = 5248, .parameters = &[_]OperandKind{} }, + .{ .name = "PassthroughNV", .value = 5250, .parameters = 
&[_]OperandKind{} }, + .{ .name = "ViewportRelativeNV", .value = 5252, .parameters = &[_]OperandKind{} }, + .{ .name = "SecondaryViewportRelativeNV", .value = 5256, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "PerPrimitiveNV", .value = 5271, .parameters = &[_]OperandKind{} }, + .{ .name = "PerViewNV", .value = 5272, .parameters = &[_]OperandKind{} }, + .{ .name = "PerTaskNV", .value = 5273, .parameters = &[_]OperandKind{} }, + .{ .name = "PerVertexKHR", .value = 5285, .parameters = &[_]OperandKind{} }, + .{ .name = "PerVertexNV", .value = 5285, .parameters = &[_]OperandKind{} }, + .{ .name = "NonUniform", .value = 5300, .parameters = &[_]OperandKind{} }, + .{ .name = "NonUniformEXT", .value = 5300, .parameters = &[_]OperandKind{} }, + .{ .name = "RestrictPointer", .value = 5355, .parameters = &[_]OperandKind{} }, + .{ .name = "RestrictPointerEXT", .value = 5355, .parameters = &[_]OperandKind{} }, + .{ .name = "AliasedPointer", .value = 5356, .parameters = &[_]OperandKind{} }, + .{ .name = "AliasedPointerEXT", .value = 5356, .parameters = &[_]OperandKind{} }, + .{ .name = "BindlessSamplerNV", .value = 5398, .parameters = &[_]OperandKind{} }, + .{ .name = "BindlessImageNV", .value = 5399, .parameters = &[_]OperandKind{} }, + .{ .name = "BoundSamplerNV", .value = 5400, .parameters = &[_]OperandKind{} }, + .{ .name = "BoundImageNV", .value = 5401, .parameters = &[_]OperandKind{} }, + .{ .name = "SIMTCallINTEL", .value = 5599, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "ReferencedIndirectlyINTEL", .value = 5602, .parameters = &[_]OperandKind{} }, + .{ .name = "ClobberINTEL", .value = 5607, .parameters = &[_]OperandKind{.LiteralString} }, + .{ .name = "SideEffectsINTEL", .value = 5608, .parameters = &[_]OperandKind{} }, + .{ .name = "VectorComputeVariableINTEL", .value = 5624, .parameters = &[_]OperandKind{} }, + .{ .name = "FuncParamIOKindINTEL", .value = 5625, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "VectorComputeFunctionINTEL", .value = 5626, .parameters = &[_]OperandKind{} }, + .{ .name = "StackCallINTEL", .value = 5627, .parameters = &[_]OperandKind{} }, + .{ .name = "GlobalVariableOffsetINTEL", .value = 5628, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "CounterBuffer", .value = 5634, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "HlslCounterBufferGOOGLE", .value = 5634, .parameters = &[_]OperandKind{.IdRef} }, + .{ .name = "UserSemantic", .value = 5635, .parameters = &[_]OperandKind{.LiteralString} }, + .{ .name = "HlslSemanticGOOGLE", .value = 5635, .parameters = &[_]OperandKind{.LiteralString} }, + .{ .name = "UserTypeGOOGLE", .value = 5636, .parameters = &[_]OperandKind{.LiteralString} }, + .{ .name = "FunctionRoundingModeINTEL", .value = 5822, .parameters = &[_]OperandKind{ .LiteralInteger, .FPRoundingMode } }, + .{ .name = "FunctionDenormModeINTEL", .value = 5823, .parameters = &[_]OperandKind{ .LiteralInteger, .FPDenormMode } }, + .{ .name = "RegisterINTEL", .value = 5825, .parameters = &[_]OperandKind{} }, + .{ .name = "MemoryINTEL", .value = 5826, .parameters = &[_]OperandKind{.LiteralString} }, + .{ .name = "NumbanksINTEL", .value = 5827, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "BankwidthINTEL", .value = 5828, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "MaxPrivateCopiesINTEL", .value = 5829, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SinglepumpINTEL", .value = 5830, .parameters = &[_]OperandKind{} }, + .{ .name = 
"DoublepumpINTEL", .value = 5831, .parameters = &[_]OperandKind{} }, + .{ .name = "MaxReplicatesINTEL", .value = 5832, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "SimpleDualPortINTEL", .value = 5833, .parameters = &[_]OperandKind{} }, + .{ .name = "MergeINTEL", .value = 5834, .parameters = &[_]OperandKind{ .LiteralString, .LiteralString } }, + .{ .name = "BankBitsINTEL", .value = 5835, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "ForcePow2DepthINTEL", .value = 5836, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "BurstCoalesceINTEL", .value = 5899, .parameters = &[_]OperandKind{} }, + .{ .name = "CacheSizeINTEL", .value = 5900, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "DontStaticallyCoalesceINTEL", .value = 5901, .parameters = &[_]OperandKind{} }, + .{ .name = "PrefetchINTEL", .value = 5902, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "StallEnableINTEL", .value = 5905, .parameters = &[_]OperandKind{} }, + .{ .name = "FuseLoopsInFunctionINTEL", .value = 5907, .parameters = &[_]OperandKind{} }, + .{ .name = "BufferLocationINTEL", .value = 5921, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "IOPipeStorageINTEL", .value = 5944, .parameters = &[_]OperandKind{.LiteralInteger} }, + .{ .name = "FunctionFloatingPointModeINTEL", .value = 6080, .parameters = &[_]OperandKind{ .LiteralInteger, .FPOperationMode } }, + .{ .name = "SingleElementVectorINTEL", .value = 6085, .parameters = &[_]OperandKind{} }, + .{ .name = "VectorComputeCallableFunctionINTEL", .value = 6087, .parameters = &[_]OperandKind{} }, + .{ .name = "MediaBlockIOINTEL", .value = 6140, .parameters = &[_]OperandKind{} }, + }, + .BuiltIn => &[_]Enumerant{ + .{ .name = "Position", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "PointSize", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "ClipDistance", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "CullDistance", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "VertexId", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "InstanceId", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "PrimitiveId", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "InvocationId", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "Layer", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "ViewportIndex", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "TessLevelOuter", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "TessLevelInner", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = "TessCoord", .value = 13, .parameters = &[_]OperandKind{} }, + .{ .name = "PatchVertices", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "FragCoord", .value = 15, .parameters = &[_]OperandKind{} }, + .{ .name = "PointCoord", .value = 16, .parameters = &[_]OperandKind{} }, + .{ .name = "FrontFacing", .value = 17, .parameters = &[_]OperandKind{} }, + .{ .name = "SampleId", .value = 18, .parameters = &[_]OperandKind{} }, + .{ .name = "SamplePosition", .value = 19, .parameters = &[_]OperandKind{} }, + .{ .name = "SampleMask", .value = 20, .parameters = &[_]OperandKind{} }, + .{ .name = "FragDepth", .value = 22, .parameters = &[_]OperandKind{} }, + .{ .name = "HelperInvocation", .value = 23, .parameters = &[_]OperandKind{} }, + .{ .name = "NumWorkgroups", .value = 24, .parameters = &[_]OperandKind{} }, + .{ .name = "WorkgroupSize", .value = 25, .parameters = 
&[_]OperandKind{} }, + .{ .name = "WorkgroupId", .value = 26, .parameters = &[_]OperandKind{} }, + .{ .name = "LocalInvocationId", .value = 27, .parameters = &[_]OperandKind{} }, + .{ .name = "GlobalInvocationId", .value = 28, .parameters = &[_]OperandKind{} }, + .{ .name = "LocalInvocationIndex", .value = 29, .parameters = &[_]OperandKind{} }, + .{ .name = "WorkDim", .value = 30, .parameters = &[_]OperandKind{} }, + .{ .name = "GlobalSize", .value = 31, .parameters = &[_]OperandKind{} }, + .{ .name = "EnqueuedWorkgroupSize", .value = 32, .parameters = &[_]OperandKind{} }, + .{ .name = "GlobalOffset", .value = 33, .parameters = &[_]OperandKind{} }, + .{ .name = "GlobalLinearId", .value = 34, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupSize", .value = 36, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupMaxSize", .value = 37, .parameters = &[_]OperandKind{} }, + .{ .name = "NumSubgroups", .value = 38, .parameters = &[_]OperandKind{} }, + .{ .name = "NumEnqueuedSubgroups", .value = 39, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupId", .value = 40, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupLocalInvocationId", .value = 41, .parameters = &[_]OperandKind{} }, + .{ .name = "VertexIndex", .value = 42, .parameters = &[_]OperandKind{} }, + .{ .name = "InstanceIndex", .value = 43, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupEqMask", .value = 4416, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupEqMaskKHR", .value = 4416, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupGeMask", .value = 4417, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupGeMaskKHR", .value = 4417, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupGtMask", .value = 4418, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupGtMaskKHR", .value = 4418, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupLeMask", .value = 4419, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupLeMaskKHR", .value = 4419, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupLtMask", .value = 4420, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupLtMaskKHR", .value = 4420, .parameters = &[_]OperandKind{} }, + .{ .name = "BaseVertex", .value = 4424, .parameters = &[_]OperandKind{} }, + .{ .name = "BaseInstance", .value = 4425, .parameters = &[_]OperandKind{} }, + .{ .name = "DrawIndex", .value = 4426, .parameters = &[_]OperandKind{} }, + .{ .name = "PrimitiveShadingRateKHR", .value = 4432, .parameters = &[_]OperandKind{} }, + .{ .name = "DeviceIndex", .value = 4438, .parameters = &[_]OperandKind{} }, + .{ .name = "ViewIndex", .value = 4440, .parameters = &[_]OperandKind{} }, + .{ .name = "ShadingRateKHR", .value = 4444, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordNoPerspAMD", .value = 4992, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordNoPerspCentroidAMD", .value = 4993, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordNoPerspSampleAMD", .value = 4994, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordSmoothAMD", .value = 4995, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordSmoothCentroidAMD", .value = 4996, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordSmoothSampleAMD", .value = 4997, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordPullModelAMD", .value = 4998, .parameters = &[_]OperandKind{} }, + .{ .name = "FragStencilRefEXT", .value = 5014, .parameters = &[_]OperandKind{} }, + .{ .name = "ViewportMaskNV", .value = 5253, .parameters = &[_]OperandKind{} }, + .{ 
.name = "SecondaryPositionNV", .value = 5257, .parameters = &[_]OperandKind{} }, + .{ .name = "SecondaryViewportMaskNV", .value = 5258, .parameters = &[_]OperandKind{} }, + .{ .name = "PositionPerViewNV", .value = 5261, .parameters = &[_]OperandKind{} }, + .{ .name = "ViewportMaskPerViewNV", .value = 5262, .parameters = &[_]OperandKind{} }, + .{ .name = "FullyCoveredEXT", .value = 5264, .parameters = &[_]OperandKind{} }, + .{ .name = "TaskCountNV", .value = 5274, .parameters = &[_]OperandKind{} }, + .{ .name = "PrimitiveCountNV", .value = 5275, .parameters = &[_]OperandKind{} }, + .{ .name = "PrimitiveIndicesNV", .value = 5276, .parameters = &[_]OperandKind{} }, + .{ .name = "ClipDistancePerViewNV", .value = 5277, .parameters = &[_]OperandKind{} }, + .{ .name = "CullDistancePerViewNV", .value = 5278, .parameters = &[_]OperandKind{} }, + .{ .name = "LayerPerViewNV", .value = 5279, .parameters = &[_]OperandKind{} }, + .{ .name = "MeshViewCountNV", .value = 5280, .parameters = &[_]OperandKind{} }, + .{ .name = "MeshViewIndicesNV", .value = 5281, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordKHR", .value = 5286, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordNV", .value = 5286, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordNoPerspKHR", .value = 5287, .parameters = &[_]OperandKind{} }, + .{ .name = "BaryCoordNoPerspNV", .value = 5287, .parameters = &[_]OperandKind{} }, + .{ .name = "FragSizeEXT", .value = 5292, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentSizeNV", .value = 5292, .parameters = &[_]OperandKind{} }, + .{ .name = "FragInvocationCountEXT", .value = 5293, .parameters = &[_]OperandKind{} }, + .{ .name = "InvocationsPerPixelNV", .value = 5293, .parameters = &[_]OperandKind{} }, + .{ .name = "LaunchIdNV", .value = 5319, .parameters = &[_]OperandKind{} }, + .{ .name = "LaunchIdKHR", .value = 5319, .parameters = &[_]OperandKind{} }, + .{ .name = "LaunchSizeNV", .value = 5320, .parameters = &[_]OperandKind{} }, + .{ .name = "LaunchSizeKHR", .value = 5320, .parameters = &[_]OperandKind{} }, + .{ .name = "WorldRayOriginNV", .value = 5321, .parameters = &[_]OperandKind{} }, + .{ .name = "WorldRayOriginKHR", .value = 5321, .parameters = &[_]OperandKind{} }, + .{ .name = "WorldRayDirectionNV", .value = 5322, .parameters = &[_]OperandKind{} }, + .{ .name = "WorldRayDirectionKHR", .value = 5322, .parameters = &[_]OperandKind{} }, + .{ .name = "ObjectRayOriginNV", .value = 5323, .parameters = &[_]OperandKind{} }, + .{ .name = "ObjectRayOriginKHR", .value = 5323, .parameters = &[_]OperandKind{} }, + .{ .name = "ObjectRayDirectionNV", .value = 5324, .parameters = &[_]OperandKind{} }, + .{ .name = "ObjectRayDirectionKHR", .value = 5324, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTminNV", .value = 5325, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTminKHR", .value = 5325, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTmaxNV", .value = 5326, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTmaxKHR", .value = 5326, .parameters = &[_]OperandKind{} }, + .{ .name = "InstanceCustomIndexNV", .value = 5327, .parameters = &[_]OperandKind{} }, + .{ .name = "InstanceCustomIndexKHR", .value = 5327, .parameters = &[_]OperandKind{} }, + .{ .name = "ObjectToWorldNV", .value = 5330, .parameters = &[_]OperandKind{} }, + .{ .name = "ObjectToWorldKHR", .value = 5330, .parameters = &[_]OperandKind{} }, + .{ .name = "WorldToObjectNV", .value = 5331, .parameters = &[_]OperandKind{} }, + .{ .name = "WorldToObjectKHR", .value = 5331, .parameters = 
&[_]OperandKind{} }, + .{ .name = "HitTNV", .value = 5332, .parameters = &[_]OperandKind{} }, + .{ .name = "HitKindNV", .value = 5333, .parameters = &[_]OperandKind{} }, + .{ .name = "HitKindKHR", .value = 5333, .parameters = &[_]OperandKind{} }, + .{ .name = "CurrentRayTimeNV", .value = 5334, .parameters = &[_]OperandKind{} }, + .{ .name = "IncomingRayFlagsNV", .value = 5351, .parameters = &[_]OperandKind{} }, + .{ .name = "IncomingRayFlagsKHR", .value = 5351, .parameters = &[_]OperandKind{} }, + .{ .name = "RayGeometryIndexKHR", .value = 5352, .parameters = &[_]OperandKind{} }, + .{ .name = "WarpsPerSMNV", .value = 5374, .parameters = &[_]OperandKind{} }, + .{ .name = "SMCountNV", .value = 5375, .parameters = &[_]OperandKind{} }, + .{ .name = "WarpIDNV", .value = 5376, .parameters = &[_]OperandKind{} }, + .{ .name = "SMIDNV", .value = 5377, .parameters = &[_]OperandKind{} }, + }, + .Scope => &[_]Enumerant{ + .{ .name = "CrossDevice", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Device", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "Workgroup", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Subgroup", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "Invocation", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "QueueFamily", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "QueueFamilyKHR", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderCallKHR", .value = 6, .parameters = &[_]OperandKind{} }, + }, + .GroupOperation => &[_]Enumerant{ + .{ .name = "Reduce", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "InclusiveScan", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "ExclusiveScan", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "ClusteredReduce", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "PartitionedReduceNV", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "PartitionedInclusiveScanNV", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "PartitionedExclusiveScanNV", .value = 8, .parameters = &[_]OperandKind{} }, + }, + .KernelEnqueueFlags => &[_]Enumerant{ + .{ .name = "NoWait", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "WaitKernel", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "WaitWorkGroup", .value = 2, .parameters = &[_]OperandKind{} }, + }, + .Capability => &[_]Enumerant{ + .{ .name = "Matrix", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "Shader", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "Geometry", .value = 2, .parameters = &[_]OperandKind{} }, + .{ .name = "Tessellation", .value = 3, .parameters = &[_]OperandKind{} }, + .{ .name = "Addresses", .value = 4, .parameters = &[_]OperandKind{} }, + .{ .name = "Linkage", .value = 5, .parameters = &[_]OperandKind{} }, + .{ .name = "Kernel", .value = 6, .parameters = &[_]OperandKind{} }, + .{ .name = "Vector16", .value = 7, .parameters = &[_]OperandKind{} }, + .{ .name = "Float16Buffer", .value = 8, .parameters = &[_]OperandKind{} }, + .{ .name = "Float16", .value = 9, .parameters = &[_]OperandKind{} }, + .{ .name = "Float64", .value = 10, .parameters = &[_]OperandKind{} }, + .{ .name = "Int64", .value = 11, .parameters = &[_]OperandKind{} }, + .{ .name = "Int64Atomics", .value = 12, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageBasic", .value = 13, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageReadWrite", .value = 14, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageMipmap", 
.value = 15, .parameters = &[_]OperandKind{} }, + .{ .name = "Pipes", .value = 17, .parameters = &[_]OperandKind{} }, + .{ .name = "Groups", .value = 18, .parameters = &[_]OperandKind{} }, + .{ .name = "DeviceEnqueue", .value = 19, .parameters = &[_]OperandKind{} }, + .{ .name = "LiteralSampler", .value = 20, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicStorage", .value = 21, .parameters = &[_]OperandKind{} }, + .{ .name = "Int16", .value = 22, .parameters = &[_]OperandKind{} }, + .{ .name = "TessellationPointSize", .value = 23, .parameters = &[_]OperandKind{} }, + .{ .name = "GeometryPointSize", .value = 24, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageGatherExtended", .value = 25, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageMultisample", .value = 27, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformBufferArrayDynamicIndexing", .value = 28, .parameters = &[_]OperandKind{} }, + .{ .name = "SampledImageArrayDynamicIndexing", .value = 29, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageBufferArrayDynamicIndexing", .value = 30, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageArrayDynamicIndexing", .value = 31, .parameters = &[_]OperandKind{} }, + .{ .name = "ClipDistance", .value = 32, .parameters = &[_]OperandKind{} }, + .{ .name = "CullDistance", .value = 33, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageCubeArray", .value = 34, .parameters = &[_]OperandKind{} }, + .{ .name = "SampleRateShading", .value = 35, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageRect", .value = 36, .parameters = &[_]OperandKind{} }, + .{ .name = "SampledRect", .value = 37, .parameters = &[_]OperandKind{} }, + .{ .name = "GenericPointer", .value = 38, .parameters = &[_]OperandKind{} }, + .{ .name = "Int8", .value = 39, .parameters = &[_]OperandKind{} }, + .{ .name = "InputAttachment", .value = 40, .parameters = &[_]OperandKind{} }, + .{ .name = "SparseResidency", .value = 41, .parameters = &[_]OperandKind{} }, + .{ .name = "MinLod", .value = 42, .parameters = &[_]OperandKind{} }, + .{ .name = "Sampled1D", .value = 43, .parameters = &[_]OperandKind{} }, + .{ .name = "Image1D", .value = 44, .parameters = &[_]OperandKind{} }, + .{ .name = "SampledCubeArray", .value = 45, .parameters = &[_]OperandKind{} }, + .{ .name = "SampledBuffer", .value = 46, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageBuffer", .value = 47, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageMSArray", .value = 48, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageExtendedFormats", .value = 49, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageQuery", .value = 50, .parameters = &[_]OperandKind{} }, + .{ .name = "DerivativeControl", .value = 51, .parameters = &[_]OperandKind{} }, + .{ .name = "InterpolationFunction", .value = 52, .parameters = &[_]OperandKind{} }, + .{ .name = "TransformFeedback", .value = 53, .parameters = &[_]OperandKind{} }, + .{ .name = "GeometryStreams", .value = 54, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageReadWithoutFormat", .value = 55, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageWriteWithoutFormat", .value = 56, .parameters = &[_]OperandKind{} }, + .{ .name = "MultiViewport", .value = 57, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupDispatch", .value = 58, .parameters = &[_]OperandKind{} }, + .{ .name = "NamedBarrier", .value = 59, .parameters = &[_]OperandKind{} }, + .{ .name = "PipeStorage", .value = 60, .parameters = &[_]OperandKind{} }, + .{ .name = 
"GroupNonUniform", .value = 61, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformVote", .value = 62, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformArithmetic", .value = 63, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformBallot", .value = 64, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformShuffle", .value = 65, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformShuffleRelative", .value = 66, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformClustered", .value = 67, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformQuad", .value = 68, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderLayer", .value = 69, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderViewportIndex", .value = 70, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformDecoration", .value = 71, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentShadingRateKHR", .value = 4422, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupBallotKHR", .value = 4423, .parameters = &[_]OperandKind{} }, + .{ .name = "DrawParameters", .value = 4427, .parameters = &[_]OperandKind{} }, + .{ .name = "WorkgroupMemoryExplicitLayoutKHR", .value = 4428, .parameters = &[_]OperandKind{} }, + .{ .name = "WorkgroupMemoryExplicitLayout8BitAccessKHR", .value = 4429, .parameters = &[_]OperandKind{} }, + .{ .name = "WorkgroupMemoryExplicitLayout16BitAccessKHR", .value = 4430, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupVoteKHR", .value = 4431, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageBuffer16BitAccess", .value = 4433, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageUniformBufferBlock16", .value = 4433, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformAndStorageBuffer16BitAccess", .value = 4434, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageUniform16", .value = 4434, .parameters = &[_]OperandKind{} }, + .{ .name = "StoragePushConstant16", .value = 4435, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageInputOutput16", .value = 4436, .parameters = &[_]OperandKind{} }, + .{ .name = "DeviceGroup", .value = 4437, .parameters = &[_]OperandKind{} }, + .{ .name = "MultiView", .value = 4439, .parameters = &[_]OperandKind{} }, + .{ .name = "VariablePointersStorageBuffer", .value = 4441, .parameters = &[_]OperandKind{} }, + .{ .name = "VariablePointers", .value = 4442, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicStorageOps", .value = 4445, .parameters = &[_]OperandKind{} }, + .{ .name = "SampleMaskPostDepthCoverage", .value = 4447, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageBuffer8BitAccess", .value = 4448, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformAndStorageBuffer8BitAccess", .value = 4449, .parameters = &[_]OperandKind{} }, + .{ .name = "StoragePushConstant8", .value = 4450, .parameters = &[_]OperandKind{} }, + .{ .name = "DenormPreserve", .value = 4464, .parameters = &[_]OperandKind{} }, + .{ .name = "DenormFlushToZero", .value = 4465, .parameters = &[_]OperandKind{} }, + .{ .name = "SignedZeroInfNanPreserve", .value = 4466, .parameters = &[_]OperandKind{} }, + .{ .name = "RoundingModeRTE", .value = 4467, .parameters = &[_]OperandKind{} }, + .{ .name = "RoundingModeRTZ", .value = 4468, .parameters = &[_]OperandKind{} }, + .{ .name = "RayQueryProvisionalKHR", .value = 4471, .parameters = &[_]OperandKind{} }, + .{ .name = "RayQueryKHR", .value = 4472, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTraversalPrimitiveCullingKHR", .value = 
4478, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTracingKHR", .value = 4479, .parameters = &[_]OperandKind{} }, + .{ .name = "Float16ImageAMD", .value = 5008, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageGatherBiasLodAMD", .value = 5009, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentMaskAMD", .value = 5010, .parameters = &[_]OperandKind{} }, + .{ .name = "StencilExportEXT", .value = 5013, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageReadWriteLodAMD", .value = 5015, .parameters = &[_]OperandKind{} }, + .{ .name = "Int64ImageEXT", .value = 5016, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderClockKHR", .value = 5055, .parameters = &[_]OperandKind{} }, + .{ .name = "SampleMaskOverrideCoverageNV", .value = 5249, .parameters = &[_]OperandKind{} }, + .{ .name = "GeometryShaderPassthroughNV", .value = 5251, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderViewportIndexLayerEXT", .value = 5254, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderViewportIndexLayerNV", .value = 5254, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderViewportMaskNV", .value = 5255, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderStereoViewNV", .value = 5259, .parameters = &[_]OperandKind{} }, + .{ .name = "PerViewAttributesNV", .value = 5260, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentFullyCoveredEXT", .value = 5265, .parameters = &[_]OperandKind{} }, + .{ .name = "MeshShadingNV", .value = 5266, .parameters = &[_]OperandKind{} }, + .{ .name = "ImageFootprintNV", .value = 5282, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentBarycentricKHR", .value = 5284, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentBarycentricNV", .value = 5284, .parameters = &[_]OperandKind{} }, + .{ .name = "ComputeDerivativeGroupQuadsNV", .value = 5288, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentDensityEXT", .value = 5291, .parameters = &[_]OperandKind{} }, + .{ .name = "ShadingRateNV", .value = 5291, .parameters = &[_]OperandKind{} }, + .{ .name = "GroupNonUniformPartitionedNV", .value = 5297, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderNonUniform", .value = 5301, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderNonUniformEXT", .value = 5301, .parameters = &[_]OperandKind{} }, + .{ .name = "RuntimeDescriptorArray", .value = 5302, .parameters = &[_]OperandKind{} }, + .{ .name = "RuntimeDescriptorArrayEXT", .value = 5302, .parameters = &[_]OperandKind{} }, + .{ .name = "InputAttachmentArrayDynamicIndexing", .value = 5303, .parameters = &[_]OperandKind{} }, + .{ .name = "InputAttachmentArrayDynamicIndexingEXT", .value = 5303, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformTexelBufferArrayDynamicIndexing", .value = 5304, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformTexelBufferArrayDynamicIndexingEXT", .value = 5304, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageTexelBufferArrayDynamicIndexing", .value = 5305, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageTexelBufferArrayDynamicIndexingEXT", .value = 5305, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformBufferArrayNonUniformIndexing", .value = 5306, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformBufferArrayNonUniformIndexingEXT", .value = 5306, .parameters = &[_]OperandKind{} }, + .{ .name = "SampledImageArrayNonUniformIndexing", .value = 5307, .parameters = &[_]OperandKind{} }, + .{ .name = "SampledImageArrayNonUniformIndexingEXT", .value = 5307, .parameters = &[_]OperandKind{} }, + .{ .name = 
"StorageBufferArrayNonUniformIndexing", .value = 5308, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageBufferArrayNonUniformIndexingEXT", .value = 5308, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageArrayNonUniformIndexing", .value = 5309, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageImageArrayNonUniformIndexingEXT", .value = 5309, .parameters = &[_]OperandKind{} }, + .{ .name = "InputAttachmentArrayNonUniformIndexing", .value = 5310, .parameters = &[_]OperandKind{} }, + .{ .name = "InputAttachmentArrayNonUniformIndexingEXT", .value = 5310, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformTexelBufferArrayNonUniformIndexing", .value = 5311, .parameters = &[_]OperandKind{} }, + .{ .name = "UniformTexelBufferArrayNonUniformIndexingEXT", .value = 5311, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageTexelBufferArrayNonUniformIndexing", .value = 5312, .parameters = &[_]OperandKind{} }, + .{ .name = "StorageTexelBufferArrayNonUniformIndexingEXT", .value = 5312, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTracingNV", .value = 5340, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTracingMotionBlurNV", .value = 5341, .parameters = &[_]OperandKind{} }, + .{ .name = "VulkanMemoryModel", .value = 5345, .parameters = &[_]OperandKind{} }, + .{ .name = "VulkanMemoryModelKHR", .value = 5345, .parameters = &[_]OperandKind{} }, + .{ .name = "VulkanMemoryModelDeviceScope", .value = 5346, .parameters = &[_]OperandKind{} }, + .{ .name = "VulkanMemoryModelDeviceScopeKHR", .value = 5346, .parameters = &[_]OperandKind{} }, + .{ .name = "PhysicalStorageBufferAddresses", .value = 5347, .parameters = &[_]OperandKind{} }, + .{ .name = "PhysicalStorageBufferAddressesEXT", .value = 5347, .parameters = &[_]OperandKind{} }, + .{ .name = "ComputeDerivativeGroupLinearNV", .value = 5350, .parameters = &[_]OperandKind{} }, + .{ .name = "RayTracingProvisionalKHR", .value = 5353, .parameters = &[_]OperandKind{} }, + .{ .name = "CooperativeMatrixNV", .value = 5357, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentShaderSampleInterlockEXT", .value = 5363, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentShaderShadingRateInterlockEXT", .value = 5372, .parameters = &[_]OperandKind{} }, + .{ .name = "ShaderSMBuiltinsNV", .value = 5373, .parameters = &[_]OperandKind{} }, + .{ .name = "FragmentShaderPixelInterlockEXT", .value = 5378, .parameters = &[_]OperandKind{} }, + .{ .name = "DemoteToHelperInvocation", .value = 5379, .parameters = &[_]OperandKind{} }, + .{ .name = "DemoteToHelperInvocationEXT", .value = 5379, .parameters = &[_]OperandKind{} }, + .{ .name = "BindlessTextureNV", .value = 5390, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupShuffleINTEL", .value = 5568, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupBufferBlockIOINTEL", .value = 5569, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupImageBlockIOINTEL", .value = 5570, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupImageMediaBlockIOINTEL", .value = 5579, .parameters = &[_]OperandKind{} }, + .{ .name = "RoundToInfinityINTEL", .value = 5582, .parameters = &[_]OperandKind{} }, + .{ .name = "FloatingPointModeINTEL", .value = 5583, .parameters = &[_]OperandKind{} }, + .{ .name = "IntegerFunctions2INTEL", .value = 5584, .parameters = &[_]OperandKind{} }, + .{ .name = "FunctionPointersINTEL", .value = 5603, .parameters = &[_]OperandKind{} }, + .{ .name = "IndirectReferencesINTEL", .value = 5604, .parameters = &[_]OperandKind{} }, + .{ .name = "AsmINTEL", 
.value = 5606, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicFloat32MinMaxEXT", .value = 5612, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicFloat64MinMaxEXT", .value = 5613, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicFloat16MinMaxEXT", .value = 5616, .parameters = &[_]OperandKind{} }, + .{ .name = "VectorComputeINTEL", .value = 5617, .parameters = &[_]OperandKind{} }, + .{ .name = "VectorAnyINTEL", .value = 5619, .parameters = &[_]OperandKind{} }, + .{ .name = "ExpectAssumeKHR", .value = 5629, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupAvcMotionEstimationINTEL", .value = 5696, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupAvcMotionEstimationIntraINTEL", .value = 5697, .parameters = &[_]OperandKind{} }, + .{ .name = "SubgroupAvcMotionEstimationChromaINTEL", .value = 5698, .parameters = &[_]OperandKind{} }, + .{ .name = "VariableLengthArrayINTEL", .value = 5817, .parameters = &[_]OperandKind{} }, + .{ .name = "FunctionFloatControlINTEL", .value = 5821, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGAMemoryAttributesINTEL", .value = 5824, .parameters = &[_]OperandKind{} }, + .{ .name = "FPFastMathModeINTEL", .value = 5837, .parameters = &[_]OperandKind{} }, + .{ .name = "ArbitraryPrecisionIntegersINTEL", .value = 5844, .parameters = &[_]OperandKind{} }, + .{ .name = "ArbitraryPrecisionFloatingPointINTEL", .value = 5845, .parameters = &[_]OperandKind{} }, + .{ .name = "UnstructuredLoopControlsINTEL", .value = 5886, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGALoopControlsINTEL", .value = 5888, .parameters = &[_]OperandKind{} }, + .{ .name = "KernelAttributesINTEL", .value = 5892, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGAKernelAttributesINTEL", .value = 5897, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGAMemoryAccessesINTEL", .value = 5898, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGAClusterAttributesINTEL", .value = 5904, .parameters = &[_]OperandKind{} }, + .{ .name = "LoopFuseINTEL", .value = 5906, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGABufferLocationINTEL", .value = 5920, .parameters = &[_]OperandKind{} }, + .{ .name = "ArbitraryPrecisionFixedPointINTEL", .value = 5922, .parameters = &[_]OperandKind{} }, + .{ .name = "USMStorageClassesINTEL", .value = 5935, .parameters = &[_]OperandKind{} }, + .{ .name = "IOPipesINTEL", .value = 5943, .parameters = &[_]OperandKind{} }, + .{ .name = "BlockingPipesINTEL", .value = 5945, .parameters = &[_]OperandKind{} }, + .{ .name = "FPGARegINTEL", .value = 5948, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductInputAll", .value = 6016, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductInputAllKHR", .value = 6016, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductInput4x8Bit", .value = 6017, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductInput4x8BitKHR", .value = 6017, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductInput4x8BitPacked", .value = 6018, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductInput4x8BitPackedKHR", .value = 6018, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProduct", .value = 6019, .parameters = &[_]OperandKind{} }, + .{ .name = "DotProductKHR", .value = 6019, .parameters = &[_]OperandKind{} }, + .{ .name = "BitInstructions", .value = 6025, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicFloat32AddEXT", .value = 6033, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicFloat64AddEXT", .value = 6034, .parameters = &[_]OperandKind{} }, + .{ .name = 
"LongConstantCompositeINTEL", .value = 6089, .parameters = &[_]OperandKind{} }, + .{ .name = "OptNoneINTEL", .value = 6094, .parameters = &[_]OperandKind{} }, + .{ .name = "AtomicFloat16AddEXT", .value = 6095, .parameters = &[_]OperandKind{} }, + .{ .name = "DebugInfoModuleINTEL", .value = 6114, .parameters = &[_]OperandKind{} }, + }, + .RayQueryIntersection => &[_]Enumerant{ + .{ .name = "RayQueryCandidateIntersectionKHR", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "RayQueryCommittedIntersectionKHR", .value = 1, .parameters = &[_]OperandKind{} }, + }, + .RayQueryCommittedIntersectionType => &[_]Enumerant{ + .{ .name = "RayQueryCommittedIntersectionNoneKHR", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "RayQueryCommittedIntersectionTriangleKHR", .value = 1, .parameters = &[_]OperandKind{} }, + .{ .name = "RayQueryCommittedIntersectionGeneratedKHR", .value = 2, .parameters = &[_]OperandKind{} }, + }, + .RayQueryCandidateIntersectionType => &[_]Enumerant{ + .{ .name = "RayQueryCandidateIntersectionTriangleKHR", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "RayQueryCandidateIntersectionAABBKHR", .value = 1, .parameters = &[_]OperandKind{} }, + }, + .PackedVectorFormat => &[_]Enumerant{ + .{ .name = "PackedVectorFormat4x8Bit", .value = 0, .parameters = &[_]OperandKind{} }, + .{ .name = "PackedVectorFormat4x8BitKHR", .value = 0, .parameters = &[_]OperandKind{} }, + }, + .IdResultType => unreachable, + .IdResult => unreachable, + .IdMemorySemantics => unreachable, + .IdScope => unreachable, + .IdRef => unreachable, + .LiteralInteger => unreachable, + .LiteralString => unreachable, + .LiteralContextDependentNumber => unreachable, + .LiteralExtInstInteger => unreachable, + .LiteralSpecConstantOpInteger => unreachable, + .PairLiteralIntegerIdRef => unreachable, + .PairIdRefLiteralInteger => unreachable, + .PairIdRefIdRef => unreachable, + }; + } +}; pub const Opcode = enum(u16) { OpNop = 0, OpUndef = 1, @@ -398,6 +1493,12 @@ pub const Opcode = enum(u16) { OpConvertUToAccelerationStructureKHR = 4447, OpIgnoreIntersectionKHR = 4448, OpTerminateRayKHR = 4449, + OpSDot = 4450, + OpUDot = 4451, + OpSUDot = 4452, + OpSDotAccSat = 4453, + OpUDotAccSat = 4454, + OpSUDotAccSat = 4455, OpTypeRayQueryKHR = 4472, OpRayQueryInitializeKHR = 4473, OpRayQueryTerminateKHR = 4474, @@ -423,6 +1524,8 @@ pub const Opcode = enum(u16) { OpIgnoreIntersectionNV = 5335, OpTerminateRayNV = 5336, OpTraceNV = 5337, + OpTraceMotionNV = 5338, + OpTraceRayMotionNV = 5339, OpTypeAccelerationStructureKHR = 5341, OpExecuteCallableNV = 5344, OpTypeCooperativeMatrixNV = 5358, @@ -432,8 +1535,15 @@ pub const Opcode = enum(u16) { OpCooperativeMatrixLengthNV = 5362, OpBeginInvocationInterlockEXT = 5364, OpEndInvocationInterlockEXT = 5365, - OpDemoteToHelperInvocationEXT = 5380, + OpDemoteToHelperInvocation = 5380, OpIsHelperInvocationEXT = 5381, + OpConvertUToImageNV = 5391, + OpConvertUToSamplerNV = 5392, + OpConvertImageToUNV = 5393, + OpConvertSamplerToUNV = 5394, + OpConvertUToSampledImageNV = 5395, + OpConvertSampledImageToUNV = 5396, + OpSamplerImageAddressingModeNV = 5397, OpSubgroupShuffleINTEL = 5571, OpSubgroupShuffleDownINTEL = 5572, OpSubgroupShuffleUpINTEL = 5573, @@ -458,141 +1568,13 @@ pub const Opcode = enum(u16) { OpUSubSatINTEL = 5596, OpIMul32x16INTEL = 5597, OpUMul32x16INTEL = 5598, - OpConstFunctionPointerINTEL = 5600, - OpFunctionPointerCallINTEL = 5601, - OpAsmTargetINTEL = 5609, - OpAsmINTEL = 5610, - OpAsmCallINTEL = 5611, OpAtomicFMinEXT = 5614, OpAtomicFMaxEXT = 
5615, OpAssumeTrueKHR = 5630, OpExpectKHR = 5631, OpDecorateString = 5632, OpMemberDecorateString = 5633, - OpVmeImageINTEL = 5699, - OpTypeVmeImageINTEL = 5700, - OpTypeAvcImePayloadINTEL = 5701, - OpTypeAvcRefPayloadINTEL = 5702, - OpTypeAvcSicPayloadINTEL = 5703, - OpTypeAvcMcePayloadINTEL = 5704, - OpTypeAvcMceResultINTEL = 5705, - OpTypeAvcImeResultINTEL = 5706, - OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, - OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, - OpTypeAvcImeSingleReferenceStreaminINTEL = 5709, - OpTypeAvcImeDualReferenceStreaminINTEL = 5710, - OpTypeAvcRefResultINTEL = 5711, - OpTypeAvcSicResultINTEL = 5712, - OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, - OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, - OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, - OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, - OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, - OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, - OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, - OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, - OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, - OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, - OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, - OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, - OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, - OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, - OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, - OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, - OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, - OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, - OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, - OpSubgroupAvcMceConvertToImePayloadINTEL = 5732, - OpSubgroupAvcMceConvertToImeResultINTEL = 5733, - OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, - OpSubgroupAvcMceConvertToRefResultINTEL = 5735, - OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, - OpSubgroupAvcMceConvertToSicResultINTEL = 5737, - OpSubgroupAvcMceGetMotionVectorsINTEL = 5738, - OpSubgroupAvcMceGetInterDistortionsINTEL = 5739, - OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, - OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, - OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, - OpSubgroupAvcMceGetInterDirectionsINTEL = 5743, - OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, - OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, - OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, - OpSubgroupAvcImeInitializeINTEL = 5747, - OpSubgroupAvcImeSetSingleReferenceINTEL = 5748, - OpSubgroupAvcImeSetDualReferenceINTEL = 5749, - OpSubgroupAvcImeRefWindowSizeINTEL = 5750, - OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, - OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, - OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, - OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, - OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, - OpSubgroupAvcImeSetWeightedSadINTEL = 5756, - OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, - OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, - OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, - OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, - OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, - OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, - 
OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, - OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, - OpSubgroupAvcImeConvertToMceResultINTEL = 5765, - OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, - OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, - OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, - OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, - OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, - OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, - OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, - OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, - OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, - OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, - OpSubgroupAvcImeGetBorderReachedINTEL = 5776, - OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, - OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, - OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, - OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, - OpSubgroupAvcFmeInitializeINTEL = 5781, - OpSubgroupAvcBmeInitializeINTEL = 5782, - OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, - OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, - OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, - OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, - OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, - OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, - OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, - OpSubgroupAvcRefConvertToMceResultINTEL = 5790, - OpSubgroupAvcSicInitializeINTEL = 5791, - OpSubgroupAvcSicConfigureSkcINTEL = 5792, - OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, - OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, - OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, - OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, - OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, - OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, - OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, - OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, - OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, - OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, - OpSubgroupAvcSicEvaluateIpeINTEL = 5803, - OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, - OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, - OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, - OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, - OpSubgroupAvcSicConvertToMceResultINTEL = 5808, - OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, - OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, - OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, - OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, - OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, - OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, - OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, - OpSubgroupAvcSicGetInterRawSadsINTEL = 5816, - OpVariableLengthArrayINTEL = 5818, - OpSaveMemoryINTEL = 5819, - OpRestoreMemoryINTEL = 5820, OpLoopControlINTEL = 5887, - OpPtrCastToCrossWorkgroupINTEL = 5934, - OpCrossWorkgroupCastToPtrINTEL = 5938, OpReadPipeBlockingINTEL = 5946, OpWritePipeBlockingINTEL = 5947, OpFPGARegINTEL = 5949, @@ -619,8 +1601,15 @@ pub const Opcode = enum(u16) { OpConstantCompositeContinuedINTEL = 6091, 
     OpSpecConstantCompositeContinuedINTEL = 6092,
+    pub const OpSDotKHR = Opcode.OpSDot;
+    pub const OpUDotKHR = Opcode.OpUDot;
+    pub const OpSUDotKHR = Opcode.OpSUDot;
+    pub const OpSDotAccSatKHR = Opcode.OpSDotAccSat;
+    pub const OpUDotAccSatKHR = Opcode.OpUDotAccSat;
+    pub const OpSUDotAccSatKHR = Opcode.OpSUDotAccSat;
     pub const OpReportIntersectionNV = Opcode.OpReportIntersectionKHR;
     pub const OpTypeAccelerationStructureNV = Opcode.OpTypeAccelerationStructureKHR;
+    pub const OpDemoteToHelperInvocationEXT = Opcode.OpDemoteToHelperInvocation;
     pub const OpDecorateStringGOOGLE = Opcode.OpDecorateString;
     pub const OpMemberDecorateStringGOOGLE = Opcode.OpMemberDecorateString;
@@ -982,6 +1971,12 @@ pub const Opcode = enum(u16) {
             .OpConvertUToAccelerationStructureKHR => struct { id_result_type: IdResultType, id_result: IdResult, accel: IdRef },
             .OpIgnoreIntersectionKHR => void,
             .OpTerminateRayKHR => void,
+            .OpSDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, packed_vector_format: ?PackedVectorFormat = null },
+            .OpUDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, packed_vector_format: ?PackedVectorFormat = null },
+            .OpSUDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, packed_vector_format: ?PackedVectorFormat = null },
+            .OpSDotAccSat => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, accumulator: IdRef, packed_vector_format: ?PackedVectorFormat = null },
+            .OpUDotAccSat => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, accumulator: IdRef, packed_vector_format: ?PackedVectorFormat = null },
+            .OpSUDotAccSat => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, accumulator: IdRef, packed_vector_format: ?PackedVectorFormat = null },
             .OpTypeRayQueryKHR => struct { id_result: IdResult },
             .OpRayQueryInitializeKHR => struct { rayquery: IdRef, accel: IdRef, rayflags: IdRef, cullmask: IdRef, rayorigin: IdRef, raytmin: IdRef, raydirection: IdRef, raytmax: IdRef },
             .OpRayQueryTerminateKHR => struct { rayquery: IdRef },
@@ -1007,6 +2002,8 @@ pub const Opcode = enum(u16) {
             .OpIgnoreIntersectionNV => void,
             .OpTerminateRayNV => void,
             .OpTraceNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, payloadid: IdRef },
+            .OpTraceMotionNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, time: IdRef, payloadid: IdRef },
+            .OpTraceRayMotionNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, time: IdRef, payload: IdRef },
             .OpTypeAccelerationStructureKHR => struct { id_result: IdResult },
             .OpExecuteCallableNV => struct { sbt_index: IdRef, callable_dataid: IdRef },
             .OpTypeCooperativeMatrixNV => struct { id_result: IdResult, component_type: IdRef, execution: IdScope, rows: IdRef, columns: IdRef },
@@ -1016,8 +2013,15 @@ pub const Opcode = enum(u16) {
             .OpCooperativeMatrixLengthNV => struct { id_result_type: IdResultType, id_result: IdResult, type: IdRef },
             .OpBeginInvocationInterlockEXT => void,
             .OpEndInvocationInterlockEXT => void,
-            .OpDemoteToHelperInvocationEXT => void,
+            .OpDemoteToHelperInvocation => void,
             .OpIsHelperInvocationEXT => struct { id_result_type: IdResultType, id_result: IdResult },
+            .OpConvertUToImageNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+            .OpConvertUToSamplerNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+            .OpConvertImageToUNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+            .OpConvertSamplerToUNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+            .OpConvertUToSampledImageNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+            .OpConvertSampledImageToUNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef },
+            .OpSamplerImageAddressingModeNV => struct { bit_width: LiteralInteger },
             .OpSubgroupShuffleINTEL => struct { id_result_type: IdResultType, id_result: IdResult, data: IdRef, invocationid: IdRef },
             .OpSubgroupShuffleDownINTEL => struct { id_result_type: IdResultType, id_result: IdResult, current: IdRef, next: IdRef, delta: IdRef },
             .OpSubgroupShuffleUpINTEL => struct { id_result_type: IdResultType, id_result: IdResult, previous: IdRef, current: IdRef, delta: IdRef },
@@ -1042,141 +2046,13 @@ pub const Opcode = enum(u16) {
             .OpUSubSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
             .OpIMul32x16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
             .OpUMul32x16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef },
-            .OpConstFunctionPointerINTEL => struct { id_result_type: IdResultType, id_result: IdResult, function: IdRef },
-            .OpFunctionPointerCallINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: []const IdRef = &.{} },
-            .OpAsmTargetINTEL => struct { id_result_type: IdResultType, id_result: IdResult, asm_target: LiteralString },
-            .OpAsmINTEL => struct { id_result_type: IdResultType, id_result: IdResult, asm_type: IdRef, target: IdRef, asm_instructions: LiteralString, constraints: LiteralString },
-            .OpAsmCallINTEL => struct { id_result_type: IdResultType, id_result: IdResult, @"asm": IdRef, argument_0: []const IdRef = &.{} },
             .OpAtomicFMinEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
             .OpAtomicFMaxEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
             .OpAssumeTrueKHR => struct { condition: IdRef },
             .OpExpectKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef, expectedvalue: IdRef },
             .OpDecorateString => struct { target: IdRef, decoration: Decoration.Extended },
             .OpMemberDecorateString => struct { struct_type: IdRef, member: LiteralInteger, decoration: Decoration.Extended },
-            .OpVmeImageINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image_type: IdRef, sampler: IdRef },
-            .OpTypeVmeImageINTEL => struct { id_result: IdResult, image_type: IdRef },
-            .OpTypeAvcImePayloadINTEL => struct { id_result: IdResult },
-            .OpTypeAvcRefPayloadINTEL => struct { id_result: IdResult },
-            .OpTypeAvcSicPayloadINTEL => struct { id_result: IdResult },
-            .OpTypeAvcMcePayloadINTEL => struct { id_result: IdResult },
-            .OpTypeAvcMceResultINTEL => struct { id_result: IdResult },
-            .OpTypeAvcImeResultINTEL => struct { id_result: IdResult },
-            .OpTypeAvcImeResultSingleReferenceStreamoutINTEL => struct { id_result: IdResult },
-            .OpTypeAvcImeResultDualReferenceStreamoutINTEL => struct { id_result: IdResult },
-            .OpTypeAvcImeSingleReferenceStreaminINTEL => struct { id_result: IdResult },
-            .OpTypeAvcImeDualReferenceStreaminINTEL => struct { id_result: IdResult },
-            .OpTypeAvcRefResultINTEL => struct { id_result: IdResult },
-            .OpTypeAvcSicResultINTEL => struct { id_result: IdResult },
-            .OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
-            .OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, reference_base_penalty: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
-            .OpSubgroupAvcMceSetInterShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_shape_penalty: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
-            .OpSubgroupAvcMceSetInterDirectionPenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, direction_cost: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
-            .OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
-            .OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
-            .OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
-            .OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
-            .OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_cost_center_delta: IdRef, packed_cost_table: IdRef, cost_precision: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, slice_type: IdRef, qp: IdRef },
-            .OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
-            .OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
-            .OpSubgroupAvcMceSetAcOnlyHaarINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL => struct { id_result_type: IdResultType, id_result: IdResult, source_field_polarity: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL => struct { id_result_type: IdResultType, id_result: IdResult, reference_field_polarity: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL => struct { id_result_type: IdResultType, id_result: IdResult, forward_reference_field_polarity: IdRef, backward_reference_field_polarity: IdRef, payload: IdRef },
-            .OpSubgroupAvcMceConvertToImePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceConvertToImeResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceConvertToRefPayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceConvertToRefResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceConvertToSicPayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceConvertToSicResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetMotionVectorsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetBestInterDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterMajorShapeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterMinorShapeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterDirectionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterMotionVectorCountINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterReferenceIdsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_reference_ids: IdRef, packed_reference_parameter_field_polarities: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef, partition_mask: IdRef, sad_adjustment: IdRef },
-            .OpSubgroupAvcImeSetSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, ref_offset: IdRef, search_window_config: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeSetDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, fwd_ref_offset: IdRef, bwd_ref_offset: IdRef, id_ref_4: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeRefWindowSizeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, search_window_config: IdRef, dual_ref: IdRef },
-            .OpSubgroupAvcImeAdjustRefOffsetINTEL => struct { id_result_type: IdResultType, id_result: IdResult, ref_offset: IdRef, src_coord: IdRef, ref_window_size: IdRef, image_size: IdRef },
-            .OpSubgroupAvcImeConvertToMcePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeSetMaxMotionVectorCountINTEL => struct { id_result_type: IdResultType, id_result: IdResult, max_motion_vector_count: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL => struct { id_result_type: IdResultType, id_result: IdResult, threshold: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeSetWeightedSadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_sad_weights: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeEvaluateWithDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
-            .OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
-            .OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
-            .OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef, streamin_components: IdRef },
-            .OpSubgroupAvcImeConvertToMceResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeGetSingleReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeGetDualReferenceStreaminINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeStripDualReferenceStreamoutINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef },
-            .OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef },
-            .OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef },
-            .OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef, direction: IdRef },
-            .OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef, direction: IdRef },
-            .OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef, major_shape: IdRef, direction: IdRef },
-            .OpSubgroupAvcImeGetBorderReachedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image_select: IdRef, payload: IdRef },
-            .OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcFmeInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef, motion_vectors: IdRef, major_shapes: IdRef, minor_shapes: IdRef, direction: IdRef, pixel_resolution: IdRef, sad_adjustment: IdRef },
-            .OpSubgroupAvcBmeInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef, motion_vectors: IdRef, major_shapes: IdRef, minor_shapes: IdRef, direction: IdRef, pixel_resolution: IdRef, bidirectional_weight: IdRef, sad_adjustment: IdRef },
-            .OpSubgroupAvcRefConvertToMcePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcRefSetBidirectionalMixDisableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcRefSetBilinearFilterEnableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcRefEvaluateWithDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, payload: IdRef },
-            .OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, packed_reference_field_polarities: IdRef, payload: IdRef },
-            .OpSubgroupAvcRefConvertToMceResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicInitializeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_coord: IdRef },
-            .OpSubgroupAvcSicConfigureSkcINTEL => struct { id_result_type: IdResultType, id_result: IdResult, skip_block_partition_type: IdRef, skip_motion_vector_mask: IdRef, motion_vectors: IdRef, bidirectional_weight: IdRef, sad_adjustment: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicConfigureIpeLumaINTEL => struct { id_result_type: IdResultType, id_result: IdResult, luma_intra_partition_mask: IdRef, intra_neighbour_availabilty: IdRef, left_edge_luma_pixels: IdRef, upper_left_corner_luma_pixel: IdRef, upper_edge_luma_pixels: IdRef, upper_right_edge_luma_pixels: IdRef, sad_adjustment: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicConfigureIpeLumaChromaINTEL => struct { id_result_type: IdResultType, id_result: IdResult, luma_intra_partition_mask: IdRef, intra_neighbour_availabilty: IdRef, left_edge_luma_pixels: IdRef, upper_left_corner_luma_pixel: IdRef, upper_edge_luma_pixels: IdRef, upper_right_edge_luma_pixels: IdRef, left_edge_chroma_pixels: IdRef, upper_left_corner_chroma_pixel: IdRef, upper_edge_chroma_pixels: IdRef, sad_adjustment: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicGetMotionVectorMaskINTEL => struct { id_result_type: IdResultType, id_result: IdResult, skip_block_partition_type: IdRef, direction: IdRef },
-            .OpSubgroupAvcSicConvertToMcePayloadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_shape_penalty: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, luma_mode_penalty: IdRef, luma_packed_neighbor_modes: IdRef, luma_packed_non_dc_penalty: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, chroma_mode_base_penalty: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicSetBilinearFilterEnableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packed_sad_coefficients: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, block_based_skip_type: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicEvaluateIpeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicEvaluateWithDualReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, fwd_ref_image: IdRef, bwd_ref_image: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, src_image: IdRef, packed_reference_ids: IdRef, packed_reference_field_polarities: IdRef, payload: IdRef },
-            .OpSubgroupAvcSicConvertToMceResultINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetIpeLumaShapeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetPackedIpeLumaModesINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetIpeChromaModeINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpSubgroupAvcSicGetInterRawSadsINTEL => struct { id_result_type: IdResultType, id_result: IdResult, payload: IdRef },
-            .OpVariableLengthArrayINTEL => struct { id_result_type: IdResultType, id_result: IdResult, lenght: IdRef },
-            .OpSaveMemoryINTEL => struct { id_result_type: IdResultType, id_result: IdResult },
-            .OpRestoreMemoryINTEL => struct { ptr: IdRef },
             .OpLoopControlINTEL => struct { loop_control_parameters: []const LiteralInteger = &.{} },
-            .OpPtrCastToCrossWorkgroupINTEL => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
-            .OpCrossWorkgroupCastToPtrINTEL => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef },
             .OpReadPipeBlockingINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: IdRef, packet_alignment: IdRef },
             .OpWritePipeBlockingINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: IdRef, packet_alignment: IdRef },
             .OpFPGARegINTEL => struct { id_result_type: IdResultType, id_result: IdResult, result: IdRef, input: IdRef },
@@ -1198,12 +2074,3169 @@ pub const Opcode = enum(u16) {
             .OpRayQueryGetIntersectionObjectToWorldKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
             .OpRayQueryGetIntersectionWorldToObjectKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
             .OpAtomicFAddEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
-            .OpTypeBufferSurfaceINTEL => struct { id_result: IdResult },
+            .OpTypeBufferSurfaceINTEL => struct { id_result: IdResult, accessqualifier: AccessQualifier },
             .OpTypeStructContinuedINTEL => struct { id_ref: []const IdRef = &.{} },
             .OpConstantCompositeContinuedINTEL => struct { constituents: []const IdRef = &.{} },
             .OpSpecConstantCompositeContinuedINTEL => struct { constituents: []const IdRef = &.{} },
         };
     }
+    pub fn operands(self: Opcode) []const Operand {
+        return switch (self) {
+            .OpNop => &[_]Operand{},
+            .OpUndef => &[_]Operand{
+                .{ .kind = .IdResultType, .quantifier = .required },
+                .{ .kind = .IdResult, .quantifier = .required },
+            },
+            .OpSourceContinued => &[_]Operand{
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpSource => &[_]Operand{
+                .{ .kind = .SourceLanguage, .quantifier = .required },
+                .{ .kind = .LiteralInteger, .quantifier = .required },
+                .{ .kind = .IdRef, .quantifier = .optional },
+                .{ .kind = .LiteralString, .quantifier = .optional },
+            },
+            .OpSourceExtension => &[_]Operand{
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpName => &[_]Operand{
+                .{ .kind = .IdRef, .quantifier = .required },
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpMemberName => &[_]Operand{
+                .{ .kind = .IdRef, .quantifier = .required },
+                .{ .kind = .LiteralInteger, .quantifier = .required },
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpString => &[_]Operand{
+                .{ .kind = .IdResult, .quantifier = .required },
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpLine => &[_]Operand{
+                .{ .kind = .IdRef, .quantifier = .required },
+                .{ .kind = .LiteralInteger, .quantifier = .required },
+                .{ .kind = .LiteralInteger, .quantifier = .required },
+            },
+            .OpExtension => &[_]Operand{
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpExtInstImport => &[_]Operand{
+                .{ .kind = .IdResult, .quantifier = .required },
+                .{ .kind = .LiteralString, .quantifier = .required },
+            },
+            .OpExtInst => &[_]Operand{
+                .{ .kind = .IdResultType, .quantifier = .required },
+                .{ .kind = .IdResult, .quantifier = .required },
+                .{ .kind = .IdRef, .quantifier = .required },
+                .{ .kind = .LiteralExtInstInteger, .quantifier = .required },
+                .{ .kind = .IdRef, .quantifier = .variadic },
+            },
+            .OpMemoryModel => &[_]Operand{
+                .{ .kind = .AddressingModel, .quantifier = .required },
+                .{
.kind = .MemoryModel, .quantifier = .required }, + }, + .OpEntryPoint => &[_]Operand{ + .{ .kind = .ExecutionModel, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralString, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpExecutionMode => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ExecutionMode, .quantifier = .required }, + }, + .OpCapability => &[_]Operand{ + .{ .kind = .Capability, .quantifier = .required }, + }, + .OpTypeVoid => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypeBool => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypeInt => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + }, + .OpTypeFloat => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + }, + .OpTypeVector => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + }, + .OpTypeMatrix => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + }, + .OpTypeImage => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .Dim, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .ImageFormat, .quantifier = .required }, + .{ .kind = .AccessQualifier, .quantifier = .optional }, + }, + .OpTypeSampler => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypeSampledImage => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpTypeArray => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpTypeRuntimeArray => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpTypeStruct => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpTypeOpaque => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .LiteralString, .quantifier = .required }, + }, + .OpTypePointer => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .StorageClass, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpTypeFunction => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpTypeEvent => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypeDeviceEvent => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypeReserveId => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypeQueue => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpTypePipe => 
&[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .AccessQualifier, .quantifier = .required }, + }, + .OpTypeForwardPointer => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .StorageClass, .quantifier = .required }, + }, + .OpConstantTrue => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpConstantFalse => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpConstant => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .LiteralContextDependentNumber, .quantifier = .required }, + }, + .OpConstantComposite => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpConstantSampler => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .SamplerAddressingMode, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .SamplerFilterMode, .quantifier = .required }, + }, + .OpConstantNull => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpSpecConstantTrue => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpSpecConstantFalse => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpSpecConstant => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .LiteralContextDependentNumber, .quantifier = .required }, + }, + .OpSpecConstantComposite => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpSpecConstantOp => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .LiteralSpecConstantOpInteger, .quantifier = .required }, + }, + .OpFunction => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .FunctionControl, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFunctionParameter => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpFunctionEnd => &[_]Operand{}, + .OpFunctionCall => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpVariable => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .StorageClass, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .optional }, + }, + .OpImageTexelPointer => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ 
.kind = .IdRef, .quantifier = .required }, + }, + .OpLoad => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .MemoryAccess, .quantifier = .optional }, + }, + .OpStore => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .MemoryAccess, .quantifier = .optional }, + }, + .OpCopyMemory => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .MemoryAccess, .quantifier = .optional }, + .{ .kind = .MemoryAccess, .quantifier = .optional }, + }, + .OpCopyMemorySized => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .MemoryAccess, .quantifier = .optional }, + .{ .kind = .MemoryAccess, .quantifier = .optional }, + }, + .OpAccessChain => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpInBoundsAccessChain => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpPtrAccessChain => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpArrayLength => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + }, + .OpGenericPtrMemSemantics => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpInBoundsPtrAccessChain => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpDecorate => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .Decoration, .quantifier = .required }, + }, + .OpMemberDecorate => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .Decoration, .quantifier = .required }, + }, + .OpDecorationGroup => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + }, + .OpGroupDecorate => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpGroupMemberDecorate => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .PairIdRefLiteralInteger, .quantifier = .variadic }, + }, + .OpVectorExtractDynamic => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpVectorInsertDynamic => &[_]Operand{ 
+ .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpVectorShuffle => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .variadic }, + }, + .OpCompositeConstruct => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpCompositeExtract => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .variadic }, + }, + .OpCompositeInsert => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .variadic }, + }, + .OpCopyObject => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpTranspose => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSampledImage => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageSampleImplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageSampleExplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .required }, + }, + .OpImageSampleDrefImplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageSampleDrefExplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .required }, + }, + .OpImageSampleProjImplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = 
.optional }, + }, + .OpImageSampleProjExplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .required }, + }, + .OpImageSampleProjDrefImplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageSampleProjDrefExplicitLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .required }, + }, + .OpImageFetch => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageGather => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageDrefGather => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageRead => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImageWrite => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .ImageOperands, .quantifier = .optional }, + }, + .OpImage => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageQueryFormat => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageQueryOrder => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageQuerySizeLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageQuerySize => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = 
.IdRef, .quantifier = .required }, + }, + .OpImageQueryLod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageQueryLevels => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpImageQuerySamples => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpConvertFToU => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpConvertFToS => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpConvertSToF => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpConvertUToF => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUConvert => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSConvert => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFConvert => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpQuantizeToF16 => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpConvertPtrToU => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSatConvertSToU => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSatConvertUToS => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpConvertUToPtr => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpPtrCastToGeneric => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpGenericCastToPtr => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpGenericCastToPtrExplicit => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, 
.quantifier = .required }, + .{ .kind = .StorageClass, .quantifier = .required }, + }, + .OpBitcast => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSNegate => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFNegate => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIAdd => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFAdd => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpISub => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFSub => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIMul => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFMul => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUDiv => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSDiv => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFDiv => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUMod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSRem => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSMod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFRem => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, 
.quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFMod => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpVectorTimesScalar => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpMatrixTimesScalar => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpVectorTimesMatrix => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpMatrixTimesVector => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpMatrixTimesMatrix => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpOuterProduct => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpDot => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIAddCarry => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpISubBorrow => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUMulExtended => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSMulExtended => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAny => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAll => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIsNan => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, 
.quantifier = .required },
+        },
+        .OpIsInf => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpIsFinite => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpIsNormal => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSignBitSet => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpLessOrGreater => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpOrdered => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpUnordered => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpLogicalEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpLogicalNotEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpLogicalOr => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpLogicalAnd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpLogicalNot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSelect => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpIEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpINotEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpUGreaterThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSGreaterThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpUGreaterThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSGreaterThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpULessThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSLessThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpULessThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSLessThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFOrdEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFUnordEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFOrdNotEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFUnordNotEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFOrdLessThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFUnordLessThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFOrdGreaterThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFUnordGreaterThan => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFOrdLessThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFUnordLessThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFOrdGreaterThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFUnordGreaterThanEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpShiftRightLogical => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpShiftRightArithmetic => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpShiftLeftLogical => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitwiseOr => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitwiseXor => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitwiseAnd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpNot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitFieldInsert => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitFieldSExtract => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitFieldUExtract => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitReverse => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBitCount => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpDPdx => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpDPdy => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFwidth => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpDPdxFine => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpDPdyFine => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFwidthFine => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpDPdxCoarse => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpDPdyCoarse => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFwidthCoarse => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpEmitVertex => &[_]Operand{},
+        .OpEndPrimitive => &[_]Operand{},
+        .OpEmitStreamVertex => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpEndStreamPrimitive => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpControlBarrier => &[_]Operand{
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpMemoryBarrier => &[_]Operand{
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpAtomicLoad => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpAtomicStore => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicExchange => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicCompareExchange => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicCompareExchangeWeak => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicIIncrement => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpAtomicIDecrement => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpAtomicIAdd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicISub => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicSMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicUMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicSMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicUMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicAnd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicOr => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpAtomicXor => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpPhi => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .PairIdRefIdRef, .quantifier = .variadic },
+        },
+        .OpLoopMerge => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .LoopControl, .quantifier = .required },
+        },
+        .OpSelectionMerge => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .SelectionControl, .quantifier = .required },
+        },
+        .OpLabel => &[_]Operand{
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpBranch => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBranchConditional => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .LiteralInteger, .quantifier = .variadic },
+        },
+        .OpSwitch => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PairLiteralIntegerIdRef, .quantifier = .variadic },
+        },
+        .OpKill => &[_]Operand{},
+        .OpReturn => &[_]Operand{},
+        .OpReturnValue => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpUnreachable => &[_]Operand{},
+        .OpLifetimeStart => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .LiteralInteger, .quantifier = .required },
+        },
+        .OpLifetimeStop => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .LiteralInteger, .quantifier = .required },
+        },
+        .OpGroupAsyncCopy => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupWaitEvents => &[_]Operand{
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupAll => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupAny => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupBroadcast => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupIAdd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupFAdd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupFMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupUMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupSMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupFMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupUMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupSMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReadPipe => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpWritePipe => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReservedReadPipe => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReservedWritePipe => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReserveReadPipePackets => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReserveWritePipePackets => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCommitReadPipe => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCommitWritePipe => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpIsValidReserveId => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetNumPipePackets => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetMaxPipePackets => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupReserveReadPipePackets => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupReserveWritePipePackets => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupCommitReadPipe => &[_]Operand{
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupCommitWritePipe => &[_]Operand{
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpEnqueueMarker => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpEnqueueKernel => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .variadic },
+        },
+        .OpGetKernelNDrangeSubGroupCount => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetKernelNDrangeMaxSubGroupSize => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetKernelWorkGroupSize => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetKernelPreferredWorkGroupSizeMultiple => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpRetainEvent => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReleaseEvent => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCreateUserEvent => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpIsValidEvent => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSetUserEventStatus => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCaptureEventProfilingInfo => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetDefaultQueue => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpBuildNDRange => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpImageSparseSampleImplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseSampleExplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .required },
+        },
+        .OpImageSparseSampleDrefImplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseSampleDrefExplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .required },
+        },
+        .OpImageSparseSampleProjImplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseSampleProjExplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .required },
+        },
+        .OpImageSparseSampleProjDrefImplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseSampleProjDrefExplicitLod => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .required },
+        },
+        .OpImageSparseFetch => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseGather => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseDrefGather => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpImageSparseTexelsResident => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpNoLine => &[_]Operand{},
+        .OpAtomicFlagTestAndSet => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpAtomicFlagClear => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpImageSparseRead => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpSizeOf => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTypePipeStorage => &[_]Operand{
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpConstantPipeStorage => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .LiteralInteger, .quantifier = .required },
+            .{ .kind = .LiteralInteger, .quantifier = .required },
+            .{ .kind = .LiteralInteger, .quantifier = .required },
+        },
+        .OpCreatePipeFromPipeStorage => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetKernelLocalSizeForSubgroupCount => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGetKernelMaxNumSubgroups => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTypeNamedBarrier => &[_]Operand{
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpNamedBarrierInitialize => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpMemoryNamedBarrier => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdMemorySemantics, .quantifier = .required },
+        },
+        .OpModuleProcessed => &[_]Operand{
+            .{ .kind = .LiteralString, .quantifier = .required },
+        },
+        .OpExecutionModeId => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ExecutionMode, .quantifier = .required },
+        },
+        .OpDecorateId => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .Decoration, .quantifier = .required },
+        },
+        .OpGroupNonUniformElect => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+        },
+        .OpGroupNonUniformAll => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformAny => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformAllEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBroadcast => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBroadcastFirst => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBallot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformInverseBallot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBallotBitExtract => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBallotBitCount => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBallotFindLSB => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformBallotFindMSB => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformShuffle => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformShuffleXor => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformShuffleUp => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformShuffleDown => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformIAdd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformFAdd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformIMul => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformFMul => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformSMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformUMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformFMin => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformSMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformUMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformFMax => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformBitwiseAnd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformBitwiseOr => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformBitwiseXor => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformLogicalAnd => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformLogicalOr => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformLogicalXor => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .optional },
+        },
+        .OpGroupNonUniformQuadBroadcast => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupNonUniformQuadSwap => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCopyLogical => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpPtrEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpPtrNotEqual => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpPtrDiff => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTerminateInvocation => &[_]Operand{},
+        .OpSubgroupBallotKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSubgroupFirstInvocationKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSubgroupAllKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSubgroupAnyKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSubgroupAllEqualKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSubgroupReadInvocationKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTraceRayKHR => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpExecuteCallableKHR => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpConvertUToAccelerationStructureKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpIgnoreIntersectionKHR => &[_]Operand{},
+        .OpTerminateRayKHR => &[_]Operand{},
+        .OpSDot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PackedVectorFormat, .quantifier = .optional },
+        },
+        .OpUDot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PackedVectorFormat, .quantifier = .optional },
+        },
+        .OpSUDot => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PackedVectorFormat, .quantifier = .optional },
+        },
+        .OpSDotAccSat => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PackedVectorFormat, .quantifier = .optional },
+        },
+        .OpUDotAccSat => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PackedVectorFormat, .quantifier = .optional },
+        },
+        .OpSUDotAccSat => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .PackedVectorFormat, .quantifier = .optional },
+        },
+        .OpTypeRayQueryKHR => &[_]Operand{
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpRayQueryInitializeKHR => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpRayQueryTerminateKHR => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpRayQueryGenerateIntersectionKHR => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpRayQueryConfirmIntersectionKHR => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpRayQueryProceedKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpRayQueryGetIntersectionTypeKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupIAddNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupFAddNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupFMinNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupUMinNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupSMinNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupFMaxNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupUMaxNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpGroupSMaxNonUniformAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .GroupOperation, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFragmentMaskFetchAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpFragmentFetchAMD => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReadClockKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+        },
+        .OpImageSampleFootprintNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .ImageOperands, .quantifier = .optional },
+        },
+        .OpGroupNonUniformPartitionNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpWritePackedPrimitiveIndices4x8NV => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpReportIntersectionKHR => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpIgnoreIntersectionNV => &[_]Operand{},
+        .OpTerminateRayNV => &[_]Operand{},
+        .OpTraceNV => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTraceMotionNV => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTraceRayMotionNV => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTypeAccelerationStructureKHR => &[_]Operand{
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpExecuteCallableNV => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpTypeCooperativeMatrixNV => &[_]Operand{
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdScope, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCooperativeMatrixLoadNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .MemoryAccess, .quantifier = .optional },
+        },
+        .OpCooperativeMatrixStoreNV => &[_]Operand{
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .MemoryAccess, .quantifier = .optional },
+        },
+        .OpCooperativeMatrixMulAddNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpCooperativeMatrixLengthNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpBeginInvocationInterlockEXT => &[_]Operand{},
+        .OpEndInvocationInterlockEXT => &[_]Operand{},
+        .OpDemoteToHelperInvocation => &[_]Operand{},
+        .OpIsHelperInvocationEXT => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+        },
+        .OpConvertUToImageNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpConvertUToSamplerNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpConvertImageToUNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpConvertSamplerToUNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpConvertUToSampledImageNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpConvertSampledImageToUNV => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSamplerImageAddressingModeNV => &[_]Operand{
+            .{ .kind = .LiteralInteger, .quantifier = .required },
+        },
+        .OpSubgroupShuffleINTEL => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+        },
+        .OpSubgroupShuffleDownINTEL => &[_]Operand{
+            .{ .kind = .IdResultType, .quantifier = .required },
+            .{ .kind = .IdResult, .quantifier = .required },
+            .{ .kind = .IdRef, .quantifier = .required },
+            .{ .kind = .IdRef,
.quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupShuffleUpINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupShuffleXorINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupBlockReadINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupBlockWriteINTEL => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupImageBlockReadINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupImageBlockWriteINTEL => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupImageMediaBlockReadINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpSubgroupImageMediaBlockWriteINTEL => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUCountLeadingZerosINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUCountTrailingZerosINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAbsISubINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAbsUSubINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIAddSatINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUAddSatINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIAverageINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, 
+ .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUAverageINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIAverageRoundedINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUAverageRoundedINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpISubSatINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUSubSatINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpIMul32x16INTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpUMul32x16INTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAtomicFMinEXT => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdScope, .quantifier = .required }, + .{ .kind = .IdMemorySemantics, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAtomicFMaxEXT => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdScope, .quantifier = .required }, + .{ .kind = .IdMemorySemantics, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAssumeTrueKHR => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpExpectKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpDecorateString => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .Decoration, .quantifier = .required }, + }, + .OpMemberDecorateString => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .LiteralInteger, .quantifier = .required }, + .{ .kind = .Decoration, .quantifier = .required }, + }, + .OpLoopControlINTEL => &[_]Operand{ + .{ .kind = .LiteralInteger, .quantifier = .variadic }, + }, + .OpReadPipeBlockingINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, 
.quantifier = .required }, + }, + .OpWritePipeBlockingINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpFPGARegINTEL => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetRayTMinKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetRayFlagsKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionTKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionInstanceCustomIndexKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionInstanceIdKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionGeometryIndexKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionPrimitiveIndexKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionBarycentricsKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionFrontFaceKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionObjectRayDirectionKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, 
.quantifier = .required }, + }, + .OpRayQueryGetIntersectionObjectRayOriginKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetWorldRayDirectionKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetWorldRayOriginKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionObjectToWorldKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpRayQueryGetIntersectionWorldToObjectKHR => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpAtomicFAddEXT => &[_]Operand{ + .{ .kind = .IdResultType, .quantifier = .required }, + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + .{ .kind = .IdScope, .quantifier = .required }, + .{ .kind = .IdMemorySemantics, .quantifier = .required }, + .{ .kind = .IdRef, .quantifier = .required }, + }, + .OpTypeBufferSurfaceINTEL => &[_]Operand{ + .{ .kind = .IdResult, .quantifier = .required }, + .{ .kind = .AccessQualifier, .quantifier = .required }, + }, + .OpTypeStructContinuedINTEL => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpConstantCompositeContinuedINTEL => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + .OpSpecConstantCompositeContinuedINTEL => &[_]Operand{ + .{ .kind = .IdRef, .quantifier = .variadic }, + }, + }; + } + pub fn class(self: Opcode) Class { + return switch (self) { + .OpNop => .Miscellaneous, + .OpUndef => .Miscellaneous, + .OpSourceContinued => .Debug, + .OpSource => .Debug, + .OpSourceExtension => .Debug, + .OpName => .Debug, + .OpMemberName => .Debug, + .OpString => .Debug, + .OpLine => .Debug, + .OpExtension => .Extension, + .OpExtInstImport => .Extension, + .OpExtInst => .Extension, + .OpMemoryModel => .ModeSetting, + .OpEntryPoint => .ModeSetting, + .OpExecutionMode => .ModeSetting, + .OpCapability => .ModeSetting, + .OpTypeVoid => .TypeDeclaration, + .OpTypeBool => .TypeDeclaration, + .OpTypeInt => .TypeDeclaration, + .OpTypeFloat => .TypeDeclaration, + .OpTypeVector => .TypeDeclaration, + .OpTypeMatrix => .TypeDeclaration, + .OpTypeImage => .TypeDeclaration, + .OpTypeSampler => .TypeDeclaration, + .OpTypeSampledImage => .TypeDeclaration, + .OpTypeArray => .TypeDeclaration, + .OpTypeRuntimeArray => .TypeDeclaration, + .OpTypeStruct => .TypeDeclaration, + .OpTypeOpaque => .TypeDeclaration, + .OpTypePointer => .TypeDeclaration, + .OpTypeFunction => .TypeDeclaration, + .OpTypeEvent => .TypeDeclaration, + .OpTypeDeviceEvent => .TypeDeclaration, + .OpTypeReserveId => .TypeDeclaration, + .OpTypeQueue => .TypeDeclaration, + .OpTypePipe => .TypeDeclaration, + .OpTypeForwardPointer => .TypeDeclaration, + .OpConstantTrue => .ConstantCreation, + .OpConstantFalse => .ConstantCreation, + .OpConstant => .ConstantCreation, + .OpConstantComposite 
=> .ConstantCreation, + .OpConstantSampler => .ConstantCreation, + .OpConstantNull => .ConstantCreation, + .OpSpecConstantTrue => .ConstantCreation, + .OpSpecConstantFalse => .ConstantCreation, + .OpSpecConstant => .ConstantCreation, + .OpSpecConstantComposite => .ConstantCreation, + .OpSpecConstantOp => .ConstantCreation, + .OpFunction => .Function, + .OpFunctionParameter => .Function, + .OpFunctionEnd => .Function, + .OpFunctionCall => .Function, + .OpVariable => .Memory, + .OpImageTexelPointer => .Memory, + .OpLoad => .Memory, + .OpStore => .Memory, + .OpCopyMemory => .Memory, + .OpCopyMemorySized => .Memory, + .OpAccessChain => .Memory, + .OpInBoundsAccessChain => .Memory, + .OpPtrAccessChain => .Memory, + .OpArrayLength => .Memory, + .OpGenericPtrMemSemantics => .Memory, + .OpInBoundsPtrAccessChain => .Memory, + .OpDecorate => .Annotation, + .OpMemberDecorate => .Annotation, + .OpDecorationGroup => .Annotation, + .OpGroupDecorate => .Annotation, + .OpGroupMemberDecorate => .Annotation, + .OpVectorExtractDynamic => .Composite, + .OpVectorInsertDynamic => .Composite, + .OpVectorShuffle => .Composite, + .OpCompositeConstruct => .Composite, + .OpCompositeExtract => .Composite, + .OpCompositeInsert => .Composite, + .OpCopyObject => .Composite, + .OpTranspose => .Composite, + .OpSampledImage => .Image, + .OpImageSampleImplicitLod => .Image, + .OpImageSampleExplicitLod => .Image, + .OpImageSampleDrefImplicitLod => .Image, + .OpImageSampleDrefExplicitLod => .Image, + .OpImageSampleProjImplicitLod => .Image, + .OpImageSampleProjExplicitLod => .Image, + .OpImageSampleProjDrefImplicitLod => .Image, + .OpImageSampleProjDrefExplicitLod => .Image, + .OpImageFetch => .Image, + .OpImageGather => .Image, + .OpImageDrefGather => .Image, + .OpImageRead => .Image, + .OpImageWrite => .Image, + .OpImage => .Image, + .OpImageQueryFormat => .Image, + .OpImageQueryOrder => .Image, + .OpImageQuerySizeLod => .Image, + .OpImageQuerySize => .Image, + .OpImageQueryLod => .Image, + .OpImageQueryLevels => .Image, + .OpImageQuerySamples => .Image, + .OpConvertFToU => .Conversion, + .OpConvertFToS => .Conversion, + .OpConvertSToF => .Conversion, + .OpConvertUToF => .Conversion, + .OpUConvert => .Conversion, + .OpSConvert => .Conversion, + .OpFConvert => .Conversion, + .OpQuantizeToF16 => .Conversion, + .OpConvertPtrToU => .Conversion, + .OpSatConvertSToU => .Conversion, + .OpSatConvertUToS => .Conversion, + .OpConvertUToPtr => .Conversion, + .OpPtrCastToGeneric => .Conversion, + .OpGenericCastToPtr => .Conversion, + .OpGenericCastToPtrExplicit => .Conversion, + .OpBitcast => .Conversion, + .OpSNegate => .Arithmetic, + .OpFNegate => .Arithmetic, + .OpIAdd => .Arithmetic, + .OpFAdd => .Arithmetic, + .OpISub => .Arithmetic, + .OpFSub => .Arithmetic, + .OpIMul => .Arithmetic, + .OpFMul => .Arithmetic, + .OpUDiv => .Arithmetic, + .OpSDiv => .Arithmetic, + .OpFDiv => .Arithmetic, + .OpUMod => .Arithmetic, + .OpSRem => .Arithmetic, + .OpSMod => .Arithmetic, + .OpFRem => .Arithmetic, + .OpFMod => .Arithmetic, + .OpVectorTimesScalar => .Arithmetic, + .OpMatrixTimesScalar => .Arithmetic, + .OpVectorTimesMatrix => .Arithmetic, + .OpMatrixTimesVector => .Arithmetic, + .OpMatrixTimesMatrix => .Arithmetic, + .OpOuterProduct => .Arithmetic, + .OpDot => .Arithmetic, + .OpIAddCarry => .Arithmetic, + .OpISubBorrow => .Arithmetic, + .OpUMulExtended => .Arithmetic, + .OpSMulExtended => .Arithmetic, + .OpAny => .RelationalAndLogical, + .OpAll => .RelationalAndLogical, + .OpIsNan => .RelationalAndLogical, + .OpIsInf => 
.RelationalAndLogical, + .OpIsFinite => .RelationalAndLogical, + .OpIsNormal => .RelationalAndLogical, + .OpSignBitSet => .RelationalAndLogical, + .OpLessOrGreater => .RelationalAndLogical, + .OpOrdered => .RelationalAndLogical, + .OpUnordered => .RelationalAndLogical, + .OpLogicalEqual => .RelationalAndLogical, + .OpLogicalNotEqual => .RelationalAndLogical, + .OpLogicalOr => .RelationalAndLogical, + .OpLogicalAnd => .RelationalAndLogical, + .OpLogicalNot => .RelationalAndLogical, + .OpSelect => .RelationalAndLogical, + .OpIEqual => .RelationalAndLogical, + .OpINotEqual => .RelationalAndLogical, + .OpUGreaterThan => .RelationalAndLogical, + .OpSGreaterThan => .RelationalAndLogical, + .OpUGreaterThanEqual => .RelationalAndLogical, + .OpSGreaterThanEqual => .RelationalAndLogical, + .OpULessThan => .RelationalAndLogical, + .OpSLessThan => .RelationalAndLogical, + .OpULessThanEqual => .RelationalAndLogical, + .OpSLessThanEqual => .RelationalAndLogical, + .OpFOrdEqual => .RelationalAndLogical, + .OpFUnordEqual => .RelationalAndLogical, + .OpFOrdNotEqual => .RelationalAndLogical, + .OpFUnordNotEqual => .RelationalAndLogical, + .OpFOrdLessThan => .RelationalAndLogical, + .OpFUnordLessThan => .RelationalAndLogical, + .OpFOrdGreaterThan => .RelationalAndLogical, + .OpFUnordGreaterThan => .RelationalAndLogical, + .OpFOrdLessThanEqual => .RelationalAndLogical, + .OpFUnordLessThanEqual => .RelationalAndLogical, + .OpFOrdGreaterThanEqual => .RelationalAndLogical, + .OpFUnordGreaterThanEqual => .RelationalAndLogical, + .OpShiftRightLogical => .Bit, + .OpShiftRightArithmetic => .Bit, + .OpShiftLeftLogical => .Bit, + .OpBitwiseOr => .Bit, + .OpBitwiseXor => .Bit, + .OpBitwiseAnd => .Bit, + .OpNot => .Bit, + .OpBitFieldInsert => .Bit, + .OpBitFieldSExtract => .Bit, + .OpBitFieldUExtract => .Bit, + .OpBitReverse => .Bit, + .OpBitCount => .Bit, + .OpDPdx => .Derivative, + .OpDPdy => .Derivative, + .OpFwidth => .Derivative, + .OpDPdxFine => .Derivative, + .OpDPdyFine => .Derivative, + .OpFwidthFine => .Derivative, + .OpDPdxCoarse => .Derivative, + .OpDPdyCoarse => .Derivative, + .OpFwidthCoarse => .Derivative, + .OpEmitVertex => .Primitive, + .OpEndPrimitive => .Primitive, + .OpEmitStreamVertex => .Primitive, + .OpEndStreamPrimitive => .Primitive, + .OpControlBarrier => .Barrier, + .OpMemoryBarrier => .Barrier, + .OpAtomicLoad => .Atomic, + .OpAtomicStore => .Atomic, + .OpAtomicExchange => .Atomic, + .OpAtomicCompareExchange => .Atomic, + .OpAtomicCompareExchangeWeak => .Atomic, + .OpAtomicIIncrement => .Atomic, + .OpAtomicIDecrement => .Atomic, + .OpAtomicIAdd => .Atomic, + .OpAtomicISub => .Atomic, + .OpAtomicSMin => .Atomic, + .OpAtomicUMin => .Atomic, + .OpAtomicSMax => .Atomic, + .OpAtomicUMax => .Atomic, + .OpAtomicAnd => .Atomic, + .OpAtomicOr => .Atomic, + .OpAtomicXor => .Atomic, + .OpPhi => .ControlFlow, + .OpLoopMerge => .ControlFlow, + .OpSelectionMerge => .ControlFlow, + .OpLabel => .ControlFlow, + .OpBranch => .ControlFlow, + .OpBranchConditional => .ControlFlow, + .OpSwitch => .ControlFlow, + .OpKill => .ControlFlow, + .OpReturn => .ControlFlow, + .OpReturnValue => .ControlFlow, + .OpUnreachable => .ControlFlow, + .OpLifetimeStart => .ControlFlow, + .OpLifetimeStop => .ControlFlow, + .OpGroupAsyncCopy => .Group, + .OpGroupWaitEvents => .Group, + .OpGroupAll => .Group, + .OpGroupAny => .Group, + .OpGroupBroadcast => .Group, + .OpGroupIAdd => .Group, + .OpGroupFAdd => .Group, + .OpGroupFMin => .Group, + .OpGroupUMin => .Group, + .OpGroupSMin => .Group, + .OpGroupFMax => .Group, + .OpGroupUMax => 
.Group, + .OpGroupSMax => .Group, + .OpReadPipe => .Pipe, + .OpWritePipe => .Pipe, + .OpReservedReadPipe => .Pipe, + .OpReservedWritePipe => .Pipe, + .OpReserveReadPipePackets => .Pipe, + .OpReserveWritePipePackets => .Pipe, + .OpCommitReadPipe => .Pipe, + .OpCommitWritePipe => .Pipe, + .OpIsValidReserveId => .Pipe, + .OpGetNumPipePackets => .Pipe, + .OpGetMaxPipePackets => .Pipe, + .OpGroupReserveReadPipePackets => .Pipe, + .OpGroupReserveWritePipePackets => .Pipe, + .OpGroupCommitReadPipe => .Pipe, + .OpGroupCommitWritePipe => .Pipe, + .OpEnqueueMarker => .DeviceSideEnqueue, + .OpEnqueueKernel => .DeviceSideEnqueue, + .OpGetKernelNDrangeSubGroupCount => .DeviceSideEnqueue, + .OpGetKernelNDrangeMaxSubGroupSize => .DeviceSideEnqueue, + .OpGetKernelWorkGroupSize => .DeviceSideEnqueue, + .OpGetKernelPreferredWorkGroupSizeMultiple => .DeviceSideEnqueue, + .OpRetainEvent => .DeviceSideEnqueue, + .OpReleaseEvent => .DeviceSideEnqueue, + .OpCreateUserEvent => .DeviceSideEnqueue, + .OpIsValidEvent => .DeviceSideEnqueue, + .OpSetUserEventStatus => .DeviceSideEnqueue, + .OpCaptureEventProfilingInfo => .DeviceSideEnqueue, + .OpGetDefaultQueue => .DeviceSideEnqueue, + .OpBuildNDRange => .DeviceSideEnqueue, + .OpImageSparseSampleImplicitLod => .Image, + .OpImageSparseSampleExplicitLod => .Image, + .OpImageSparseSampleDrefImplicitLod => .Image, + .OpImageSparseSampleDrefExplicitLod => .Image, + .OpImageSparseSampleProjImplicitLod => .Image, + .OpImageSparseSampleProjExplicitLod => .Image, + .OpImageSparseSampleProjDrefImplicitLod => .Image, + .OpImageSparseSampleProjDrefExplicitLod => .Image, + .OpImageSparseFetch => .Image, + .OpImageSparseGather => .Image, + .OpImageSparseDrefGather => .Image, + .OpImageSparseTexelsResident => .Image, + .OpNoLine => .Debug, + .OpAtomicFlagTestAndSet => .Atomic, + .OpAtomicFlagClear => .Atomic, + .OpImageSparseRead => .Image, + .OpSizeOf => .Miscellaneous, + .OpTypePipeStorage => .TypeDeclaration, + .OpConstantPipeStorage => .Pipe, + .OpCreatePipeFromPipeStorage => .Pipe, + .OpGetKernelLocalSizeForSubgroupCount => .DeviceSideEnqueue, + .OpGetKernelMaxNumSubgroups => .DeviceSideEnqueue, + .OpTypeNamedBarrier => .TypeDeclaration, + .OpNamedBarrierInitialize => .Barrier, + .OpMemoryNamedBarrier => .Barrier, + .OpModuleProcessed => .Debug, + .OpExecutionModeId => .ModeSetting, + .OpDecorateId => .Annotation, + .OpGroupNonUniformElect => .NonUniform, + .OpGroupNonUniformAll => .NonUniform, + .OpGroupNonUniformAny => .NonUniform, + .OpGroupNonUniformAllEqual => .NonUniform, + .OpGroupNonUniformBroadcast => .NonUniform, + .OpGroupNonUniformBroadcastFirst => .NonUniform, + .OpGroupNonUniformBallot => .NonUniform, + .OpGroupNonUniformInverseBallot => .NonUniform, + .OpGroupNonUniformBallotBitExtract => .NonUniform, + .OpGroupNonUniformBallotBitCount => .NonUniform, + .OpGroupNonUniformBallotFindLSB => .NonUniform, + .OpGroupNonUniformBallotFindMSB => .NonUniform, + .OpGroupNonUniformShuffle => .NonUniform, + .OpGroupNonUniformShuffleXor => .NonUniform, + .OpGroupNonUniformShuffleUp => .NonUniform, + .OpGroupNonUniformShuffleDown => .NonUniform, + .OpGroupNonUniformIAdd => .NonUniform, + .OpGroupNonUniformFAdd => .NonUniform, + .OpGroupNonUniformIMul => .NonUniform, + .OpGroupNonUniformFMul => .NonUniform, + .OpGroupNonUniformSMin => .NonUniform, + .OpGroupNonUniformUMin => .NonUniform, + .OpGroupNonUniformFMin => .NonUniform, + .OpGroupNonUniformSMax => .NonUniform, + .OpGroupNonUniformUMax => .NonUniform, + .OpGroupNonUniformFMax => .NonUniform, + .OpGroupNonUniformBitwiseAnd 
=> .NonUniform, + .OpGroupNonUniformBitwiseOr => .NonUniform, + .OpGroupNonUniformBitwiseXor => .NonUniform, + .OpGroupNonUniformLogicalAnd => .NonUniform, + .OpGroupNonUniformLogicalOr => .NonUniform, + .OpGroupNonUniformLogicalXor => .NonUniform, + .OpGroupNonUniformQuadBroadcast => .NonUniform, + .OpGroupNonUniformQuadSwap => .NonUniform, + .OpCopyLogical => .Composite, + .OpPtrEqual => .Memory, + .OpPtrNotEqual => .Memory, + .OpPtrDiff => .Memory, + .OpTerminateInvocation => .ControlFlow, + .OpSubgroupBallotKHR => .Group, + .OpSubgroupFirstInvocationKHR => .Group, + .OpSubgroupAllKHR => .Group, + .OpSubgroupAnyKHR => .Group, + .OpSubgroupAllEqualKHR => .Group, + .OpSubgroupReadInvocationKHR => .Group, + .OpTraceRayKHR => .Reserved, + .OpExecuteCallableKHR => .Reserved, + .OpConvertUToAccelerationStructureKHR => .Reserved, + .OpIgnoreIntersectionKHR => .Reserved, + .OpTerminateRayKHR => .Reserved, + .OpSDot => .Arithmetic, + .OpUDot => .Arithmetic, + .OpSUDot => .Arithmetic, + .OpSDotAccSat => .Arithmetic, + .OpUDotAccSat => .Arithmetic, + .OpSUDotAccSat => .Arithmetic, + .OpTypeRayQueryKHR => .Reserved, + .OpRayQueryInitializeKHR => .Reserved, + .OpRayQueryTerminateKHR => .Reserved, + .OpRayQueryGenerateIntersectionKHR => .Reserved, + .OpRayQueryConfirmIntersectionKHR => .Reserved, + .OpRayQueryProceedKHR => .Reserved, + .OpRayQueryGetIntersectionTypeKHR => .Reserved, + .OpGroupIAddNonUniformAMD => .Group, + .OpGroupFAddNonUniformAMD => .Group, + .OpGroupFMinNonUniformAMD => .Group, + .OpGroupUMinNonUniformAMD => .Group, + .OpGroupSMinNonUniformAMD => .Group, + .OpGroupFMaxNonUniformAMD => .Group, + .OpGroupUMaxNonUniformAMD => .Group, + .OpGroupSMaxNonUniformAMD => .Group, + .OpFragmentMaskFetchAMD => .Reserved, + .OpFragmentFetchAMD => .Reserved, + .OpReadClockKHR => .Reserved, + .OpImageSampleFootprintNV => .Image, + .OpGroupNonUniformPartitionNV => .NonUniform, + .OpWritePackedPrimitiveIndices4x8NV => .Reserved, + .OpReportIntersectionKHR => .Reserved, + .OpIgnoreIntersectionNV => .Reserved, + .OpTerminateRayNV => .Reserved, + .OpTraceNV => .Reserved, + .OpTraceMotionNV => .Reserved, + .OpTraceRayMotionNV => .Reserved, + .OpTypeAccelerationStructureKHR => .Reserved, + .OpExecuteCallableNV => .Reserved, + .OpTypeCooperativeMatrixNV => .Reserved, + .OpCooperativeMatrixLoadNV => .Reserved, + .OpCooperativeMatrixStoreNV => .Reserved, + .OpCooperativeMatrixMulAddNV => .Reserved, + .OpCooperativeMatrixLengthNV => .Reserved, + .OpBeginInvocationInterlockEXT => .Reserved, + .OpEndInvocationInterlockEXT => .Reserved, + .OpDemoteToHelperInvocation => .ControlFlow, + .OpIsHelperInvocationEXT => .Reserved, + .OpConvertUToImageNV => .Reserved, + .OpConvertUToSamplerNV => .Reserved, + .OpConvertImageToUNV => .Reserved, + .OpConvertSamplerToUNV => .Reserved, + .OpConvertUToSampledImageNV => .Reserved, + .OpConvertSampledImageToUNV => .Reserved, + .OpSamplerImageAddressingModeNV => .Reserved, + .OpSubgroupShuffleINTEL => .Group, + .OpSubgroupShuffleDownINTEL => .Group, + .OpSubgroupShuffleUpINTEL => .Group, + .OpSubgroupShuffleXorINTEL => .Group, + .OpSubgroupBlockReadINTEL => .Group, + .OpSubgroupBlockWriteINTEL => .Group, + .OpSubgroupImageBlockReadINTEL => .Group, + .OpSubgroupImageBlockWriteINTEL => .Group, + .OpSubgroupImageMediaBlockReadINTEL => .Group, + .OpSubgroupImageMediaBlockWriteINTEL => .Group, + .OpUCountLeadingZerosINTEL => .Reserved, + .OpUCountTrailingZerosINTEL => .Reserved, + .OpAbsISubINTEL => .Reserved, + .OpAbsUSubINTEL => .Reserved, + .OpIAddSatINTEL => .Reserved, + 
.OpUAddSatINTEL => .Reserved, + .OpIAverageINTEL => .Reserved, + .OpUAverageINTEL => .Reserved, + .OpIAverageRoundedINTEL => .Reserved, + .OpUAverageRoundedINTEL => .Reserved, + .OpISubSatINTEL => .Reserved, + .OpUSubSatINTEL => .Reserved, + .OpIMul32x16INTEL => .Reserved, + .OpUMul32x16INTEL => .Reserved, + .OpAtomicFMinEXT => .Atomic, + .OpAtomicFMaxEXT => .Atomic, + .OpAssumeTrueKHR => .Miscellaneous, + .OpExpectKHR => .Miscellaneous, + .OpDecorateString => .Annotation, + .OpMemberDecorateString => .Annotation, + .OpLoopControlINTEL => .Reserved, + .OpReadPipeBlockingINTEL => .Pipe, + .OpWritePipeBlockingINTEL => .Pipe, + .OpFPGARegINTEL => .Reserved, + .OpRayQueryGetRayTMinKHR => .Reserved, + .OpRayQueryGetRayFlagsKHR => .Reserved, + .OpRayQueryGetIntersectionTKHR => .Reserved, + .OpRayQueryGetIntersectionInstanceCustomIndexKHR => .Reserved, + .OpRayQueryGetIntersectionInstanceIdKHR => .Reserved, + .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => .Reserved, + .OpRayQueryGetIntersectionGeometryIndexKHR => .Reserved, + .OpRayQueryGetIntersectionPrimitiveIndexKHR => .Reserved, + .OpRayQueryGetIntersectionBarycentricsKHR => .Reserved, + .OpRayQueryGetIntersectionFrontFaceKHR => .Reserved, + .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => .Reserved, + .OpRayQueryGetIntersectionObjectRayDirectionKHR => .Reserved, + .OpRayQueryGetIntersectionObjectRayOriginKHR => .Reserved, + .OpRayQueryGetWorldRayDirectionKHR => .Reserved, + .OpRayQueryGetWorldRayOriginKHR => .Reserved, + .OpRayQueryGetIntersectionObjectToWorldKHR => .Reserved, + .OpRayQueryGetIntersectionWorldToObjectKHR => .Reserved, + .OpAtomicFAddEXT => .Atomic, + .OpTypeBufferSurfaceINTEL => .TypeDeclaration, + .OpTypeStructContinuedINTEL => .TypeDeclaration, + .OpConstantCompositeContinuedINTEL => .ConstantCreation, + .OpSpecConstantCompositeContinuedINTEL => .ConstantCreation, + }; + } }; pub const ImageOperands = packed struct { Bias: bool = false, @@ -1220,9 +5253,9 @@ pub const ImageOperands = packed struct { VolatileTexel: bool = false, SignExtend: bool = false, ZeroExtend: bool = false, - _reserved_bit_14: bool = false, + Nontemporal: bool = false, _reserved_bit_15: bool = false, - _reserved_bit_16: bool = false, + Offsets: bool = false, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, @@ -1259,9 +5292,9 @@ pub const ImageOperands = packed struct { VolatileTexel: bool = false, SignExtend: bool = false, ZeroExtend: bool = false, - _reserved_bit_14: bool = false, + Nontemporal: bool = false, _reserved_bit_15: bool = false, - _reserved_bit_16: bool = false, + Offsets: ?struct { id_ref: IdRef } = null, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, @@ -1433,7 +5466,7 @@ pub const FunctionControl = packed struct { _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, - _reserved_bit_16: bool = false, + OptNoneINTEL: bool = false, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, @@ -1670,6 +5703,7 @@ pub const SourceLanguage = enum(u32) { OpenCL_C = 3, OpenCL_CPP = 4, HLSL = 5, + CPP_for_OpenCL = 6, }; pub const ExecutionModel = enum(u32) { Vertex = 0, @@ -1750,6 +5784,7 @@ pub const ExecutionMode = enum(u32) { SubgroupsPerWorkgroupId = 37, LocalSizeId = 38, LocalSizeHintId = 39, + SubgroupUniformControlFlowKHR = 4421, PostDepthCoverage = 4446, DenormPreserve = 4459, DenormFlushToZero = 4460, @@ -1817,7 +5852,8 @@ pub const 
ExecutionMode = enum(u32) { SubgroupsPerWorkgroup: struct { subgroups_per_workgroup: LiteralInteger }, SubgroupsPerWorkgroupId: struct { subgroups_per_workgroup: IdRef }, LocalSizeId: struct { x_size: IdRef, y_size: IdRef, z_size: IdRef }, - LocalSizeHintId: struct { local_size_hint: IdRef }, + LocalSizeHintId: struct { x_size_hint: IdRef, y_size_hint: IdRef, z_size_hint: IdRef }, + SubgroupUniformControlFlowKHR, PostDepthCoverage, DenormPreserve: struct { target_width: LiteralInteger }, DenormFlushToZero: struct { target_width: LiteralInteger }, @@ -1996,10 +6032,26 @@ pub const FPDenormMode = enum(u32) { Preserve = 0, FlushToZero = 1, }; +pub const QuantizationModes = enum(u32) { + TRN = 0, + TRN_ZERO = 1, + RND = 2, + RND_ZERO = 3, + RND_INF = 4, + RND_MIN_INF = 5, + RND_CONV = 6, + RND_CONV_ODD = 7, +}; pub const FPOperationMode = enum(u32) { IEEE = 0, ALT = 1, }; +pub const OverflowModes = enum(u32) { + WRAP = 0, + SAT = 1, + SAT_ZERO = 2, + SAT_SYM = 3, +}; pub const LinkageType = enum(u32) { Export = 0, Import = 1, @@ -2078,10 +6130,14 @@ pub const Decoration = enum(u32) { PerPrimitiveNV = 5271, PerViewNV = 5272, PerTaskNV = 5273, - PerVertexNV = 5285, + PerVertexKHR = 5285, NonUniform = 5300, RestrictPointer = 5355, AliasedPointer = 5356, + BindlessSamplerNV = 5398, + BindlessImageNV = 5399, + BoundSamplerNV = 5400, + BoundImageNV = 5401, SIMTCallINTEL = 5599, ReferencedIndirectlyINTEL = 5602, ClobberINTEL = 5607, @@ -2119,7 +6175,9 @@ pub const Decoration = enum(u32) { FunctionFloatingPointModeINTEL = 6080, SingleElementVectorINTEL = 6085, VectorComputeCallableFunctionINTEL = 6087, + MediaBlockIOINTEL = 6140, + pub const PerVertexNV = Decoration.PerVertexKHR; pub const NonUniformEXT = Decoration.NonUniform; pub const RestrictPointerEXT = Decoration.RestrictPointer; pub const AliasedPointerEXT = Decoration.AliasedPointer; @@ -2184,10 +6242,14 @@ pub const Decoration = enum(u32) { PerPrimitiveNV, PerViewNV, PerTaskNV, - PerVertexNV, + PerVertexKHR, NonUniform, RestrictPointer, AliasedPointer, + BindlessSamplerNV, + BindlessImageNV, + BoundSamplerNV, + BoundImageNV, SIMTCallINTEL: struct { n: LiteralInteger }, ReferencedIndirectlyINTEL, ClobberINTEL: struct { register: LiteralString }, @@ -2225,6 +6287,7 @@ pub const Decoration = enum(u32) { FunctionFloatingPointModeINTEL: struct { target_width: LiteralInteger, fp_operation_mode: FPOperationMode }, SingleElementVectorINTEL, VectorComputeCallableFunctionINTEL, + MediaBlockIOINTEL, }; }; pub const BuiltIn = enum(u32) { @@ -2303,8 +6366,8 @@ pub const BuiltIn = enum(u32) { LayerPerViewNV = 5279, MeshViewCountNV = 5280, MeshViewIndicesNV = 5281, - BaryCoordNV = 5286, - BaryCoordNoPerspNV = 5287, + BaryCoordKHR = 5286, + BaryCoordNoPerspKHR = 5287, FragSizeEXT = 5292, FragInvocationCountEXT = 5293, LaunchIdKHR = 5319, @@ -2320,6 +6383,7 @@ pub const BuiltIn = enum(u32) { WorldToObjectKHR = 5331, HitTNV = 5332, HitKindKHR = 5333, + CurrentRayTimeNV = 5334, IncomingRayFlagsKHR = 5351, RayGeometryIndexKHR = 5352, WarpsPerSMNV = 5374, @@ -2332,6 +6396,8 @@ pub const BuiltIn = enum(u32) { pub const SubgroupGtMaskKHR = BuiltIn.SubgroupGtMask; pub const SubgroupLeMaskKHR = BuiltIn.SubgroupLeMask; pub const SubgroupLtMaskKHR = BuiltIn.SubgroupLtMask; + pub const BaryCoordNV = BuiltIn.BaryCoordKHR; + pub const BaryCoordNoPerspNV = BuiltIn.BaryCoordNoPerspKHR; pub const FragmentSizeNV = BuiltIn.FragSizeEXT; pub const InvocationsPerPixelNV = BuiltIn.FragInvocationCountEXT; pub const LaunchIdNV = BuiltIn.LaunchIdKHR; @@ -2443,6 +6509,7 @@ pub const 
Capability = enum(u32) { GroupNonUniformQuad = 68, ShaderLayer = 69, ShaderViewportIndex = 70, + UniformDecoration = 71, FragmentShadingRateKHR = 4422, SubgroupBallotKHR = 4423, DrawParameters = 4427, @@ -2488,7 +6555,7 @@ pub const Capability = enum(u32) { FragmentFullyCoveredEXT = 5265, MeshShadingNV = 5266, ImageFootprintNV = 5282, - FragmentBarycentricNV = 5284, + FragmentBarycentricKHR = 5284, ComputeDerivativeGroupQuadsNV = 5288, FragmentDensityEXT = 5291, GroupNonUniformPartitionedNV = 5297, @@ -2505,6 +6572,7 @@ pub const Capability = enum(u32) { UniformTexelBufferArrayNonUniformIndexing = 5311, StorageTexelBufferArrayNonUniformIndexing = 5312, RayTracingNV = 5340, + RayTracingMotionBlurNV = 5341, VulkanMemoryModel = 5345, VulkanMemoryModelDeviceScope = 5346, PhysicalStorageBufferAddresses = 5347, @@ -2515,7 +6583,8 @@ pub const Capability = enum(u32) { FragmentShaderShadingRateInterlockEXT = 5372, ShaderSMBuiltinsNV = 5373, FragmentShaderPixelInterlockEXT = 5378, - DemoteToHelperInvocationEXT = 5379, + DemoteToHelperInvocation = 5379, + BindlessTextureNV = 5390, SubgroupShuffleINTEL = 5568, SubgroupBufferBlockIOINTEL = 5569, SubgroupImageBlockIOINTEL = 5570, @@ -2540,6 +6609,7 @@ pub const Capability = enum(u32) { FPGAMemoryAttributesINTEL = 5824, FPFastMathModeINTEL = 5837, ArbitraryPrecisionIntegersINTEL = 5844, + ArbitraryPrecisionFloatingPointINTEL = 5845, UnstructuredLoopControlsINTEL = 5886, FPGALoopControlsINTEL = 5888, KernelAttributesINTEL = 5892, @@ -2548,17 +6618,27 @@ pub const Capability = enum(u32) { FPGAClusterAttributesINTEL = 5904, LoopFuseINTEL = 5906, FPGABufferLocationINTEL = 5920, + ArbitraryPrecisionFixedPointINTEL = 5922, USMStorageClassesINTEL = 5935, IOPipesINTEL = 5943, BlockingPipesINTEL = 5945, FPGARegINTEL = 5948, + DotProductInputAll = 6016, + DotProductInput4x8Bit = 6017, + DotProductInput4x8BitPacked = 6018, + DotProduct = 6019, + BitInstructions = 6025, AtomicFloat32AddEXT = 6033, AtomicFloat64AddEXT = 6034, LongConstantCompositeINTEL = 6089, + OptNoneINTEL = 6094, + AtomicFloat16AddEXT = 6095, + DebugInfoModuleINTEL = 6114, pub const StorageUniformBufferBlock16 = Capability.StorageBuffer16BitAccess; pub const StorageUniform16 = Capability.UniformAndStorageBuffer16BitAccess; pub const ShaderViewportIndexLayerNV = Capability.ShaderViewportIndexLayerEXT; + pub const FragmentBarycentricNV = Capability.FragmentBarycentricKHR; pub const ShadingRateNV = Capability.FragmentDensityEXT; pub const ShaderNonUniformEXT = Capability.ShaderNonUniform; pub const RuntimeDescriptorArrayEXT = Capability.RuntimeDescriptorArray; @@ -2575,6 +6655,11 @@ pub const Capability = enum(u32) { pub const VulkanMemoryModelKHR = Capability.VulkanMemoryModel; pub const VulkanMemoryModelDeviceScopeKHR = Capability.VulkanMemoryModelDeviceScope; pub const PhysicalStorageBufferAddressesEXT = Capability.PhysicalStorageBufferAddresses; + pub const DemoteToHelperInvocationEXT = Capability.DemoteToHelperInvocation; + pub const DotProductInputAllKHR = Capability.DotProductInputAll; + pub const DotProductInput4x8BitKHR = Capability.DotProductInput4x8Bit; + pub const DotProductInput4x8BitPackedKHR = Capability.DotProductInput4x8BitPacked; + pub const DotProductKHR = Capability.DotProduct; }; pub const RayQueryIntersection = enum(u32) { RayQueryCandidateIntersectionKHR = 0, @@ -2589,3 +6674,8 @@ pub const RayQueryCandidateIntersectionType = enum(u32) { RayQueryCandidateIntersectionTriangleKHR = 0, RayQueryCandidateIntersectionAABBKHR = 1, }; +pub const PackedVectorFormat = enum(u32) { + 
PackedVectorFormat4x8Bit = 0, + + pub const PackedVectorFormat4x8BitKHR = PackedVectorFormat.PackedVectorFormat4x8Bit; +}; diff --git a/src/glibc.zig b/src/glibc.zig index 87e713de34..75640faa4d 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -653,6 +653,9 @@ pub fn buildSharedObjects(comp: *Compilation) !void { .gpa = comp.gpa, .manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}), }; + cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); + cache.addPrefix(comp.zig_lib_directory); + cache.addPrefix(comp.global_cache_directory); defer cache.manifest_dir.close(); var man = cache.obtain(); diff --git a/src/libcxx.zig b/src/libcxx.zig index 850da698c5..7ca405cf15 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -187,15 +187,6 @@ pub fn buildLibCXX(comp: *Compilation) !void { try cflags.append("-faligned-allocation"); } - try cflags.append("-I"); - try cflags.append(cxx_include_path); - - try cflags.append("-I"); - try cflags.append(cxxabi_include_path); - - try cflags.append("-I"); - try cflags.append(cxx_src_include_path); - if (target_util.supports_fpic(target)) { try cflags.append("-fPIC"); } @@ -203,9 +194,24 @@ try cflags.append("-std=c++20"); try cflags.append("-Wno-user-defined-literals"); + // These flags depend only on the zig lib directory path, which is + // deliberately either tracked by the cache or not; that decision + // should not be overridden here. + var cache_exempt_flags = std.ArrayList([]const u8).init(arena); + + try cache_exempt_flags.append("-I"); + try cache_exempt_flags.append(cxx_include_path); + + try cache_exempt_flags.append("-I"); + try cache_exempt_flags.append(cxxabi_include_path); + + try cache_exempt_flags.append("-I"); + try cache_exempt_flags.append(cxx_src_include_path); + c_source_files.appendAssumeCapacity(.{ .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", cxx_src }), .extra_flags = cflags.items, + .cache_exempt_flags = cache_exempt_flags.items, }); } @@ -340,15 +346,6 @@ pub fn buildLibCXXABI(comp: *Compilation) !void { try cflags.append("-D_LIBCPP_HAS_MUSL_LIBC"); } - try cflags.append("-I"); - try cflags.append(cxxabi_include_path); - - try cflags.append("-I"); - try cflags.append(cxx_include_path); - - try cflags.append("-I"); - try cflags.append(cxx_src_include_path); - if (target_util.supports_fpic(target)) { try cflags.append("-fPIC"); } @@ -357,9 +354,24 @@ try cflags.append("-funwind-tables"); try cflags.append("-std=c++20"); + // These flags depend only on the zig lib directory path, which is + // deliberately either tracked by the cache or not; that decision + // should not be overridden here.
+ var cache_exempt_flags = std.ArrayList([]const u8).init(arena); + + try cache_exempt_flags.append("-I"); + try cache_exempt_flags.append(cxxabi_include_path); + + try cache_exempt_flags.append("-I"); + try cache_exempt_flags.append(cxx_include_path); + + try cache_exempt_flags.append("-I"); + try cache_exempt_flags.append(cxx_src_include_path); + c_source_files.appendAssumeCapacity(.{ .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", cxxabi_src }), .extra_flags = cflags.items, + .cache_exempt_flags = cache_exempt_flags.items, }); } diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 695df33930..fd708f794f 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -58,13 +58,13 @@ decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclGenContext) = . const DeclGenContext = struct { air: Air, - air_value_arena: ArenaAllocator.State, + air_arena: ArenaAllocator.State, liveness: Liveness, fn deinit(self: *DeclGenContext, gpa: Allocator) void { self.air.deinit(gpa); self.liveness.deinit(gpa); - self.air_value_arena.promote(gpa).deinit(); + self.air_arena.promote(gpa).deinit(); self.* = undefined; } }; @@ -140,7 +140,7 @@ pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liv result.value_ptr.* = .{ .air = new_air, - .air_value_arena = arena.state, + .air_arena = arena.state, .liveness = new_liveness, }; } @@ -167,13 +167,13 @@ pub fn updateDeclExports( } pub fn freeDecl(self: *SpirV, decl_index: Module.Decl.Index) void { - const index = self.decl_table.getIndex(decl_index).?; - const module = self.base.options.module.?; - const decl = module.declPtr(decl_index); - if (decl.val.tag() == .function) { - self.decl_table.values()[index].deinit(self.base.allocator); + if (self.decl_table.getIndex(decl_index)) |index| { + const module = self.base.options.module.?; + const decl = module.declPtr(decl_index); + if (decl.val.tag() == .function) { + self.decl_table.values()[index].deinit(self.base.allocator); + } } - self.decl_table.swapRemoveAt(index); } pub fn flush(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void { @@ -218,7 +218,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No } // Now, actually generate the code for all declarations. 
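The cache_exempt_flags split in the src/libcxx.zig hunks above separates flags that should feed the cache manifest from include paths that merely restate the zig lib directory. A minimal sketch of the intent, using a hypothetical CSourceFile record and std.hash.Wyhash purely for illustration (not the real Compilation cache code):

const std = @import("std");

// Hypothetical mirror of the source-file record: extra_flags participate
// in the cache hash, cache_exempt_flags deliberately do not.
const CSourceFile = struct {
    src_path: []const u8,
    extra_flags: []const []const u8 = &.{},
    cache_exempt_flags: []const []const u8 = &.{},
};

fn hashSourceFile(file: CSourceFile) u64 {
    var h = std.hash.Wyhash.init(0);
    h.update(file.src_path);
    for (file.extra_flags) |flag| h.update(flag);
    // cache_exempt_flags are skipped on purpose: they only restate the zig
    // lib directory path, whose cache treatment is decided elsewhere.
    return h.final();
}

test "cache-exempt flags do not perturb the hash" {
    const plain = hashSourceFile(.{ .src_path = "libcxx/src/algorithm.cpp" });
    const with_exempt = hashSourceFile(.{
        .src_path = "libcxx/src/algorithm.cpp",
        // Hypothetical include path under the zig lib directory.
        .cache_exempt_flags = &.{ "-I", "/opt/zig/lib/libcxx/include" },
    });
    try std.testing.expectEqual(plain, with_exempt);
}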
- var decl_gen = codegen.DeclGen.init(module, &spv); + var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv); defer decl_gen.deinit(); var it = self.decl_table.iterator(); @@ -245,16 +245,18 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No fn writeCapabilities(spv: *SpvModule, target: std.Target) !void { // TODO: Integrate with a hypothetical feature system - const cap: spec.Capability = switch (target.os.tag) { - .opencl => .Kernel, - .glsl450 => .Shader, - .vulkan => .VulkanMemoryModel, + const caps: []const spec.Capability = switch (target.os.tag) { + .opencl => &.{.Kernel}, + .glsl450 => &.{.Shader}, + .vulkan => &.{.Shader}, else => unreachable, // TODO }; - try spv.sections.capabilities.emit(spv.gpa, .OpCapability, .{ - .capability = cap, - }); + for (caps) |cap| { + try spv.sections.capabilities.emit(spv.gpa, .OpCapability, .{ + .capability = cap, + }); + } } fn writeMemoryModel(spv: *SpvModule, target: std.Target) !void { @@ -271,7 +273,7 @@ fn writeMemoryModel(spv: *SpvModule, target: std.Target) !void { const memory_model: spec.MemoryModel = switch (target.os.tag) { .opencl => .OpenCL, .glsl450 => .GLSL450, - .vulkan => .Vulkan, + .vulkan => .GLSL450, else => unreachable, }; @@ -296,17 +298,27 @@ fn cloneLiveness(l: Liveness, gpa: Allocator) !Liveness { }; } -fn cloneAir(air: Air, gpa: Allocator, value_arena: Allocator) !Air { +fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air { const values = try gpa.alloc(Value, air.values.len); errdefer gpa.free(values); for (values) |*value, i| { - value.* = try air.values[i].copy(value_arena); + value.* = try air.values[i].copy(air_arena); } var instructions = try air.instructions.toMultiArrayList().clone(gpa); errdefer instructions.deinit(gpa); + const air_tags = instructions.items(.tag); + const air_datas = instructions.items(.data); + + for (air_tags) |tag, i| { + switch (tag) { + .arg, .alloc, .ret_ptr, .const_ty => air_datas[i].ty = try air_datas[i].ty.copy(air_arena), + else => {}, + } + } + return Air{ .instructions = instructions.slice(), .extra = try gpa.dupe(u32, air.extra), diff --git a/src/main.zig b/src/main.zig index 24518d743d..f5855da6b9 100644 --- a/src/main.zig +++ b/src/main.zig @@ -2744,11 +2744,14 @@ fn buildOutputType( } const self_exe_path = try introspect.findZigExePath(arena); - var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ - .path = lib_dir, - .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { - fatal("unable to open zig lib directory '{s}': {s}", .{ lib_dir, @errorName(err) }); - }, + var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |unresolved_lib_dir| l: { + const lib_dir = try fs.path.resolve(arena, &.{unresolved_lib_dir}); + break :l .{ + .path = lib_dir, + .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { + fatal("unable to open zig lib directory '{s}': {s}", .{ lib_dir, @errorName(err) }); + }, + }; } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)}); }; diff --git a/src/mingw.zig b/src/mingw.zig index 906d0a790d..79c4327c4c 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -302,6 +302,10 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { .gpa = comp.gpa, .manifest_dir = comp.cache_parent.manifest_dir, }; + for (comp.cache_parent.prefixes()) |prefix| { + cache.addPrefix(prefix); + } + cache.hash.addBytes(build_options.version); 
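In the src/main.zig hunk above, an overridden lib dir is now passed through fs.path.resolve before being opened, so the cache prefixes registered via addPrefix (see the src/glibc.zig and src/mingw.zig hunks) always compare against one canonical spelling of the directory. A small test sketch of that normalization, assuming POSIX-style paths:

const std = @import("std");

test "resolve collapses redundant path components" {
    const gpa = std.testing.allocator;
    // "zig/../zig" and the trailing separator collapse away, so prefix
    // matching against the cache sees a stable, canonical string.
    const resolved = try std.fs.path.resolve(gpa, &.{"/usr/lib/zig/../zig/"});
    defer gpa.free(resolved);
    try std.testing.expectEqualStrings("/usr/lib/zig", resolved);
}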
cache.hash.addOptionalBytes(comp.zig_lib_directory.path); cache.hash.add(target.cpu.arch); diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp index 4371f8d523..f29110b94b 100644 --- a/src/stage1/all_types.hpp +++ b/src/stage1/all_types.hpp @@ -99,7 +99,7 @@ enum AddressSpace { AddressSpaceConstant, AddressSpaceParam, AddressSpaceShared, - AddressSpaceLocal + AddressSpaceLocal, }; // This one corresponds to the builtin.zig enum. diff --git a/src/target.zig b/src/target.zig index 90cc50db23..debcfb3776 100644 --- a/src/target.zig +++ b/src/target.zig @@ -729,3 +729,43 @@ pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend else => return false, } } + +pub fn libcFloatPrefix(float_bits: u16) []const u8 { + return switch (float_bits) { + 16, 80 => "__", + 32, 64, 128 => "", + else => unreachable, + }; +} + +pub fn libcFloatSuffix(float_bits: u16) []const u8 { + return switch (float_bits) { + 16 => "h", // Non-standard + 32 => "f", + 64 => "", + 80 => "x", // Non-standard + 128 => "q", // Non-standard (mimics convention in GCC libquadmath) + else => unreachable, + }; +} + +pub fn compilerRtFloatAbbrev(float_bits: u16) []const u8 { + return switch (float_bits) { + 16 => "h", + 32 => "s", + 64 => "d", + 80 => "x", + 128 => "t", + else => unreachable, + }; +} + +pub fn compilerRtIntAbbrev(bits: u16) []const u8 { + return switch (bits) { + 16 => "h", + 32 => "s", + 64 => "d", + 128 => "t", + else => "o", // Non-standard + }; +} diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 36a51d8275..e756264418 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -13,7 +13,6 @@ test "implicit cast vector to array - bool" { } if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -294,7 +293,6 @@ test "vector @splat" { test "load vector elements via comptime index" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -317,7 +315,6 @@ test "load vector elements via comptime index" { test "store vector elements via comptime index" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -346,7 +343,6 @@ test "store vector elements via comptime index" { test "load vector elements via runtime index" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO 
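The four helpers added to src/target.zig above capture the naming conventions for libc and compiler-rt math routines. For example, the float abbreviations compose into symbol names such as __trunctfdf2 for an f128-to-f64 truncation; truncSymbolName below is a hypothetical illustration built on a local copy of compilerRtFloatAbbrev, not part of the patch:

const std = @import("std");

// Local copy of the target.zig helper above, so the sketch is self-contained.
fn compilerRtFloatAbbrev(float_bits: u16) []const u8 {
    return switch (float_bits) {
        16 => "h",
        32 => "s",
        64 => "d",
        80 => "x",
        128 => "t",
        else => unreachable,
    };
}

// Hypothetical helper: compiler-rt float truncations are named from GCC
// mode suffixes ("tf" for f128, "df" for f64), e.g. "__trunctfdf2".
fn truncSymbolName(buf: []u8, src_bits: u16, dst_bits: u16) ![]const u8 {
    return std.fmt.bufPrint(buf, "__trunc{s}f{s}f2", .{
        compilerRtFloatAbbrev(src_bits),
        compilerRtFloatAbbrev(dst_bits),
    });
}

test "f128 to f64 truncation symbol" {
    var buf: [32]u8 = undefined;
    try std.testing.expectEqualStrings("__trunctfdf2", try truncSymbolName(&buf, 128, 64));
}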
@@ -369,7 +365,6 @@ test "load vector elements via runtime index" { test "store vector elements via runtime index" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -393,7 +388,6 @@ test "store vector elements via runtime index" { test "initialize vector which is a struct field" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -975,7 +969,6 @@ test "@addWithOverflow" { return error.SkipZigTest; } if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -1018,7 +1011,6 @@ test "@subWithOverflow" { return error.SkipZigTest; } if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -1049,7 +1041,6 @@ test "@mulWithOverflow" { return error.SkipZigTest; } if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -1072,7 +1063,6 @@ test "@shlWithOverflow" { return error.SkipZigTest; } if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -1098,7 +1088,6 @@ test "alignment of vectors" { test "loading the second vector from a slice of vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -1115,7 +1104,6 @@ test "loading the second vector from a slice of vectors" { test "array of vectors is copied" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; 
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index 619143d883..426276122c 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -116,6 +116,31 @@ fn render(writer: anytype, allocator: Allocator, registry: g.CoreRegistry) !void
         \\pub const PairIdRefLiteralInteger = struct { target: IdRef, member: LiteralInteger };
         \\pub const PairIdRefIdRef = [2]IdRef;
         \\
+        \\pub const Quantifier = enum {
+        \\    required,
+        \\    optional,
+        \\    variadic,
+        \\};
+        \\
+        \\pub const Operand = struct {
+        \\    kind: OperandKind,
+        \\    quantifier: Quantifier,
+        \\};
+        \\
+        \\pub const OperandCategory = enum {
+        \\    bit_enum,
+        \\    value_enum,
+        \\    id,
+        \\    literal,
+        \\    composite,
+        \\};
+        \\
+        \\pub const Enumerant = struct {
+        \\    name: []const u8,
+        \\    value: Word,
+        \\    parameters: []const OperandKind,
+        \\};
+        \\
         \\
     );
@@ -123,14 +148,118 @@ fn render(writer: anytype, allocator: Allocator, registry: g.CoreRegistry) !void
         \\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
         \\pub const magic_number: Word = {s};
         \\
+        \\
     ,
         .{ registry.major_version, registry.minor_version, registry.revision, registry.magic_number },
     );
 
+    const extended_structs = try extendedStructs(allocator, registry.operand_kinds);
+
+    try renderClass(writer, allocator, registry.instructions);
+    try renderOperandKind(writer, registry.operand_kinds);
     try renderOpcodes(writer, allocator, registry.instructions, extended_structs);
     try renderOperandKinds(writer, allocator, registry.operand_kinds, extended_structs);
 }
 
+fn renderClass(writer: anytype, allocator: Allocator, instructions: []const g.Instruction) !void {
+    var class_map = std.StringArrayHashMap(void).init(allocator);
+
+    for (instructions) |inst| {
+        if (std.mem.eql(u8, inst.class.?, "@exclude")) {
+            continue;
+        }
+        try class_map.put(inst.class.?, {});
+    }
+
+    try writer.writeAll("pub const Class = enum {\n");
+    for (class_map.keys()) |class| {
+        try renderInstructionClass(writer, class);
+        try writer.writeAll(",\n");
+    }
+    try writer.writeAll("};\n");
+}
+
+fn renderInstructionClass(writer: anytype, class: []const u8) !void {
+    // Just assume that these won't clobber zig builtin types.
+    var prev_was_sep = true;
+    for (class) |c| {
+        switch (c) {
+            '-', '_' => prev_was_sep = true,
+            else => if (prev_was_sep) {
+                try writer.writeByte(std.ascii.toUpper(c));
+                prev_was_sep = false;
+            } else {
+                try writer.writeByte(std.ascii.toLower(c));
+            },
+        }
+    }
+}
+
+fn renderOperandKind(writer: anytype, operands: []const g.OperandKind) !void {
+    try writer.writeAll("pub const OperandKind = enum {\n");
+    for (operands) |operand| {
+        try writer.print("{},\n", .{std.zig.fmtId(operand.kind)});
+    }
+    try writer.writeAll(
+        \\
+        \\pub fn category(self: OperandKind) OperandCategory {
+        \\return switch (self) {
+        \\
+    );
+    for (operands) |operand| {
+        const cat = switch (operand.category) {
+            .BitEnum => "bit_enum",
+            .ValueEnum => "value_enum",
+            .Id => "id",
+            .Literal => "literal",
+            .Composite => "composite",
+        };
+        try writer.print(".{} => .{s},\n", .{ std.zig.fmtId(operand.kind), cat });
+    }
+    try writer.writeAll(
+        \\};
+        \\}
+        \\pub fn enumerants(self: OperandKind) []const Enumerant {
+        \\return switch (self) {
+        \\
+    );
+    for (operands) |operand| {
+        switch (operand.category) {
+            .BitEnum, .ValueEnum => {},
+            else => {
+                try writer.print(".{} => unreachable,\n", .{std.zig.fmtId(operand.kind)});
+                continue;
+            },
+        }
+
+        try writer.print(".{} => &[_]Enumerant{{", .{std.zig.fmtId(operand.kind)});
+        for (operand.enumerants.?) |enumerant| {
+            if (enumerant.value == .bitflag and std.mem.eql(u8, enumerant.enumerant, "None")) {
+                continue;
+            }
+            try renderEnumerant(writer, enumerant);
+            try writer.writeAll(",");
+        }
+        try writer.writeAll("},\n");
+    }
+    try writer.writeAll("};\n}\n};\n");
+}
+
+fn renderEnumerant(writer: anytype, enumerant: g.Enumerant) !void {
+    try writer.print(".{{.name = \"{s}\", .value = ", .{enumerant.enumerant});
+    switch (enumerant.value) {
+        .bitflag => |flag| try writer.writeAll(flag),
+        .int => |int| try writer.print("{}", .{int}),
+    }
+    try writer.writeAll(", .parameters = &[_]OperandKind{");
+    for (enumerant.parameters) |param, i| {
+        if (i != 0)
+            try writer.writeAll(", ");
+        // Note: param.quantifier will always be one.
+        try writer.print(".{}", .{std.zig.fmtId(param.kind)});
+    }
+    try writer.writeAll("}}");
+}
+
 fn renderOpcodes(
     writer: anytype,
     allocator: Allocator,
@@ -144,6 +273,9 @@ fn renderOpcodes(
     try aliases.ensureTotalCapacity(instructions.len);
 
     for (instructions) |inst, i| {
+        if (std.mem.eql(u8, inst.class.?, "@exclude")) {
+            continue;
+        }
         const result = inst_map.getOrPutAssumeCapacity(inst.opcode);
         if (!result.found_existing) {
             result.value_ptr.* = i;
@@ -192,6 +324,47 @@ fn renderOpcodes(
         const inst = instructions[i];
         try renderOperand(writer, .instruction, inst.opname, inst.operands, extended_structs);
     }
+
+    try writer.writeAll(
+        \\};
+        \\}
+        \\pub fn operands(self: Opcode) []const Operand {
+        \\return switch (self) {
+        \\
+    );
+
+    for (instructions_indices) |i| {
+        const inst = instructions[i];
+        try writer.print(".{} => &[_]Operand{{", .{std.zig.fmtId(inst.opname)});
+        for (inst.operands) |operand| {
+            const quantifier = if (operand.quantifier) |q|
+                switch (q) {
+                    .@"?" => "optional",
+                    .@"*" => "variadic",
+                }
+            else
+                "required";
+
+            try writer.print(".{{.kind = .{s}, .quantifier = .{s}}},", .{ operand.kind, quantifier });
+        }
+        try writer.writeAll("},\n");
+    }
+
+    try writer.writeAll(
+        \\};
+        \\}
+        \\pub fn class(self: Opcode) Class {
+        \\return switch (self) {
+        \\
+    );
+
+    for (instructions_indices) |i| {
+        const inst = instructions[i];
+        try writer.print(".{} => .", .{std.zig.fmtId(inst.opname)});
+        try renderInstructionClass(writer, inst.class.?);
+        try writer.writeAll(",\n");
+    }
+    try writer.writeAll("};\n}\n};\n");
 }
@@ -298,7 +471,7 @@ fn renderBitEnum(
     for (enumerants) |enumerant, i| {
         if (enumerant.value != .bitflag) return error.InvalidRegistry;
         const value = try parseHexInt(enumerant.value.bitflag);
-        if (@popCount(value) == 0) {
+        if (value == 0) {
             continue; // Skip 'none' items
         }
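
Taken together, the gen_spirv_spec.zig changes make the generated spec file self-describing: every Opcode can report its operand list and its instruction class, and every OperandKind its category and enumerants. The renderBitEnum tweak is a pure simplification: a flag word has zero set bits exactly when it equals zero, so the @popCount call was redundant. A hedged sketch of how the generated introspection API might be consumed (the import path and the printing are illustrative, not part of this diff):

    const std = @import("std");
    const spec = @import("spirv/spec.zig"); // assumed location of the generated file

    fn describe(opcode: spec.Opcode) void {
        // class() returns a member of the new Class enum rendered by renderInstructionClass.
        std.debug.print("{s}: class = {s}\n", .{ @tagName(opcode), @tagName(opcode.class()) });
        // operands() pairs each operand's kind with its quantifier.
        for (opcode.operands()) |operand| {
            std.debug.print("  {s} ({s})\n", .{ @tagName(operand.kind), @tagName(operand.quantifier) });
        }
    }
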
=> "optional", + .@"*" => "variadic", + } + else + "required"; + + try writer.print(".{{.kind = .{s}, .quantifier = .{s}}},", .{ operand.kind, quantifier }); + } + try writer.writeAll("},\n"); + } + + try writer.writeAll( + \\}; + \\} + \\pub fn class(self: Opcode) Class { + \\return switch (self) { + \\ + ); + + for (instructions_indices) |i| { + const inst = instructions[i]; + try writer.print(".{} => .", .{std.zig.fmtId(inst.opname)}); + try renderInstructionClass(writer, inst.class.?); + try writer.writeAll(",\n"); + } + try writer.writeAll("};\n}\n};\n"); } @@ -298,7 +471,7 @@ fn renderBitEnum( for (enumerants) |enumerant, i| { if (enumerant.value != .bitflag) return error.InvalidRegistry; const value = try parseHexInt(enumerant.value.bitflag); - if (@popCount(value) == 0) { + if (value == 0) { continue; // Skip 'none' items } diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig index 8d20039fa1..a7eb3c18f9 100644 --- a/tools/update_spirv_features.zig +++ b/tools/update_spirv_features.zig @@ -117,7 +117,10 @@ pub fn main() !void { try w.writeAll( \\}; \\ - \\pub usingnamespace CpuFeature.feature_set_fns(Feature); + \\pub const featureSet = CpuFeature.feature_set_fns(Feature).featureSet; + \\pub const featureSetHas = CpuFeature.feature_set_fns(Feature).featureSetHas; + \\pub const featureSetHasAny = CpuFeature.feature_set_fns(Feature).featureSetHasAny; + \\pub const featureSetHasAll = CpuFeature.feature_set_fns(Feature).featureSetHasAll; \\ \\pub const all_features = blk: { \\ @setEvalBranchQuota(2000); |
