path: root/test/behavior/math.zig
author    Ali Cheraghi <alichraghi@proton.me> 2025-06-16 01:16:15 +0330
committer Ali Cheraghi <alichraghi@proton.me> 2025-06-16 13:22:19 +0330
commit    872f68c9cbef44817fdbd57e2b1fb90b5c2ddd8c (patch)
tree      f5103c5acad66895724eef94513a9300953fe615 /test/behavior/math.zig
parent    c126d6554fbe18630f4237d0b8e6b0783ebee394 (diff)
download  zig-872f68c9cbef44817fdbd57e2b1fb90b5c2ddd8c.tar.gz
          zig-872f68c9cbef44817fdbd57e2b1fb90b5c2ddd8c.zip
rename spirv backend name
`stage2_spirv64` -> `stage2_spirv`
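
The change is mechanical: every test guard that skipped the self-hosted SPIR-V backend now matches the renamed enum tag. A minimal, self-contained sketch of the updated guard pattern (the test name and assertion here are illustrative only; the guard line is the same one that appears throughout the diff below, and the tag is assumed to live in std.builtin.CompilerBackend as the other stage2_* backends do):

    const builtin = @import("builtin");
    const std = @import("std");

    test "example guard" {
        // Skip when running on the self-hosted SPIR-V backend, using the new tag name.
        if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
        try std.testing.expect(1 + 1 == 2);
    }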
Diffstat (limited to 'test/behavior/math.zig')
-rw-r--r--  test/behavior/math.zig  84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index d6d169b0cc..83f18d8909 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -66,7 +66,7 @@ test "@clz" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testClz();
try comptime testClz();
@@ -84,7 +84,7 @@ test "@clz big ints" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzBigInts();
@@ -105,7 +105,7 @@ test "@clz vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzVectors();
@@ -151,7 +151,7 @@ test "@ctz" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testCtz();
try comptime testCtz();
@@ -174,7 +174,7 @@ test "@ctz 128-bit integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCtz128();
@@ -194,7 +194,7 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testCtzVectors();
try comptime testCtzVectors();
@@ -394,7 +394,7 @@ fn not(comptime T: type, a: T) T {
test "binary not" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try expect(not(u0, 0) == 0);
try expect(not(u1, 0) == 1);
@@ -437,7 +437,7 @@ test "binary not big int <= 128 bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try expect(not(u65, 1) == 0x1_FFFFFFFF_FFFFFFFE);
@@ -472,7 +472,7 @@ test "division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
@@ -594,7 +594,7 @@ test "large integer division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{
@@ -678,7 +678,7 @@ fn testSignedWrappingEval(x: i32) !void {
}
test "signed negation wrapping" {
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testSignedNegationWrappingEval(minInt(i16));
try comptime testSignedNegationWrappingEval(minInt(i16));
@@ -756,7 +756,7 @@ test "f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -784,7 +784,7 @@ fn should_not_be_zero(x: f128) !void {
test "umax wrapped squaring" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
{
var x: u4 = maxInt(u4);
@@ -842,7 +842,7 @@ test "128-bit multiplication" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -897,7 +897,7 @@ test "@addWithOverflow > 64 bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
try testAddWithOverflow(u65, 4, 105, 109, 0);
try testAddWithOverflow(u65, 1000, 100, 1100, 0);
@@ -978,7 +978,7 @@ test "extensive @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testMulWithOverflow(u5, 3, 10, 30, 0);
try testMulWithOverflow(u5, 3, 11, 1, 1);
@@ -1051,7 +1051,7 @@ test "@mulWithOverflow bitsize 128 bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMulWithOverflow(u128, 3, 0x5555555555555555_5555555555555555, 0xffffffffffffffff_ffffffffffffffff, 0);
@@ -1076,7 +1076,7 @@ test "@mulWithOverflow bitsize 128 bits" {
test "@mulWithOverflow bitsize 256 bits" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1142,7 +1142,7 @@ test "@subWithOverflow > 64 bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
try testSubWithOverflow(u65, 4, 105, maxInt(u65) - 100, 1);
try testSubWithOverflow(u65, 1000, 100, 900, 0);
@@ -1188,7 +1188,7 @@ fn testShlWithOverflow(comptime T: type, a: T, b: math.Log2Int(T), shl: T, bit:
test "@shlWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testShlWithOverflow(u4, 2, 1, 4, 0);
@@ -1207,7 +1207,7 @@ test "@shlWithOverflow" {
test "@shlWithOverflow > 64 bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testShlWithOverflow(u65, 0x0_0100_0000_0000_0000, 7, 0x0_8000_0000_0000_0000, 0);
@@ -1232,7 +1232,7 @@ test "@shlWithOverflow > 64 bits" {
}
test "overflow arithmetic with u0 values" {
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
{
var a: u0 = 0;
@@ -1277,7 +1277,7 @@ test "quad hex float literal parsing accurate" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const a: f128 = 0x1.1111222233334444555566667777p+0;
@@ -1435,7 +1435,7 @@ test "remainder division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1474,7 +1474,7 @@ test "float remainder division using @rem" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1517,7 +1517,7 @@ test "float modulo division using @mod" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1589,7 +1589,7 @@ test "@round f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1602,7 +1602,7 @@ test "@round f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1623,7 +1623,7 @@ test "vector integer addition" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
@@ -1645,7 +1645,7 @@ test "NaN comparison" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@@ -1664,7 +1664,7 @@ test "NaN comparison f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1690,7 +1690,7 @@ test "vector comparison" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -1720,7 +1720,7 @@ test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1785,7 +1785,7 @@ test "@clz works on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var x: u32 = 0x1;
_ = &x;
@@ -1801,7 +1801,7 @@ test "runtime comparison to NaN is comptime-known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@@ -1831,7 +1831,7 @@ test "runtime int comparison to inf is comptime-known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@@ -1863,7 +1863,7 @@ test "float divide by zero" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
@@ -1898,7 +1898,7 @@ test "partially-runtime integer vector division would be illegal if vector eleme
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1928,7 +1928,7 @@ test "float vector division of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
@@ -1949,7 +1949,7 @@ test "float vector multiplication of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ct_zero: @Vector(1, f32) = .{0};
var rt_nan: @Vector(1, f32) = .{math.nan(f32)};
@@ -1966,7 +1966,7 @@ test "comptime float vector division of zero by nan is nan" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ct_zero: @Vector(1, f32) = .{0};
const ct_nan: @Vector(1, f32) = .{math.nan(f32)};
@@ -1982,7 +1982,7 @@ test "comptime float vector multiplication of zero by nan is nan" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ct_zero: @Vector(1, f32) = .{0};
const ct_nan: @Vector(1, f32) = .{math.nan(f32)};