diff options
| author | Jacob Young <15544577+jacobly0@users.noreply.github.com> | 2025-06-20 00:20:56 -0400 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-06-20 00:20:56 -0400 |
| commit | cf1a7bbd44b9542552c7b5dc6532aafb5142bf7a (patch) | |
| tree | 23d82265b3a4500514063f0fa13533b255f88f64 /test/behavior | |
| parent | f5a327cd366348a739a282f380acd627815183b5 (diff) | |
| parent | 1f98c98fffb09bf15a9fc04ecd5f1fa38a4bd4b8 (diff) | |
| download | zig-cf1a7bbd44b9542552c7b5dc6532aafb5142bf7a.tar.gz zig-cf1a7bbd44b9542552c7b5dc6532aafb5142bf7a.zip | |
Merge pull request #24193 from jacobly0/x86_64-spring-cleaning
x86_64: increase passing test coverage on windows
Diffstat (limited to 'test/behavior')
28 files changed, 280 insertions, 353 deletions
diff --git a/test/behavior/abs.zig b/test/behavior/abs.zig index a3d8b1ffc7..530802a0ca 100644 --- a/test/behavior/abs.zig +++ b/test/behavior/abs.zig @@ -156,7 +156,6 @@ test "@abs floats" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try comptime testAbsFloats(f16); if (builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f16); @@ -341,7 +340,6 @@ test "@abs float vectors" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; @setEvalBranchQuota(2000); try comptime testAbsFloatVectors(f16, 1); diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 5b7cd163be..c8166a06ab 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -302,19 +302,6 @@ test "array mult operator" { try expect(mem.eql(u8, "ab" ** 5, "ababababab")); } -const OpaqueA = opaque {}; -const OpaqueB = opaque {}; - -test "opaque types" { - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - - try expect(*OpaqueA != *OpaqueB); - - try expect(mem.eql(u8, @typeName(OpaqueA), "behavior.basic.OpaqueA")); - try expect(mem.eql(u8, @typeName(OpaqueB), "behavior.basic.OpaqueB")); -} - const global_a: i32 = 1234; const global_b: *const i32 = &global_a; const global_c: *const f32 = @as(*const f32, @ptrCast(global_b)); @@ -447,6 +434,7 @@ fn f2(x: bool) []const u8 { return (if (x) &fA else &fB)(); } +const OpaqueA = opaque {}; test "variable is allowed to be a pointer to an opaque type" { if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -1199,7 +1187,6 @@ test "arrays and vectors with big integers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig index 4a1f65b382..33742f21c4 100644 --- a/test/behavior/bit_shifting.zig +++ b/test/behavior/bit_shifting.zig @@ -166,7 +166,6 @@ test "Saturating Shift Left" { if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn shlSat(x: anytype, y: std.math.Log2Int(@TypeOf(x))) @TypeOf(x) { diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 
4242edaa6f..052b66e532 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -302,7 +302,6 @@ test "@bitCast packed struct of floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Foo = packed struct { @@ -342,7 +341,6 @@ test "comptime @bitCast packed struct to int and back" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = packed struct { void: void = {}, @@ -426,7 +424,6 @@ test "bitcast nan float does not modify signaling bit" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const snan_u16: u16 = 0x7D00; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index d96f658ef8..d63991930e 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -126,7 +126,6 @@ test "@floatFromInt(f80)" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -1406,7 +1405,6 @@ test "cast f16 to wider types" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -1427,7 +1425,6 @@ test "cast f128 to narrower types" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -1521,7 +1518,7 @@ test "coerce between pointers of compatible differently-named floats" { if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and !builtin.link_libc) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) 
return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { @@ -1727,7 +1724,6 @@ test "peer type resolution: float and comptime-known fixed-width integer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const i: u8 = 100; var f: f32 = 1.234; @@ -2477,7 +2473,6 @@ test "@floatCast on vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -2569,7 +2564,6 @@ test "@floatFromInt on vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { @@ -2626,8 +2620,6 @@ test "@intFromBool on vector" { } test "numeric coercions with undefined" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; - const from: i32 = undefined; var to: f32 = from; to = @floatFromInt(from); @@ -2648,7 +2640,6 @@ test "@as does not corrupt values with incompatible representations" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const x: f32 = @as(f16, blk: { if (false) { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index a5d737cb13..12e9de1825 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -526,7 +526,6 @@ test "runtime 128 bit integer division" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: u128 = 152313999999999991610955792383; diff --git 
a/test/behavior/export_keyword.zig b/test/behavior/export_keyword.zig index 509925fe8c..af2065067e 100644 --- a/test/behavior/export_keyword.zig +++ b/test/behavior/export_keyword.zig @@ -40,7 +40,7 @@ export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion) void { } test "export function alias" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; _ = struct { diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 355032a247..ba92318067 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -16,9 +16,6 @@ fn epsForType(comptime T: type) T { test "add f16" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest; - try testAdd(f16); try comptime testAdd(f16); } @@ -31,7 +28,6 @@ test "add f32/f64" { } test "add f80/f128/c_longdouble" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -52,7 +48,6 @@ fn testAdd(comptime T: type) !void { } test "sub f16" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testSub(f16); @@ -67,7 +62,6 @@ test "sub f32/f64" { } test "sub f80/f128/c_longdouble" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -88,7 +82,6 @@ fn testSub(comptime T: type) !void { } test "mul f16" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testMul(f16); @@ -103,7 +96,6 @@ test "mul f32/f64" { } test "mul f80/f128/c_longdouble" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -128,9 +120,6 @@ test "cmp f16" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest; - try testCmp(f16); try comptime testCmp(f16); } @@ -158,7 +147,6 @@ test "cmp f128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == 
.stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCmp(f128); @@ -171,8 +159,8 @@ test "cmp f80/c_longdouble" { if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testCmp(f80); try comptime testCmp(f80); @@ -242,7 +230,6 @@ test "vector cmp f16" { if (builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCmpVector(f16); try comptime testCmpVector(f16); @@ -256,7 +243,6 @@ test "vector cmp f32" { if (builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCmpVector(f32); try comptime testCmpVector(f32); @@ -269,7 +255,6 @@ test "vector cmp f64" { if (builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCmpVector(f64); try comptime testCmpVector(f64); @@ -285,7 +270,6 @@ test "vector cmp f128" { if (builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testCmpVector(f128); try comptime testCmpVector(f128); @@ -297,7 +281,7 @@ test "vector cmp f80/c_longdouble" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testCmpVector(f80); try comptime testCmpVector(f80); @@ -344,9 +328,6 @@ test "different sized float comparisons" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) 
return error.SkipZigTest; - try testDifferentSizedFloatComparisons(); try comptime testDifferentSizedFloatComparisons(); } @@ -395,9 +376,6 @@ test "@sqrt f16" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest; - try testSqrt(f16); try comptime testSqrt(f16); } @@ -418,9 +396,9 @@ test "@sqrt f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; if (builtin.os.tag == .freebsd) { // TODO https://github.com/ziglang/zig/issues/10875 @@ -527,7 +505,6 @@ test "@sin f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -539,7 +516,6 @@ test "@sin f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -553,9 +529,9 @@ test "@sin f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testSin(f80); comptime try testSin(f80); @@ -581,7 +557,6 @@ test "@sin with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -603,7 +578,6 @@ test "@cos f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -615,7 +589,6 @@ test "@cos f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -629,9 +602,9 @@ test "@cos f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testCos(f80); try comptime testCos(f80); @@ -657,7 +630,6 @@ test "@cos with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -679,7 +651,6 @@ test "@tan f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -691,7 +662,6 @@ test "@tan f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -705,9 +675,9 @@ test "@tan 
f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testTan(f80); try comptime testTan(f80); @@ -733,7 +703,6 @@ test "@tan with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -755,7 +724,6 @@ test "@exp f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -767,7 +735,6 @@ test "@exp f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -781,9 +748,9 @@ test "@exp f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testExp(f80); try comptime testExp(f80); @@ -813,7 +780,6 @@ test "@exp with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) 
return error.SkipZigTest; @@ -835,7 +801,6 @@ test "@exp2 f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -847,7 +812,6 @@ test "@exp2 f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -861,9 +825,9 @@ test "@exp2 f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testExp2(f80); try comptime testExp2(f80); @@ -888,7 +852,6 @@ test "@exp2 with @vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -910,7 +873,6 @@ test "@log f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -922,7 +884,6 @@ test "@log f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -936,9 +897,9 @@ test "@log f80/f128/c_longdouble" { if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testLog(f80); try comptime testLog(f80); @@ -964,7 +925,6 @@ test "@log with @vectors" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -983,7 +943,6 @@ test "@log2 f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -995,7 +954,6 @@ test "@log2 f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -1009,9 +967,9 @@ test "@log2 f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testLog2(f80); try comptime testLog2(f80); @@ -1042,7 +1000,6 @@ test "@log2 with vectors" { if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64 and builtin.os.tag == .windows) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testLog2WithVectors(); try comptime testLog2WithVectors(); @@ -1062,7 +1019,6 @@ test "@log10 f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; 
// TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -1074,7 +1030,6 @@ test "@log10 f32/f64" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -1088,9 +1043,9 @@ test "@log10 f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testLog10(f80); try comptime testLog10(f80); @@ -1115,7 +1070,6 @@ test "@log10 with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; @@ -1139,9 +1093,6 @@ test "@abs f16" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest; - try testFabs(f16); try comptime testFabs(f16); } @@ -1162,9 +1113,9 @@ test "@abs f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testFabs(f80); try comptime testFabs(f80); @@ -1262,7 +1213,6 @@ test "@floor f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testFloor(f16); @@ -1275,9 +1225,6 @@ test "@floor f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - try testFloor(f32); try comptime testFloor(f32); try testFloor(f64); @@ -1342,9 +1289,6 @@ test "@floor with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - try testFloorWithVectors(); try comptime testFloorWithVectors(); } @@ -1363,7 +1307,6 @@ test "@ceil f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testCeil(f16); @@ -1376,9 +1319,6 @@ test "@ceil f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - try testCeil(f32); try comptime testCeil(f32); try testCeil(f64); @@ -1443,9 +1383,6 @@ test "@ceil with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - try testCeilWithVectors(); try comptime testCeilWithVectors(); } @@ -1464,7 +1401,6 @@ test "@trunc f16" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testTrunc(f16); @@ -1477,9 +1413,6 @@ test "@trunc f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - try testTrunc(f32); try comptime testTrunc(f32); try testTrunc(f64); @@ -1491,9 +1424,9 @@ test "@trunc f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { // https://github.com/ziglang/zig/issues/12602 @@ -1544,9 +1477,6 @@ test "@trunc with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - try testTruncWithVectors(); try comptime testTruncWithVectors(); } @@ -1566,9 +1496,7 @@ test "neg f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.os.tag == .freebsd) { // TODO file issue to track this failure @@ -1597,8 +1525,8 @@ test "neg f80/f128/c_longdouble" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; try testNeg(f80); try comptime testNeg(f80); @@ -1704,7 +1632,6 @@ test "comptime fixed-width float zero divided by zero produces NaN" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; inline for (.{ f16, f32, f64, f80, f128 }) |F| { diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 372f440752..bb7bf88676 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -587,7 +587,6 @@ fn StructCapture(comptime T: type) type { } test "call generic function that uses capture from function declaration's scope" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 83f18d8909..eb410f0efe 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -65,7 +65,6 @@ test "@clz" { if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; try testClz(); @@ -474,9 +473,6 @@ test "division" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest; - try testIntDivision(); try comptime testIntDivision(); @@ -589,7 +585,6 @@ fn testFloatDivision() !void { } test "large integer division" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; @@ -615,7 +610,6 @@ test "division half-precision floats" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testDivisionFP16(); @@ -757,7 +751,6 @@ test "f128" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try test_f128(); @@ -843,7 +836,6 @@ test "128-bit multiplication" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1052,7 +1044,6 @@ test "@mulWithOverflow bitsize 128 bits" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testMulWithOverflow(u128, 3, 0x5555555555555555_5555555555555555, 0xffffffffffffffff_ffffffffffffffff, 0); try testMulWithOverflow(u128, 3, 0x5555555555555555_5555555555555556, 2, 1); @@ -1078,7 +1069,6 @@ test "@mulWithOverflow bitsize 256 bits" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; 
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; { const const_lhs: u256 = 8035709466408580321693645878924206181189; @@ -1475,7 +1465,6 @@ test "float remainder division using @rem" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try comptime frem(f16); @@ -1560,7 +1549,6 @@ test "@round f16" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f16, 12.0); @@ -1571,7 +1559,6 @@ test "@round f32/f64" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f64, 12.0); @@ -1591,7 +1578,6 @@ test "@round f80" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f80, 12.0); @@ -1604,7 +1590,6 @@ test "@round f128" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testRound(f128, 12.0); @@ -1624,7 +1609,6 @@ test "vector integer addition" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1646,7 +1630,6 @@ test "NaN comparison" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; 
// TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 @@ -1665,7 +1648,6 @@ test "NaN comparison f80" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; try testNanEqNan(f80); @@ -1722,7 +1704,7 @@ test "signed zeros are represented properly" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -1900,7 +1882,6 @@ test "partially-runtime integer vector division would be illegal if vector eleme if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; var lhs: @Vector(2, i8) = .{ -128, 5 }; const rhs: @Vector(2, i8) = .{ 1, -1 }; @@ -1930,9 +1911,6 @@ test "float vector division of comptime zero by runtime nan is nan" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and - !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest; - const ct_zero: @Vector(1, f32) = .{0}; var rt_nan: @Vector(1, f32) = .{math.nan(f32)}; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index d20bb42337..b34df4b784 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -122,7 +122,6 @@ test "@min/max for floats" { if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest(comptime T: type) !void { diff --git a/test/behavior/memmove.zig b/test/behavior/memmove.zig index 8bfc8f66ad..36af982c84 100644 --- a/test/behavior/memmove.zig +++ b/test/behavior/memmove.zig @@ -9,7 +9,6 @@ test "memmove and memset intrinsics" { if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return 
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 try testMemmoveMemset();
 try comptime testMemmoveMemset();
@@ -39,7 +38,6 @@ test "@memmove with both operands single-ptr-to-array, one is null-terminated" {
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 try testMemmoveBothSinglePtrArrayOneIsNullTerminated();
 try comptime testMemmoveBothSinglePtrArrayOneIsNullTerminated();
@@ -85,7 +83,6 @@ test "@memmove dest many pointer" {
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 try testMemmoveDestManyPtr();
 try comptime testMemmoveDestManyPtr();
@@ -129,7 +126,6 @@ test "@memmove slice" {
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 try testMemmoveSlice();
 try comptime testMemmoveSlice();
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 87416d1af2..210485b239 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -9,9 +9,6 @@ test "@mulAdd" {
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
- !comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
-
 try comptime testMulAdd();
 try testMulAdd();
 }
@@ -37,7 +34,6 @@ test "@mulAdd f16" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -59,7 +55,6 @@ test "@mulAdd f80" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 try comptime testMulAdd80();
@@ -80,7 +75,6 @@ test "@mulAdd f128" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 try comptime testMulAdd128();
@@ -113,7 +107,6 @@ test "vector f16" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -142,9 +135,6 @@ test "vector f32" {
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
- !comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
-
 try comptime vector32();
 try vector32();
 }
@@ -170,9 +160,6 @@ test "vector f64" {
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
- !comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
-
 try comptime vector64();
 try vector64();
 }
@@ -196,7 +183,6 @@ test "vector f80" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 try comptime vector80();
@@ -223,7 +209,6 @@ test "vector f128" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 try comptime vector128();
diff --git a/test/behavior/multiple_externs_with_conflicting_types.zig b/test/behavior/multiple_externs_with_conflicting_types.zig
index 37b6fd269c..7b3453695c 100644
--- a/test/behavior/multiple_externs_with_conflicting_types.zig
+++ b/test/behavior/multiple_externs_with_conflicting_types.zig
@@ -15,7 +15,7 @@ test "call extern function defined with conflicting type" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@import("conflicting_externs/a.zig").issue529(null); diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index 4e5f25f81d..da67aee55e 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -59,7 +59,6 @@ fn testNullPtrsEql() !void { } test "optional with zero-bit type" { - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; const S = struct { diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 34659e08db..e6b2409154 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -661,7 +661,6 @@ test "nested packed struct field access test" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const Vec2 = packed struct { diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig index 04cdbef05e..1abd5b4dab 100644 --- a/test/behavior/saturating_arithmetic.zig +++ b/test/behavior/saturating_arithmetic.zig @@ -58,7 +58,6 @@ test "saturating add 128bit" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -132,7 +131,6 @@ test "saturating subtraction 128bit" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -233,7 +231,6 @@ test "saturating multiplication <= 32 bits" { test "saturating mul i64, i128" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; try testSatMul(i64, 0, maxInt(i64), 0); try testSatMul(i64, 0, minInt(i64), 0); @@ -266,7 +263,6 @@ test "saturating multiplication" { if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) { // https://github.com/ziglang/zig/issues/9660 @@ -304,7 +300,6 @@ test "saturating shift-left" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv) return 
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTest() !void {
@@ -349,7 +344,6 @@ test "saturating shift-left large rhs" {
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 {
 var lhs: u8 = undefined;
@@ -368,7 +362,6 @@ test "saturating shl uses the LHS type" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const lhs_const: u8 = 1;
 var lhs_var: u8 = 1;
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 6b6d4e7a34..365edcde69 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -561,7 +561,6 @@ test "packed struct with non-ABI-aligned field" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = packed struct {
 x: u9,
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 1f22ac20ae..891dd2726e 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -397,7 +397,6 @@ test "tuple of struct concatenation and coercion to array" {
 if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index b55aa92134..106eb7de77 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -282,7 +282,6 @@ test "cast union to tag type of union" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 try testCastUnionToTag();
 try comptime testCastUnionToTag();
@@ -2262,7 +2261,6 @@ test "signed enum tag with negative value" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const Enum = enum(i8) {
 a = -1,
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 9ca822a372..f2e5807267 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -80,7 +80,6 @@ test "vector int operators" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTest() !void {
@@ -104,7 +103,6 @@ test "vector float operators" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
 // Triggers an assertion with LLVM 18:
@@ -260,9 +258,6 @@ test "array to vector with element type coercion" {
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
- !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
-
 const S = struct {
 fn doTheTest() !void {
 var foo: f16 = 3.14;
@@ -301,7 +296,6 @@ test "tuple to vector" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTest() !void {
@@ -365,7 +359,6 @@ test "vector @splat" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn testForT(comptime N: comptime_int, v: anytype) !void {
@@ -567,7 +560,6 @@ test "vector division operators" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTestDiv(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void {
@@ -718,7 +710,6 @@ test "vector shift operators" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTestShift(x: anytype, y: anytype) !void {
@@ -793,7 +784,6 @@ test "vector reduce operation" {
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21091
 if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isSPARC()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23719
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn testReduce(comptime op: std.builtin.ReduceOp, x: anytype, expected: anytype) !void {
@@ -1047,7 +1037,6 @@ test "saturating shift-left" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTest() !void {
@@ -1072,7 +1061,6 @@ test "multiplication-assignment operator with an array operand" {
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const S = struct {
 fn doTheTest() !void {
@@ -1332,7 +1320,6 @@ test "zero multiplicand" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 const zeros = @Vector(2, u32){ 0.0, 0.0 };
 var ones = @Vector(2, u32){ 1.0, 1.0 };
@@ -1395,7 +1382,6 @@ test "load packed vector element" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 var x: @Vector(2, u15) = .{ 1, 4 };
@@ -1426,7 +1412,6 @@ test "store to vector in slice" {
 if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

 var v = [_]@Vector(3, f32){
 .{ 1, 1, 1 },
diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig
index be81ac4cf3..9219b3700f 100644
--- a/test/behavior/widening.zig
+++ b/test/behavior/widening.zig
@@ -44,7 +44,6 @@ test "float widening" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 var a: f16 = 12.34;
@@ -65,7 +64,6 @@ test "float widening f16 to f128" {
 if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
 if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

 var x: f16 = 12.34;
diff --git a/test/behavior/x86_64.zig b/test/behavior/x86_64.zig
index def436dde4..689b084858 100644
--- a/test/behavior/x86_64.zig
+++ b/test/behavior/x86_64.zig
@@ -5,8 +5,6 @@ test {
 if (builtin.zig_backend != .stage2_x86_64) return error.SkipZigTest;
 // MachO linker does not support executables this big.
 if (builtin.object_format == .macho) return error.SkipZigTest;
- // COFF linker does not support the new backend.
- if (builtin.object_format == .coff) return error.SkipZigTest;
 _ = @import("x86_64/access.zig");
 _ = @import("x86_64/binary.zig");
 _ = @import("x86_64/cast.zig");
diff --git a/test/behavior/x86_64/binary.zig b/test/behavior/x86_64/binary.zig
index ee34840b9e..99dd47155b 100644
--- a/test/behavior/x86_64/binary.zig
+++ b/test/behavior/x86_64/binary.zig
@@ -14,6 +14,7 @@ const Log2Int = math.Log2Int;
 const math = @import("math.zig");
 const nan = math.nan;
 const Scalar = math.Scalar;
+const select = math.select;
 const sign = math.sign;
 const splat = math.splat;
 const Sse = math.Sse;
@@ -84,6 +85,12 @@ fn binary(comptime op: anytype, comptime opts: struct { compare: Compare = .rela
 imm_rhs,
 );
 }
+ fn testBools() !void {
+ try testArgs(bool, false, false);
+ try testArgs(bool, false, true);
+ try testArgs(bool, true, false);
+ try testArgs(bool, true, true);
+ }
 fn testInts() !void {
 try testArgs(i1, 0x0, -0x1);
 try testArgs(u1, 0x1, 0x1);
@@ -1881,6 +1888,23 @@ fn binary(comptime op: anytype, comptime opts: struct { compare: Compare = .rela
 try testArgs(f128, nan(f128), inf(f128));
 try testArgs(f128, nan(f128), nan(f128));
 }
+ fn testBoolVectors() !void {
+ try testArgs(@Vector(1, bool), .{
+ false,
+ }, .{
+ true,
+ });
+ try testArgs(@Vector(2, bool), .{
+ false, true,
+ }, .{
+ true, false,
+ });
+ try testArgs(@Vector(4, bool), .{
+ false, false, true, true,
+ }, .{
+ false, true, false, true,
+ });
+ }
 fn testIntVectors() !void {
 try testArgs(@Vector(1, i1), .{
 0x0,
@@ -5033,8 +5057,7 @@ inline fn addSafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
 test addSafe {
 const test_add_safe = binary(addSafe, .{});
 try test_add_safe.testInts();
- try test_add_safe.testFloats();
- try test_add_safe.testFloatVectors();
+ try test_add_safe.testIntVectors();
 }

 inline fn addWrap(comptime Type: type, lhs: Type, rhs: Type) Type {
@@ -5046,13 +5069,13 @@ test addWrap {
 try test_add_wrap.testIntVectors();
 }

-inline fn addSat(comptime Type: type, lhs: Type, rhs: Type) Type {
+inline fn addSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs +| rhs;
 }
-test addSat {
- const test_add_sat = binary(addSat, .{});
- try test_add_sat.testInts();
- try test_add_sat.testIntVectors();
+test addSaturate {
+ const test_add_saturate = binary(addSaturate, .{});
+ try test_add_saturate.testInts();
+ try test_add_saturate.testIntVectors();
 }

 inline fn subUnsafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
@@ -5088,8 +5111,7 @@ inline fn subSafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
 test subSafe {
 const test_sub_safe = binary(subSafe, .{});
 try test_sub_safe.testInts();
- try test_sub_safe.testFloats();
- try test_sub_safe.testFloatVectors();
+ try test_sub_safe.testIntVectors();
 }

 inline fn subWrap(comptime Type: type, lhs: Type, rhs: Type) Type {
@@ -5101,13 +5123,13 @@ test subWrap {
 try test_sub_wrap.testIntVectors();
 }

-inline fn subSat(comptime Type: type, lhs: Type, rhs: Type) Type {
+inline fn subSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs -| rhs;
 }
-test subSat {
- const test_sub_sat = binary(subSat, .{});
- try test_sub_sat.testInts();
- try test_sub_sat.testIntVectors();
+test subSaturate {
+ const test_sub_saturate = binary(subSaturate, .{});
+ try test_sub_saturate.testInts();
+ try test_sub_saturate.testIntVectors();
 }

 inline fn mulUnsafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
@@ -5118,6 +5140,8 @@ test mulUnsafe {
 const test_mul_unsafe = binary(mulUnsafe, .{});
 try test_mul_unsafe.testInts();
 try test_mul_unsafe.testIntVectors();
+ try test_mul_unsafe.testFloats();
+ try test_mul_unsafe.testFloatVectors();
 }

 inline fn mulSafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
@@ -5127,6 +5151,7 @@ inline fn mulSafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
 test mulSafe {
 const test_mul_safe = binary(mulSafe, .{});
 try test_mul_safe.testInts();
+ try test_mul_safe.testIntVectors();
 }

 inline fn mulWrap(comptime Type: type, lhs: Type, rhs: Type) Type {
@@ -5138,16 +5163,16 @@ test mulWrap {
 try test_mul_wrap.testIntVectors();
 }

-inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type {
+inline fn mulSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs *| rhs;
 }
-test mulSat {
- const test_mul_sat = binary(mulSat, .{});
- try test_mul_sat.testInts();
- try test_mul_sat.testIntVectors();
+test mulSaturate {
+ const test_mul_saturate = binary(mulSaturate, .{});
+ try test_mul_saturate.testInts();
+ try test_mul_saturate.testIntVectors();
 }

-inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
+inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs * rhs;
 }
 test multiply {
@@ -5156,7 +5181,7 @@ test multiply {
 try test_multiply.testFloatVectors();
 }

-inline fn divide(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs / rhs) {
+inline fn divide(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs / rhs;
 }
 test divide {
@@ -5165,29 +5190,49 @@ test divide {
 try test_divide.testFloatVectors();
 }

-inline fn divTrunc(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@divTrunc(lhs, rhs)) {
+inline fn divTruncUnoptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
 return @divTrunc(lhs, rhs);
 }
-test divTrunc {
- const test_div_trunc = binary(divTrunc, .{ .compare = .approx_int });
- try test_div_trunc.testInts();
- try test_div_trunc.testIntVectors();
- try test_div_trunc.testFloats();
- try test_div_trunc.testFloatVectors();
+test divTruncUnoptimized {
+ const test_div_trunc_unoptimized = binary(divTruncUnoptimized, .{ .compare = .approx_int });
+ try test_div_trunc_unoptimized.testInts();
+ try test_div_trunc_unoptimized.testIntVectors();
+ try test_div_trunc_unoptimized.testFloats();
+ try test_div_trunc_unoptimized.testFloatVectors();
+}
+
+inline fn divTruncOptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
+ @setFloatMode(.optimized);
+ return @divTrunc(lhs, select(@abs(rhs) > splat(Type, 0.0), rhs, splat(Type, 1.0)));
+}
+test divTruncOptimized {
+ const test_div_trunc_optimized = binary(divTruncOptimized, .{ .compare = .approx_int });
+ try test_div_trunc_optimized.testFloats();
+ try test_div_trunc_optimized.testFloatVectors();
 }

-inline fn divFloor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@divFloor(lhs, rhs)) {
+inline fn divFloorUnoptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
 return @divFloor(lhs, rhs);
 }
-test divFloor {
- const test_div_floor = binary(divFloor, .{ .compare = .approx_int });
- try test_div_floor.testInts();
- try test_div_floor.testIntVectors();
- try test_div_floor.testFloats();
- try test_div_floor.testFloatVectors();
+test divFloorUnoptimized {
+ const test_div_floor_unoptimized = binary(divFloorUnoptimized, .{ .compare = .approx_int });
+ try test_div_floor_unoptimized.testInts();
+ try test_div_floor_unoptimized.testIntVectors();
+ try test_div_floor_unoptimized.testFloats();
+ try test_div_floor_unoptimized.testFloatVectors();
+}
+
+inline fn divFloorOptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
+ @setFloatMode(.optimized);
+ return @divFloor(lhs, select(@abs(rhs) > splat(Type, 0.0), rhs, splat(Type, 1.0)));
+}
+test divFloorOptimized {
+ const test_div_floor_optimized = binary(divFloorOptimized, .{ .compare = .approx_int });
+ try test_div_floor_optimized.testFloats();
+ try test_div_floor_optimized.testFloatVectors();
 }

-inline fn rem(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@rem(lhs, rhs)) {
+inline fn rem(comptime Type: type, lhs: Type, rhs: Type) Type {
 return @rem(lhs, rhs);
 }
 test rem {
@@ -5198,7 +5243,7 @@ test rem {
 try test_rem.testFloatVectors();
 }

-inline fn mod(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mod(lhs, rhs)) {
+inline fn mod(comptime Type: type, lhs: Type, rhs: Type) Type {
 // workaround llvm backend bugs
 if (@inComptime() and @typeInfo(Scalar(Type)) == .float) {
 const scalarMod = struct {
@@ -5219,6 +5264,7 @@ inline fn mod(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mod(lhs, rhs))
 return @mod(lhs, rhs);
 }
 test mod {
+ if (@import("builtin").object_format == .coff) return error.SkipZigTest;
 const test_mod = binary(mod, .{});
 try test_mod.testInts();
 try test_mod.testIntVectors();
@@ -5286,7 +5332,7 @@ test shlWithOverflow {
 try test_shl_with_overflow.testIntVectors();
 }

-inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
+inline fn equal(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
 return lhs == rhs;
 }
 test equal {
@@ -5297,7 +5343,7 @@ test equal {
 try test_equal.testFloatVectors();
 }

-inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs) {
+inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
 return lhs != rhs;
 }
 test notEqual {
@@ -5308,7 +5354,7 @@ test notEqual {
 try test_not_equal.testFloatVectors();
 }

-inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) {
+inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
 return lhs < rhs;
 }
 test lessThan {
@@ -5319,7 +5365,7 @@ test lessThan {
 try test_less_than.testFloatVectors();
 }

-inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs <= rhs) {
+inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
 return lhs <= rhs;
 }
 test lessThanOrEqual {
@@ -5330,7 +5376,7 @@ test lessThanOrEqual {
 try test_less_than_or_equal.testFloatVectors();
 }

-inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > rhs) {
+inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
 return lhs > rhs;
 }
 test greaterThan {
@@ -5341,7 +5387,7 @@ test greaterThan {
 try test_greater_than.testFloatVectors();
 }

-inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs >= rhs) {
+inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
 return lhs >= rhs;
 }
 test greaterThanOrEqual {
@@ -5352,20 +5398,24 @@ test greaterThanOrEqual {
 try test_greater_than_or_equal.testFloatVectors();
 }

-inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs & rhs) {
+inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs & rhs;
 }
 test bitAnd {
 const test_bit_and = binary(bitAnd, .{});
+ try test_bit_and.testBools();
+ try test_bit_and.testBoolVectors();
 try test_bit_and.testInts();
 try test_bit_and.testIntVectors();
 }

-inline fn bitOr(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs | rhs) {
+inline fn bitOr(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs | rhs;
 }
 test bitOr {
 const test_bit_or = binary(bitOr, .{});
+ try test_bit_or.testBools();
+ try test_bit_or.testBoolVectors();
 try test_bit_or.testInts();
 try test_bit_or.testIntVectors();
 }
@@ -5417,7 +5467,7 @@ test shlExactUnsafe {
 try test_shl_exact_unsafe.testIntVectors();
 }

-inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
+inline fn shlSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
 // workaround https://github.com/ziglang/zig/issues/23034
 if (@inComptime()) {
 // workaround https://github.com/ziglang/zig/issues/23139
@@ -5427,17 +5477,19 @@ inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
 @setRuntimeSafety(false);
 return lhs <<| @abs(rhs);
 }
-test shlSat {
- const test_shl_sat = binary(shlSat, .{});
- try test_shl_sat.testInts();
- try test_shl_sat.testIntVectors();
+test shlSaturate {
+ const test_shl_saturate = binary(shlSaturate, .{});
+ try test_shl_saturate.testInts();
+ try test_shl_saturate.testIntVectors();
 }

-inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) {
+inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) Type {
 return lhs ^ rhs;
 }
 test bitXor {
 const test_bit_xor = binary(bitXor, .{});
+ try test_bit_xor.testBools();
+ try test_bit_xor.testBoolVectors();
 try test_bit_xor.testInts();
 try test_bit_xor.testIntVectors();
 }
@@ -5516,7 +5568,7 @@ test reduceXorNotEqual {
 try test_reduce_xor_not_equal.testFloatVectors();
 }

-inline fn mulAdd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mulAdd(Type, lhs, rhs, rhs)) {
+inline fn mulAdd(comptime Type: type, lhs: Type, rhs: Type) Type {
 return @mulAdd(Type, lhs, rhs, rhs);
 }
 test mulAdd {
diff --git a/test/behavior/x86_64/build.zig b/test/behavior/x86_64/build.zig
index 8d0e27f964..52aeba6d7d 100644
--- a/test/behavior/x86_64/build.zig
+++ b/test/behavior/x86_64/build.zig
@@ -113,8 +113,54 @@ pub fn build(b: *std.Build) void {
 .cpu_arch = .x86_64,
 .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v4 },
 },
+
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64 },
+ .os_tag = .windows,
+ .abi = .none,
+ },
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64 },
+ .cpu_features_add = std.Target.x86.featureSet(&.{.ssse3}),
+ .os_tag = .windows,
+ .abi = .none,
+ },
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v2 },
+ .os_tag = .windows,
+ .abi = .none,
+ },
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v3 },
+ .os_tag = .windows,
+ .abi = .none,
+ },
+
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64 },
+ .os_tag = .windows,
+ .abi = .gnu,
+ },
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v2 },
+ .os_tag = .windows,
+ .abi = .gnu,
+ },
+ .{
+ .cpu_arch = .x86_64,
+ .cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v3 },
+ .os_tag = .windows,
+ .abi = .gnu,
+ },
 }) |query| {
 const target = b.resolveTargetQuery(query);
+ const triple = query.zigTriple(b.allocator) catch @panic("OOM");
 const cpu = query.serializeCpuAlloc(b.allocator) catch @panic("OOM");
 for ([_][]const u8{
 "access.zig",
@@ -133,16 +179,14 @@ pub fn build(b: *std.Build) void {
 .use_lld = false,
 .root_module = test_mod,
 });
+ test_exe.step.name = b.fmt("{s} {s}", .{ test_exe.step.name, cpu });
 if (!target.result.cpu.has(.x86, .sse2)) {
 test_exe.bundle_compiler_rt = false;
 test_mod.linkLibrary(compiler_rt_lib);
 }
 const test_run = b.addRunArtifact(test_exe);
+ test_run.step.name = b.fmt("{s} {s} {s}", .{ test_run.step.name, triple, cpu });
 b.default_step.dependOn(&test_run.step);
- for ([_]*std.Build.Step{
- &test_exe.step,
- &test_run.step,
- }) |step| step.name = b.fmt("{s} {s}", .{ step.name, cpu });
 }
 }
 }
diff --git a/test/behavior/x86_64/cast.zig b/test/behavior/x86_64/cast.zig
index 6e52dc33a2..dd538d9c67 100644
--- a/test/behavior/x86_64/cast.zig
+++ b/test/behavior/x86_64/cast.zig
@@ -14546,13 +14546,24 @@ test floatCast {
 try test_float_cast.testFloatVectors();
 }

-inline fn intFromFloat(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
+inline fn intFromFloatUnsafe(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
+ @setRuntimeSafety(false);
 return @intFromFloat(rhs);
 }
-test intFromFloat {
- const test_int_from_float = cast(intFromFloat, .{ .compare = .strict });
- try test_int_from_float.testIntsFromFloats();
- try test_int_from_float.testIntVectorsFromFloatVectors();
+test intFromFloatUnsafe {
+ const test_int_from_float_unsafe = cast(intFromFloatUnsafe, .{ .compare = .strict });
+ try test_int_from_float_unsafe.testIntsFromFloats();
+ try test_int_from_float_unsafe.testIntVectorsFromFloatVectors();
+}
+
+inline fn intFromFloatSafe(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
+ @setRuntimeSafety(true);
+ return @intFromFloat(rhs);
+}
+test intFromFloatSafe {
+ const test_int_from_float_safe = cast(intFromFloatSafe, .{ .compare = .strict });
+ try test_int_from_float_safe.testIntsFromFloats();
+ try test_int_from_float_safe.testIntVectorsFromFloatVectors();
 }

 inline fn floatFromInt(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
diff --git a/test/behavior/x86_64/math.zig b/test/behavior/x86_64/math.zig
index 759c8e8de9..5f0473e0d5 100644
--- a/test/behavior/x86_64/math.zig
+++ b/test/behavior/x86_64/math.zig
@@ -35,10 +35,14 @@ pub fn ChangeScalar(comptime Type: type, comptime NewScalar: type) type {
 };
 }
 pub fn AsSignedness(comptime Type: type, comptime signedness: std.builtin.Signedness) type {
- return ChangeScalar(Type, @Type(.{ .int = .{
- .signedness = signedness,
- .bits = @typeInfo(Scalar(Type)).int.bits,
- } }));
+ return switch (@typeInfo(Scalar(Type))) {
+ .int => |int| ChangeScalar(Type, @Type(.{ .int = .{
+ .signedness = signedness,
+ .bits = int.bits,
+ } })),
+ .float => Type,
+ else => @compileError(@typeName(Type)),
+ };
 }
 pub fn AddOneBit(comptime Type: type) type {
 return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
@@ -56,7 +60,10 @@ pub fn DoubleBits(comptime Type: type) type {
 }
 pub fn RoundBitsUp(comptime Type: type, comptime multiple: u16) type {
 return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
- .int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = std.mem.alignForward(u16, int.bits, multiple) } }),
+ .int => |int| @Type(.{ .int = .{
+ .signedness = int.signedness,
+ .bits = std.mem.alignForward(u16, int.bits, multiple),
+ } }),
 .float => Scalar(Type),
 else => @compileError(@typeName(Type)),
 });
@@ -67,61 +74,30 @@ pub fn Log2Int(comptime Type: type) type {
 }
 pub fn Log2IntCeil(comptime Type: type) type {
 return ChangeScalar(Type, math.Log2IntCeil(Scalar(Type)));
 }
-// inline to avoid a runtime `@splat`
-pub inline fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
+pub fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
 return switch (@typeInfo(Type)) {
 else => scalar,
 .vector => @splat(scalar),
 };
 }
-// inline to avoid a runtime `@select`
-inline fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
+pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
+ const Int = ChangeScalar(@TypeOf(rhs), switch (@typeInfo(Scalar(@TypeOf(rhs)))) {
+ .int, .comptime_int => Scalar(@TypeOf(rhs)),
+ .float => |float| @Type(.{ .int = .{
+ .signedness = .signed,
+ .bits = float.bits,
+ } }),
+ else => @compileError(@typeName(@TypeOf(rhs))),
+ });
+ return @as(Int, @bitCast(rhs)) < splat(Int, 0);
+}
+pub fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
 return switch (@typeInfo(@TypeOf(cond))) {
 .bool => if (cond) lhs else rhs,
 .vector => @select(Scalar(@TypeOf(lhs)), cond, lhs, rhs),
 else => @compileError(@typeName(@TypeOf(cond))),
 };
 }
-pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
- const ScalarInt = @Type(.{ .int = .{
- .signedness = .unsigned,
- .bits = @bitSizeOf(Scalar(@TypeOf(rhs))),
- } });
- const VectorInt = ChangeScalar(@TypeOf(rhs), ScalarInt);
- return @as(VectorInt, @bitCast(rhs)) & splat(VectorInt, @as(ScalarInt, 1) << @bitSizeOf(ScalarInt) - 1) != splat(VectorInt, 0);
-}
-fn boolAnd(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
- switch (@typeInfo(@TypeOf(lhs))) {
- .bool => return lhs and rhs,
- .vector => |vector| switch (vector.child) {
- bool => {
- const Bits = @Type(.{ .int = .{ .signedness = .unsigned, .bits = vector.len } });
- const lhs_bits: Bits = @bitCast(lhs);
- const rhs_bits: Bits = @bitCast(rhs);
- return @bitCast(lhs_bits & rhs_bits);
- },
- else => {},
- },
- else => {},
- }
- @compileError("unsupported boolAnd type: " ++ @typeName(@TypeOf(lhs)));
-}
-fn boolOr(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
- switch (@typeInfo(@TypeOf(lhs))) {
- .bool => return lhs or rhs,
- .vector => |vector| switch (vector.child) {
- bool => {
- const Bits = @Type(.{ .int = .{ .signedness = .unsigned, .bits = vector.len } });
- const lhs_bits: Bits = @bitCast(lhs);
- const rhs_bits: Bits = @bitCast(rhs);
- return @bitCast(lhs_bits | rhs_bits);
- },
- else => {},
- },
- else => {},
- }
- @compileError("unsupported boolOr type: " ++ @typeName(@TypeOf(lhs)));
-}
 pub const Compare = enum { strict, relaxed, approx, approx_int, approx_or_overflow };

 // noinline for a more helpful stack trace
pub noinline fn checkExpected(expected: anytype, actual: @TypeOf(expected), comp
 else => expected != actual,
 .float => switch (compare) {
 .strict, .relaxed => {
- const unequal = boolAnd(expected != actual, boolOr(expected == expected, actual == actual));
+ const unequal = (expected != actual) & ((expected == expected) | (actual == actual));
 break :unexpected switch (compare) {
- .strict => boolOr(unequal, sign(expected) != sign(actual)),
+ .strict => unequal | (sign(expected) != sign(actual)),
 .relaxed => unequal,
 .approx, .approx_int, .approx_or_overflow => comptime unreachable,
 };
 },
@@ -156,10 +132,10 @@ pub noinline fn checkExpected(expected: anytype, actual: @TypeOf(expected), comp
 break :unexpected switch (compare) {
 .strict, .relaxed => comptime unreachable,
 .approx, .approx_int => approx_unequal,
- .approx_or_overflow => boolAnd(approx_unequal, boolOr(boolAnd(
- @abs(expected) != splat(Expected, inf(Expected)),
- @abs(actual) != splat(Expected, inf(Expected)),
- ), sign(expected) != sign(actual))),
+ .approx_or_overflow => approx_unequal &
+ (((@abs(expected) != splat(Expected, inf(Expected))) &
+ (@abs(actual) != splat(Expected, inf(Expected)))) |
+ (sign(expected) != sign(actual))),
 };
 },
 },
diff --git a/test/behavior/x86_64/unary.zig b/test/behavior/x86_64/unary.zig
index 132d17b42d..5fd6f137b2 100644
--- a/test/behavior/x86_64/unary.zig
+++ b/test/behavior/x86_64/unary.zig
@@ -1,9 +1,11 @@
+const AsSignedness = math.AsSignedness;
 const checkExpected = math.checkExpected;
 const Compare = math.Compare;
 const fmax = math.fmax;
 const fmin = math.fmin;
 const Gpr = math.Gpr;
 const inf = math.inf;
+const Log2IntCeil = math.Log2IntCeil;
 const math = @import("math.zig");
 const nan = math.nan;
 const RoundBitsUp = math.RoundBitsUp;
@@ -56,6 +58,10 @@ fn unary(comptime op: anytype, comptime opts: struct {
 f128 => libc_name ++ "q",
 else => break :libc,
 },
+ .library_name = switch (@import("builtin").object_format) {
+ else => null,
+ .coff => "compiler_rt",
+ },
 });
 switch (@typeInfo(Type)) {
 else => break :expected libc_func(imm_arg),
@@ -98,6 +104,10 @@ fn unary(comptime op: anytype, comptime opts: struct {
 imm_arg,
 );
 }
+ fn testBools() !void {
+ try testArgs(bool, false);
+ try testArgs(bool, true);
+ }
 fn testIntTypes() !void {
 try testArgs(i1, undefined);
 try testArgs(u1, undefined);
@@ -4804,16 +4814,27 @@ fn unary(comptime op: anytype, comptime opts: struct {
 };
 }

-inline fn bitNot(comptime Type: type, rhs: Type) @TypeOf(~rhs) {
+inline fn boolNot(comptime Type: type, rhs: Type) Type {
+ return !rhs;
+}
+test boolNot {
+ const test_bool_not = unary(boolNot, .{});
+ try test_bool_not.testBools();
+ try test_bool_not.testBoolVectors();
+}
+
+inline fn bitNot(comptime Type: type, rhs: Type) Type {
 return ~rhs;
 }
 test bitNot {
 const test_bit_not = unary(bitNot, .{});
+ try test_bit_not.testBools();
+ try test_bit_not.testBoolVectors();
 try test_bit_not.testInts();
 try test_bit_not.testIntVectors();
 }

-inline fn clz(comptime Type: type, rhs: Type) @TypeOf(@clz(rhs)) {
+inline fn clz(comptime Type: type, rhs: Type) Log2IntCeil(Type) {
 return @clz(rhs);
 }
 test clz {
@@ -4822,7 +4843,7 @@ test clz {
 try test_clz.testIntVectors();
 }

-inline fn ctz(comptime Type: type, rhs: Type) @TypeOf(@ctz(rhs)) {
+inline fn ctz(comptime Type: type, rhs: Type) Log2IntCeil(Type) {
 return @ctz(rhs);
 }
 test ctz {
@@ -4831,7 +4852,7 @@ test ctz {
 try test_ctz.testIntVectors();
 }

-inline fn popCount(comptime Type: type, rhs: Type) @TypeOf(@popCount(rhs)) {
+inline fn popCount(comptime Type: type, rhs: Type) Log2IntCeil(Type) {
 return @popCount(rhs);
 }
 test popCount {
@@ -4849,7 +4870,7 @@ test byteSwap {
 try test_byte_swap.testIntVectors();
 }

-inline fn bitReverse(comptime Type: type, rhs: Type) @TypeOf(@bitReverse(rhs)) {
+inline fn bitReverse(comptime Type: type, rhs: Type) Type {
 return @bitReverse(rhs);
 }
 test bitReverse {
@@ -4858,7 +4879,7 @@ test bitReverse {
 try test_bit_reverse.testIntVectors();
 }

-inline fn sqrt(comptime Type: type, rhs: Type) @TypeOf(@sqrt(rhs)) {
+inline fn sqrt(comptime Type: type, rhs: Type) Type {
 return @sqrt(rhs);
 }
 test sqrt {
@@ -4867,7 +4888,7 @@ test sqrt {
 try test_sqrt.testFloatVectors();
 }

-inline fn sin(comptime Type: type, rhs: Type) @TypeOf(@sin(rhs)) {
+inline fn sin(comptime Type: type, rhs: Type) Type {
 return @sin(rhs);
 }
 test sin {
@@ -4876,7 +4897,7 @@ test sin {
 try test_sin.testFloatVectors();
 }

-inline fn cos(comptime Type: type, rhs: Type) @TypeOf(@cos(rhs)) {
+inline fn cos(comptime Type: type, rhs: Type) Type {
 return @cos(rhs);
 }
 test cos {
@@ -4885,7 +4906,7 @@ test cos {
 try test_cos.testFloatVectors();
 }

-inline fn tan(comptime Type: type, rhs: Type) @TypeOf(@tan(rhs)) {
+inline fn tan(comptime Type: type, rhs: Type) Type {
 return @tan(rhs);
 }
 test tan {
@@ -4894,7 +4915,7 @@ test tan {
 try test_tan.testFloatVectors();
 }

-inline fn exp(comptime Type: type, rhs: Type) @TypeOf(@exp(rhs)) {
+inline fn exp(comptime Type: type, rhs: Type) Type {
 return @exp(rhs);
 }
 test exp {
@@ -4903,7 +4924,7 @@ test exp {
 try test_exp.testFloatVectors();
 }

-inline fn exp2(comptime Type: type, rhs: Type) @TypeOf(@exp2(rhs)) {
+inline fn exp2(comptime Type: type, rhs: Type) Type {
 return @exp2(rhs);
 }
 test exp2 {
@@ -4912,7 +4933,7 @@ test exp2 {
 try test_exp2.testFloatVectors();
 }

-inline fn log(comptime Type: type, rhs: Type) @TypeOf(@log(rhs)) {
+inline fn log(comptime Type: type, rhs: Type) Type {
 return @log(rhs);
 }
 test log {
@@ -4921,7 +4942,7 @@ test log {
 try test_log.testFloatVectors();
 }

-inline fn log2(comptime Type: type, rhs: Type) @TypeOf(@log2(rhs)) {
+inline fn log2(comptime Type: type, rhs: Type) Type {
 return @log2(rhs);
 }
 test log2 {
@@ -4930,7 +4951,7 @@ test log2 {
 try test_log2.testFloatVectors();
 }

-inline fn log10(comptime Type: type, rhs: Type) @TypeOf(@log10(rhs)) {
+inline fn log10(comptime Type: type, rhs: Type) Type {
 return @log10(rhs);
 }
 test log10 {
@@ -4939,7 +4960,7 @@ test log10 {
 try test_log10.testFloatVectors();
 }

-inline fn abs(comptime Type: type, rhs: Type) @TypeOf(@abs(rhs)) {
+inline fn abs(comptime Type: type, rhs: Type) AsSignedness(Type, .unsigned) {
 return @abs(rhs);
 }
 test abs {
@@ -4950,7 +4971,7 @@ test abs {
 try test_abs.testFloatVectors();
 }

-inline fn floor(comptime Type: type, rhs: Type) @TypeOf(@floor(rhs)) {
+inline fn floor(comptime Type: type, rhs: Type) Type {
 return @floor(rhs);
 }
 test floor {
@@ -4959,7 +4980,7 @@ test floor {
 try test_floor.testFloatVectors();
 }

-inline fn ceil(comptime Type: type, rhs: Type) @TypeOf(@ceil(rhs)) {
+inline fn ceil(comptime Type: type, rhs: Type) Type {
 return @ceil(rhs);
 }
 test ceil {
@@ -4968,7 +4989,7 @@ test ceil {
 try test_ceil.testFloatVectors();
 }

-inline fn round(comptime Type: type, rhs: Type) @TypeOf(@round(rhs)) {
+inline fn round(comptime Type: type, rhs: Type) Type {
 return @round(rhs);
 }
 test round {
@@ -4977,7 +4998,7 @@ test round {
 try test_round.testFloatVectors();
 }

-inline fn trunc(comptime Type: type, rhs: Type) @TypeOf(@trunc(rhs)) {
+inline fn trunc(comptime Type: type, rhs: Type) Type {
 return @trunc(rhs);
 }
 test trunc {
@@ -4986,7 +5007,7 @@ test trunc {
 try test_trunc.testFloatVectors();
 }

-inline fn negate(comptime Type: type, rhs: Type) @TypeOf(-rhs) {
+inline fn negate(comptime Type: type, rhs: Type) Type {
 return -rhs;
 }
 test negate {
@@ -5098,40 +5119,40 @@ test reduceXor {
 try test_reduce_xor.testIntVectors();
 }

-inline fn reduceMin(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
+inline fn reduceMinUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
 return @reduce(.Min, rhs);
 }
-test reduceMin {
- const test_reduce_min = unary(reduceMin, .{});
- try test_reduce_min.testIntVectors();
- try test_reduce_min.testFloatVectors();
+test reduceMinUnoptimized {
+ const test_reduce_min_unoptimized = unary(reduceMinUnoptimized, .{});
+ try test_reduce_min_unoptimized.testIntVectors();
+ try test_reduce_min_unoptimized.testFloatVectors();
 }

-inline fn reduceMax(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
+inline fn reduceMaxUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
 return @reduce(.Max, rhs);
 }
-test reduceMax {
- const test_reduce_max = unary(reduceMax, .{});
- try test_reduce_max.testIntVectors();
- try test_reduce_max.testFloatVectors();
+test reduceMaxUnoptimized {
+ const test_reduce_max_unoptimized = unary(reduceMaxUnoptimized, .{});
+ try test_reduce_max_unoptimized.testIntVectors();
+ try test_reduce_max_unoptimized.testFloatVectors();
 }

-inline fn reduceAdd(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
+inline fn reduceAddUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
 return @reduce(.Add, rhs);
 }
-test reduceAdd {
- const test_reduce_add = unary(reduceAdd, .{});
- try test_reduce_add.testIntVectors();
- try test_reduce_add.testFloatVectors();
+test reduceAddUnoptimized {
+ const test_reduce_add_unoptimized = unary(reduceAddUnoptimized, .{});
+ try test_reduce_add_unoptimized.testIntVectors();
+ try test_reduce_add_unoptimized.testFloatVectors();
 }

-inline fn reduceMul(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
+inline fn reduceMulUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
 return @reduce(.Mul, rhs);
 }
-test reduceMul {
- const test_reduce_mul = unary(reduceMul, .{});
- try test_reduce_mul.testIntVectors();
- try test_reduce_mul.testFloatVectors();
+test reduceMulUnoptimized {
+ const test_reduce_mul_unoptimized = unary(reduceMulUnoptimized, .{});
+ try test_reduce_mul_unoptimized.testIntVectors();
+ try test_reduce_mul_unoptimized.testFloatVectors();
 }

 inline fn reduceMinOptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
