-rw-r--r--  lib/compiler_rt/addf3_test.zig | 3
-rw-r--r--  lib/compiler_rt/divtf3_test.zig | 4
-rw-r--r--  lib/compiler_rt/float_from_int_test.zig | 22
-rw-r--r--  lib/compiler_rt/fma.zig | 5
-rw-r--r--  lib/compiler_rt/fmodx_test.zig | 2
-rw-r--r--  lib/compiler_rt/mulf3_test.zig | 4
-rw-r--r--  lib/compiler_rt/udivmodei4.zig | 6
-rw-r--r--  lib/std/Build.zig | 1
-rw-r--r--  lib/std/Build/Cache.zig | 8
-rw-r--r--  lib/std/Thread/Condition.zig | 4
-rw-r--r--  lib/std/Thread/Semaphore.zig | 2
-rw-r--r--  lib/std/array_hash_map.zig | 15
-rw-r--r--  lib/std/array_list.zig | 11
-rw-r--r--  lib/std/base64.zig | 2
-rw-r--r--  lib/std/bit_set.zig | 15
-rw-r--r--  lib/std/compress/deflate/huffman_code.zig | 2
-rw-r--r--  lib/std/compress/gzip.zig | 2
-rw-r--r--  lib/std/compress/zlib.zig | 2
-rw-r--r--  lib/std/crypto.zig | 2
-rw-r--r--  lib/std/crypto/25519/x25519.zig | 4
-rw-r--r--  lib/std/crypto/aegis.zig | 15
-rw-r--r--  lib/std/crypto/aes.zig | 10
-rw-r--r--  lib/std/crypto/aes_ocb.zig | 24
-rw-r--r--  lib/std/crypto/argon2.zig | 10
-rw-r--r--  lib/std/crypto/blake3.zig | 2
-rw-r--r--  lib/std/crypto/chacha20.zig | 4
-rw-r--r--  lib/std/crypto/ghash_polyval.zig | 2
-rw-r--r--  lib/std/crypto/hash_composition.zig | 2
-rw-r--r--  lib/std/crypto/hkdf.zig | 2
-rw-r--r--  lib/std/crypto/kyber_d00.zig | 28
-rw-r--r--  lib/std/crypto/pbkdf2.zig | 15
-rw-r--r--  lib/std/crypto/phc_encoding.zig | 4
-rw-r--r--  lib/std/crypto/salsa20.zig | 8
-rw-r--r--  lib/std/crypto/scrypt.zig | 5
-rw-r--r--  lib/std/crypto/sha2.zig | 10
-rw-r--r--  lib/std/fifo.zig | 2
-rw-r--r--  lib/std/fmt.zig | 2
-rw-r--r--  lib/std/fs.zig | 12
-rw-r--r--  lib/std/fs/test.zig | 98
-rw-r--r--  lib/std/hash/xxhash.zig | 12
-rw-r--r--  lib/std/hash_map.zig | 2
-rw-r--r--  lib/std/heap/general_purpose_allocator.zig | 4
-rw-r--r--  lib/std/http/Client.zig | 2
-rw-r--r--  lib/std/io/multi_writer.zig | 1
-rw-r--r--  lib/std/io/test.zig | 6
-rw-r--r--  lib/std/json.zig | 4
-rw-r--r--  lib/std/json/JSONTestSuite_test.zig | 66
-rw-r--r--  lib/std/json/dynamic_test.zig | 12
-rw-r--r--  lib/std/json/static_test.zig | 6
-rw-r--r--  lib/std/json/stringify_test.zig | 36
-rw-r--r--  lib/std/json/test.zig | 5
-rw-r--r--  lib/std/math.zig | 4
-rw-r--r--  lib/std/math/big/int_test.zig | 56
-rw-r--r--  lib/std/math/big/rational.zig | 6
-rw-r--r--  lib/std/math/ilogb.zig | 6
-rw-r--r--  lib/std/math/log10.zig | 1
-rw-r--r--  lib/std/math/log_int.zig | 2
-rw-r--r--  lib/std/math/nextafter.zig | 3
-rw-r--r--  lib/std/math/scalbn.zig | 2
-rw-r--r--  lib/std/mem.zig | 10
-rw-r--r--  lib/std/multi_array_list.zig | 16
-rw-r--r--  lib/std/net/test.zig | 5
-rw-r--r--  lib/std/os/linux/io_uring.zig | 26
-rw-r--r--  lib/std/os/linux/test.zig | 6
-rw-r--r--  lib/std/os/test.zig | 42
-rw-r--r--  lib/std/os/windows.zig | 2
-rw-r--r--  lib/std/priority_dequeue.zig | 2
-rw-r--r--  lib/std/priority_queue.zig | 2
-rw-r--r--  lib/std/rand/test.zig | 2
-rw-r--r--  lib/std/segmented_list.zig | 2
-rw-r--r--  lib/std/simd.zig | 11
-rw-r--r--  lib/std/sort.zig | 11
-rw-r--r--  lib/std/time.zig | 2
-rw-r--r--  lib/std/unicode.zig | 41
-rw-r--r--  lib/std/zig/fmt.zig | 3
-rw-r--r--  lib/std/zig/tokenizer.zig | 2
-rw-r--r--  src/arch/x86_64/CodeGen.zig | 1185
-rw-r--r--  src/arch/x86_64/Emit.zig | 12
-rw-r--r--  src/arch/x86_64/Encoding.zig | 47
-rw-r--r--  src/arch/x86_64/Lower.zig | 46
-rw-r--r--  src/arch/x86_64/Mir.zig | 116
-rw-r--r--  src/arch/x86_64/bits.zig | 20
-rw-r--r--  src/arch/x86_64/encodings.zig | 218
-rw-r--r--  test/behavior/bitreverse.zig | 2
-rw-r--r--  test/behavior/byteswap.zig | 2
-rw-r--r--  test/behavior/math.zig | 4
-rw-r--r--  test/behavior/popcount.zig | 1
87 files changed, 1310 insertions(+), 1135 deletions(-)
diff --git a/lib/compiler_rt/addf3_test.zig b/lib/compiler_rt/addf3_test.zig
index 2644bec558..a1e39f1c3f 100644
--- a/lib/compiler_rt/addf3_test.zig
+++ b/lib/compiler_rt/addf3_test.zig
@@ -76,9 +76,6 @@ fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
}
test "subtf3" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
// qNaN - any = qNaN
try test__subtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
diff --git a/lib/compiler_rt/divtf3_test.zig b/lib/compiler_rt/divtf3_test.zig
index 921ac93650..7639c7219d 100644
--- a/lib/compiler_rt/divtf3_test.zig
+++ b/lib/compiler_rt/divtf3_test.zig
@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
const math = std.math;
const testing = std.testing;
@@ -31,9 +30,6 @@ fn test__divtf3(a: f128, b: f128, expectedHi: u64, expectedLo: u64) !void {
}
test "divtf3" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
// NaN / any = NaN
try test__divtf3(math.nan(f128), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0);
// inf / any(except inf and nan) = inf
diff --git a/lib/compiler_rt/float_from_int_test.zig b/lib/compiler_rt/float_from_int_test.zig
index 413d73f1e0..a06ba3a04e 100644
--- a/lib/compiler_rt/float_from_int_test.zig
+++ b/lib/compiler_rt/float_from_int_test.zig
@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
const testing = std.testing;
const math = std.math;
@@ -126,9 +125,6 @@ fn test__floatuntisf(a: u128, expected: f32) !void {
}
test "floattisf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floattisf(0, 0.0);
try test__floattisf(1, 1.0);
@@ -175,9 +171,6 @@ test "floattisf" {
}
test "floatuntisf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floatuntisf(0, 0.0);
try test__floatuntisf(1, 1.0);
@@ -374,9 +367,6 @@ fn test__floatuntidf(a: u128, expected: f64) !void {
}
test "floattidf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floattidf(0, 0.0);
try test__floattidf(1, 1.0);
@@ -447,9 +437,6 @@ test "floattidf" {
}
test "floatuntidf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floatuntidf(0, 0.0);
try test__floatuntidf(1, 1.0);
@@ -595,9 +582,6 @@ test "floatditf" {
}
test "floatunditf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floatunditf(0xffffffffffffffff, 0x403effffffffffff, 0xfffe000000000000);
try test__floatunditf(0xfffffffffffffffe, 0x403effffffffffff, 0xfffc000000000000);
try test__floatunditf(0x8000000000000000, 0x403e000000000000, 0x0);
@@ -619,9 +603,6 @@ fn test__floatuntitf(a: u128, expected: f128) !void {
}
test "floattitf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floattitf(0, 0.0);
try test__floattitf(1, 1.0);
@@ -704,9 +685,6 @@ test "floattitf" {
}
test "floatuntitf" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
try test__floatuntitf(0, 0.0);
try test__floatuntitf(1, 1.0);
diff --git a/lib/compiler_rt/fma.zig b/lib/compiler_rt/fma.zig
index 9580f94fcb..accc4ed36c 100644
--- a/lib/compiler_rt/fma.zig
+++ b/lib/compiler_rt/fma.zig
@@ -6,10 +6,8 @@
//! https://git.musl-libc.org/cgit/musl/tree/src/math/fma.c
const std = @import("std");
-const builtin = @import("builtin");
const math = std.math;
const expect = std.testing.expect;
-const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
@@ -343,9 +341,6 @@ test "64" {
}
test "128" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
const epsilon = 0.000001;
try expect(math.approxEqAbs(f128, fmaq(0.0, 5.0, 9.124), 9.124, epsilon));
diff --git a/lib/compiler_rt/fmodx_test.zig b/lib/compiler_rt/fmodx_test.zig
index d7020a50f2..5bf668134b 100644
--- a/lib/compiler_rt/fmodx_test.zig
+++ b/lib/compiler_rt/fmodx_test.zig
@@ -24,7 +24,7 @@ fn test_fmodx_infs() !void {
test "fmodx" {
if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
+ !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .lzcnt)) return error.SkipZigTest;
try test_fmodx(6.4, 4.0, 2.4);
try test_fmodx(6.4, -4.0, 2.4);
diff --git a/lib/compiler_rt/mulf3_test.zig b/lib/compiler_rt/mulf3_test.zig
index 224e79e231..e770ecaefc 100644
--- a/lib/compiler_rt/mulf3_test.zig
+++ b/lib/compiler_rt/mulf3_test.zig
@@ -3,7 +3,6 @@
// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/test/builtins/Unit/multf3_test.c
const std = @import("std");
-const builtin = @import("builtin");
const math = std.math;
const qnan128: f128 = @bitCast(@as(u128, 0x7fff800000000000) << 64);
const inf128: f128 = @bitCast(@as(u128, 0x7fff000000000000) << 64);
@@ -49,9 +48,6 @@ fn makeNaN128(rand: u64) f128 {
return @bitCast(int_result);
}
test "multf3" {
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .bmi, .lzcnt })) return error.SkipZigTest;
-
// qNaN * any = qNaN
try test__multf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig
index 26b9a501fa..ff37a0cd27 100644
--- a/lib/compiler_rt/udivmodei4.zig
+++ b/lib/compiler_rt/udivmodei4.zig
@@ -129,10 +129,8 @@ pub fn __umodei4(r_p: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize)
}
test "__udivei4/__umodei4" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const RndGen = std.rand.DefaultPrng;
var rnd = RndGen.init(42);
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 5316f405ae..9b1677c40f 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -2156,5 +2156,6 @@ pub fn hex64(x: u64) [16]u8 {
}
test {
+ _ = Cache;
_ = Step;
}
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index 5dcdeca079..a5943be6bb 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -1007,8 +1007,6 @@ test "cache file and then recall it" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@@ -1075,8 +1073,6 @@ test "check that changing a file makes cache fail" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@@ -1151,8 +1147,6 @@ test "no file inputs" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@@ -1201,8 +1195,6 @@ test "Manifest with files added after initial hash work" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig
index 549ea623dd..9b8ef919a6 100644
--- a/lib/std/Thread/Condition.zig
+++ b/lib/std/Thread/Condition.zig
@@ -324,6 +324,8 @@ test "Condition - wait and signal" {
return error.SkipZigTest;
}
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const num_threads = 4;
const MultiWait = struct {
@@ -369,8 +371,6 @@ test "Condition - signal" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const num_threads = 4;
const SignalTest = struct {
diff --git a/lib/std/Thread/Semaphore.zig b/lib/std/Thread/Semaphore.zig
index 1b182d4c2a..0c04e8a859 100644
--- a/lib/std/Thread/Semaphore.zig
+++ b/lib/std/Thread/Semaphore.zig
@@ -39,6 +39,8 @@ test "Thread.Semaphore" {
return error.SkipZigTest;
}
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
const TestContext = struct {
sem: *Semaphore,
n: *i32,
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index 5090bc0d81..13ade92ceb 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -1,5 +1,4 @@
const std = @import("std.zig");
-const builtin = @import("builtin");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
@@ -2138,8 +2137,6 @@ test "ensure capacity leak" {
}
test "big map" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
@@ -2193,8 +2190,6 @@ test "big map" {
}
test "clone" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var original = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer original.deinit();
@@ -2221,8 +2216,6 @@ test "clone" {
}
test "shrink" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
@@ -2263,8 +2256,6 @@ test "shrink" {
}
test "pop" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
@@ -2283,8 +2274,6 @@ test "pop" {
}
test "popOrNull" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
@@ -2305,7 +2294,7 @@ test "popOrNull" {
}
test "reIndex" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
var map = ArrayHashMap(i32, i32, AutoContext(i32), true).init(std.testing.allocator);
defer map.deinit();
@@ -2351,8 +2340,6 @@ test "auto store_hash" {
}
test "sort" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 03e32278cf..e50eb92041 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -1,5 +1,4 @@
const std = @import("std.zig");
-const builtin = @import("builtin");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
@@ -1184,8 +1183,6 @@ test "std.ArrayList/ArrayListUnmanaged.initCapacity" {
}
test "std.ArrayList/ArrayListUnmanaged.clone" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const a = testing.allocator;
{
var array = ArrayList(i32).init(a);
@@ -1227,8 +1224,6 @@ test "std.ArrayList/ArrayListUnmanaged.clone" {
}
test "std.ArrayList/ArrayListUnmanaged.basic" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
@@ -1513,8 +1508,6 @@ test "std.ArrayList/ArrayListUnmanaged.insert" {
}
test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
@@ -1561,8 +1554,6 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
}
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const a = arena.allocator();
@@ -1734,8 +1725,6 @@ test "shrink still sets length when resizing is disabled" {
}
test "shrinkAndFree with a copy" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
const a = failing_allocator.allocator();
diff --git a/lib/std/base64.zig b/lib/std/base64.zig
index bfcdabaec5..2ec8fb5678 100644
--- a/lib/std/base64.zig
+++ b/lib/std/base64.zig
@@ -363,8 +363,6 @@ test "base64" {
}
test "base64 padding dest overflow" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const input = "foo";
var expect: [128]u8 = undefined;
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index f4bad13f8a..d87c1d26f6 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -1637,10 +1637,8 @@ fn testStaticBitSet(comptime Set: type) !void {
}
test "IntegerBitSet" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try testStaticBitSet(IntegerBitSet(0));
try testStaticBitSet(IntegerBitSet(1));
@@ -1653,10 +1651,7 @@ test "IntegerBitSet" {
}
test "ArrayBitSet" {
- switch (builtin.zig_backend) {
- .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
inline for (.{ 0, 1, 2, 31, 32, 33, 63, 64, 65, 254, 500, 3000 }) |size| {
try testStaticBitSet(ArrayBitSet(u8, size));
@@ -1668,8 +1663,6 @@ test "ArrayBitSet" {
}
test "DynamicBitSetUnmanaged" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const allocator = std.testing.allocator;
var a = try DynamicBitSetUnmanaged.initEmpty(allocator, 300);
try testing.expectEqual(@as(usize, 0), a.count());
@@ -1723,8 +1716,6 @@ test "DynamicBitSetUnmanaged" {
}
test "DynamicBitSet" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const allocator = std.testing.allocator;
var a = try DynamicBitSet.initEmpty(allocator, 300);
try testing.expectEqual(@as(usize, 0), a.count());
diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig
index 864da16e61..4fea45f863 100644
--- a/lib/std/compress/deflate/huffman_code.zig
+++ b/lib/std/compress/deflate/huffman_code.zig
@@ -360,8 +360,6 @@ fn byFreq(context: void, a: LiteralNode, b: LiteralNode) bool {
}
test "generate a Huffman code from an array of frequencies" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var freqs: [19]u16 = [_]u16{
8, // 0
1, // 1
diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig
index b0928850e1..f6fb038ae3 100644
--- a/lib/std/compress/gzip.zig
+++ b/lib/std/compress/gzip.zig
@@ -174,8 +174,6 @@ fn testReader(data: []const u8, comptime expected: []const u8) !void {
// https://tools.ietf.org/rfc/rfc1952.txt length=25037 bytes
// SHA256=164ef0897b4cbec63abf1b57f069f3599bd0fb7c72c2a4dee21bd7e03ec9af67
test "compressed data" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testReader(
@embedFile("testdata/rfc1952.txt.gz"),
@embedFile("testdata/rfc1952.txt"),
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
index 8dd9853e27..2e2edce434 100644
--- a/lib/std/compress/zlib.zig
+++ b/lib/std/compress/zlib.zig
@@ -199,8 +199,6 @@ fn testDecompress(data: []const u8, expected: []const u8) !void {
// https://tools.ietf.org/rfc/rfc1951.txt length=36944 bytes
// SHA256=5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009
test "compressed data" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const rfc1951_txt = @embedFile("testdata/rfc1951.txt");
// Compressed with compression level = 0
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index 8cb2cfd90d..c548f01c07 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -314,8 +314,6 @@ test "CSPRNG" {
}
test "issue #4532: no index out of bounds" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const types = [_]type{
hash.Md5,
hash.Sha1,
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
index 7da73f2480..f5299082d2 100644
--- a/lib/std/crypto/25519/x25519.zig
+++ b/lib/std/crypto/25519/x25519.zig
@@ -137,8 +137,6 @@ test "x25519 rfc7748 one iteration" {
}
test "x25519 rfc7748 1,000 iterations" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// These iteration tests are slow so we always skip them. Results have been verified.
if (true) {
return error.SkipZigTest;
@@ -161,8 +159,6 @@ test "x25519 rfc7748 1,000 iterations" {
}
test "x25519 rfc7748 1,000,000 iterations" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
if (true) {
return error.SkipZigTest;
}
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index 5a7f2ec017..18047a13b5 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -17,7 +17,6 @@
//! https://datatracker.ietf.org/doc/draft-irtf-cfrg-aegis-aead/
const std = @import("std");
-const builtin = @import("builtin");
const crypto = std.crypto;
const mem = std.mem;
const assert = std.debug.assert;
@@ -514,8 +513,6 @@ const htest = @import("test.zig");
const testing = std.testing;
test "Aegis128L test vector 1" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key: [Aegis128L.key_length]u8 = [_]u8{ 0x10, 0x01 } ++ [_]u8{0x00} ** 14;
const nonce: [Aegis128L.nonce_length]u8 = [_]u8{ 0x10, 0x00, 0x02 } ++ [_]u8{0x00} ** 13;
const ad = [8]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
@@ -539,8 +536,6 @@ test "Aegis128L test vector 1" {
}
test "Aegis128L test vector 2" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key: [Aegis128L.key_length]u8 = [_]u8{0x00} ** 16;
const nonce: [Aegis128L.nonce_length]u8 = [_]u8{0x00} ** 16;
const ad = [_]u8{};
@@ -558,8 +553,6 @@ test "Aegis128L test vector 2" {
}
test "Aegis128L test vector 3" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key: [Aegis128L.key_length]u8 = [_]u8{0x00} ** 16;
const nonce: [Aegis128L.nonce_length]u8 = [_]u8{0x00} ** 16;
const ad = [_]u8{};
@@ -576,8 +569,6 @@ test "Aegis128L test vector 3" {
}
test "Aegis256 test vector 1" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key: [Aegis256.key_length]u8 = [_]u8{ 0x10, 0x01 } ++ [_]u8{0x00} ** 30;
const nonce: [Aegis256.nonce_length]u8 = [_]u8{ 0x10, 0x00, 0x02 } ++ [_]u8{0x00} ** 29;
const ad = [8]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
@@ -601,8 +592,6 @@ test "Aegis256 test vector 1" {
}
test "Aegis256 test vector 2" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key: [Aegis256.key_length]u8 = [_]u8{0x00} ** 32;
const nonce: [Aegis256.nonce_length]u8 = [_]u8{0x00} ** 32;
const ad = [_]u8{};
@@ -620,8 +609,6 @@ test "Aegis256 test vector 2" {
}
test "Aegis256 test vector 3" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key: [Aegis256.key_length]u8 = [_]u8{0x00} ** 32;
const nonce: [Aegis256.nonce_length]u8 = [_]u8{0x00} ** 32;
const ad = [_]u8{};
@@ -638,8 +625,6 @@ test "Aegis256 test vector 3" {
}
test "Aegis MAC" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key = [_]u8{0x00} ** Aegis128LMac.key_length;
var msg: [64]u8 = undefined;
for (&msg, 0..) |*m, i| {
diff --git a/lib/std/crypto/aes.zig b/lib/std/crypto/aes.zig
index 875898d2e5..fb3246b0a0 100644
--- a/lib/std/crypto/aes.zig
+++ b/lib/std/crypto/aes.zig
@@ -6,7 +6,7 @@ const has_aesni = std.Target.x86.featureSetHas(builtin.cpu.features, .aes);
const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
// C backend doesn't currently support passing vectors to inline asm.
-const impl = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c and has_aesni and has_avx) impl: {
+const impl = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_c and builtin.zig_backend != .stage2_x86_64 and has_aesni and has_avx) impl: {
break :impl @import("aes/aesni.zig");
} else if (builtin.cpu.arch == .aarch64 and builtin.zig_backend != .stage2_c and has_armaes)
impl: {
@@ -55,8 +55,6 @@ test "ctr" {
}
test "encrypt" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// Appendix B
{
const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
@@ -86,8 +84,6 @@ test "encrypt" {
}
test "decrypt" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// Appendix B
{
const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
@@ -117,8 +113,6 @@ test "decrypt" {
}
test "expand 128-bit key" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
const exp_enc = [_]*const [32:0]u8{
"2b7e151628aed2a6abf7158809cf4f3c", "a0fafe1788542cb123a339392a6c7605", "f2c295f27a96b9435935807a7359f67f", "3d80477d4716fe3e1e237e446d7a883b", "ef44a541a8525b7fb671253bdb0bad00", "d4d1c6f87c839d87caf2b8bc11f915bc", "6d88a37a110b3efddbf98641ca0093fd", "4e54f70e5f5fc9f384a64fb24ea6dc4f", "ead27321b58dbad2312bf5607f8d292f", "ac7766f319fadc2128d12941575c006e", "d014f9a8c9ee2589e13f0cc8b6630ca6",
@@ -141,8 +135,6 @@ test "expand 128-bit key" {
}
test "expand 256-bit key" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key = [_]u8{
0x60, 0x3d, 0xeb, 0x10,
0x15, 0xca, 0x71, 0xbe,
diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig
index 0209d07ec6..26f98ff362 100644
--- a/lib/std/crypto/aes_ocb.zig
+++ b/lib/std/crypto/aes_ocb.zig
@@ -261,10 +261,8 @@ inline fn xorWith(x: *Block, y: Block) void {
const hexToBytes = std.fmt.hexToBytes;
test "AesOcb test vector 1" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var k: [Aes128Ocb.key_length]u8 = undefined;
var nonce: [Aes128Ocb.nonce_length]u8 = undefined;
@@ -283,10 +281,8 @@ test "AesOcb test vector 1" {
}
test "AesOcb test vector 2" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var k: [Aes128Ocb.key_length]u8 = undefined;
var nonce: [Aes128Ocb.nonce_length]u8 = undefined;
@@ -307,10 +303,8 @@ test "AesOcb test vector 2" {
}
test "AesOcb test vector 3" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var k: [Aes128Ocb.key_length]u8 = undefined;
var nonce: [Aes128Ocb.nonce_length]u8 = undefined;
@@ -334,10 +328,8 @@ test "AesOcb test vector 3" {
}
test "AesOcb test vector 4" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var k: [Aes128Ocb.key_length]u8 = undefined;
var nonce: [Aes128Ocb.nonce_length]u8 = undefined;
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index 05dd686aac..3625bb39fd 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -622,8 +622,6 @@ pub fn strVerify(
}
test "argon2d" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const password = [_]u8{0x01} ** 32;
const salt = [_]u8{0x02} ** 16;
const secret = [_]u8{0x03} ** 8;
@@ -649,8 +647,6 @@ test "argon2d" {
}
test "argon2i" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const password = [_]u8{0x01} ** 32;
const salt = [_]u8{0x02} ** 16;
const secret = [_]u8{0x03} ** 8;
@@ -676,8 +672,6 @@ test "argon2i" {
}
test "argon2id" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const password = [_]u8{0x01} ** 32;
const salt = [_]u8{0x02} ** 16;
const secret = [_]u8{0x03} ** 8;
@@ -703,8 +697,6 @@ test "argon2id" {
}
test "kdf" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const password = "password";
const salt = "somesalt";
@@ -936,8 +928,6 @@ test "password hash and password verify" {
}
test "kdf derived key length" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const allocator = std.testing.allocator;
const password = "testpass";
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 08c38ec63b..6923ec378d 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -682,8 +682,6 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
}
test "BLAKE3 reference test cases" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var hash_state = Blake3.init(.{});
const hash = &hash_state;
var keyed_hash_state = Blake3.init(.{ .key = reference_test.key.* });
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index c6a2ddafe7..1b13634d96 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -759,8 +759,6 @@ fn XChaChaPoly1305(comptime rounds_nb: usize) type {
}
test "chacha20 AEAD API" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const aeads = [_]type{ ChaCha20Poly1305, XChaCha20Poly1305 };
const m = "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.";
const ad = "Additional data";
@@ -782,8 +780,6 @@ test "chacha20 AEAD API" {
// https://tools.ietf.org/html/rfc7539#section-2.4.2
test "crypto.chacha20 test vector sunscreen" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const expected_result = [_]u8{
0x6e, 0x2e, 0x35, 0x9a, 0x25, 0x68, 0xf9, 0x80,
0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81,
diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig
index 3a78db6214..47e2a65a94 100644
--- a/lib/std/crypto/ghash_polyval.zig
+++ b/lib/std/crypto/ghash_polyval.zig
@@ -476,8 +476,6 @@ test "ghash2" {
}
test "polyval" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key = [_]u8{0x42} ** 16;
const m = [_]u8{0x69} ** 256;
diff --git a/lib/std/crypto/hash_composition.zig b/lib/std/crypto/hash_composition.zig
index 234a990a83..1ffa3d4c47 100644
--- a/lib/std/crypto/hash_composition.zig
+++ b/lib/std/crypto/hash_composition.zig
@@ -65,8 +65,6 @@ pub const Sha384oSha384 = Composition(sha2.Sha384, sha2.Sha384);
pub const Sha512oSha512 = Composition(sha2.Sha512, sha2.Sha512);
test "Hash composition" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const Sha256 = sha2.Sha256;
const msg = "test";
diff --git a/lib/std/crypto/hkdf.zig b/lib/std/crypto/hkdf.zig
index f0865a9d5e..9163ba9d15 100644
--- a/lib/std/crypto/hkdf.zig
+++ b/lib/std/crypto/hkdf.zig
@@ -72,8 +72,6 @@ pub fn Hkdf(comptime Hmac: type) type {
const htest = @import("test.zig");
test "Hkdf" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ikm = [_]u8{0x0b} ** 22;
const salt = [_]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c };
const context = [_]u8{ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9 };
diff --git a/lib/std/crypto/kyber_d00.zig b/lib/std/crypto/kyber_d00.zig
index fa1627dbd1..ba6ed67ec8 100644
--- a/lib/std/crypto/kyber_d00.zig
+++ b/lib/std/crypto/kyber_d00.zig
@@ -553,8 +553,6 @@ const inv_ntt_reductions = [_]i16{
};
test "invNTTReductions bounds" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// Checks whether the reductions proposed by invNTTReductions
// don't overflow during invNTT().
var xs = [_]i32{1} ** 256; // start at |x| ≤ q
@@ -658,8 +656,6 @@ fn montReduce(x: i32) i16 {
}
test "Test montReduce" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var rnd = RndGen.init(0);
for (0..1000) |_| {
const bound = comptime @as(i32, Q) * (1 << 15);
@@ -678,8 +674,6 @@ fn feToMont(x: i16) i16 {
}
test "Test feToMont" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var x: i32 = -(1 << 15);
while (x < 1 << 15) : (x += 1) {
const y = feToMont(@as(i16, @intCast(x)));
@@ -713,8 +707,6 @@ fn feBarrettReduce(x: i16) i16 {
}
test "Test Barrett reduction" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var x: i32 = -(1 << 15);
while (x < 1 << 15) : (x += 1) {
var y1 = feBarrettReduce(@as(i16, @intCast(x)));
@@ -735,8 +727,6 @@ fn csubq(x: i16) i16 {
}
test "Test csubq" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var x: i32 = -29439;
while (x < 1 << 15) : (x += 1) {
const y1 = csubq(@as(i16, @intCast(x)));
@@ -1476,8 +1466,6 @@ fn cmov(comptime len: usize, dst: *[len]u8, src: [len]u8, b: u1) void {
}
test "MulHat" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var rnd = RndGen.init(0);
for (0..100) |_| {
@@ -1509,8 +1497,6 @@ test "MulHat" {
}
test "NTT" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var rnd = RndGen.init(0);
for (0..1000) |_| {
@@ -1534,8 +1520,6 @@ test "NTT" {
}
test "Compression" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var rnd = RndGen.init(0);
inline for (.{ 1, 4, 5, 10, 11 }) |d| {
for (0..1000) |_| {
@@ -1548,8 +1532,6 @@ test "Compression" {
}
test "noise" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var seed: [32]u8 = undefined;
for (&seed, 0..) |*s, i| {
s.* = @as(u8, @intCast(i));
@@ -1596,8 +1578,6 @@ test "noise" {
}
test "uniform sampling" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var seed: [32]u8 = undefined;
for (&seed, 0..) |*s, i| {
s.* = @as(u8, @intCast(i));
@@ -1631,8 +1611,6 @@ test "uniform sampling" {
}
test "Polynomial packing" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var rnd = RndGen.init(0);
for (0..1000) |_| {
@@ -1642,8 +1620,6 @@ test "Polynomial packing" {
}
test "Test inner PKE" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var seed: [32]u8 = undefined;
var pt: [32]u8 = undefined;
for (&seed, &pt, 0..) |*s, *p, i| {
@@ -1665,8 +1641,6 @@ test "Test inner PKE" {
}
test "Test happy flow" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var seed: [64]u8 = undefined;
for (&seed, 0..) |*s, i| {
s.* = @as(u8, @intCast(i));
@@ -1693,8 +1667,6 @@ test "Test happy flow" {
const sha2 = crypto.hash.sha2;
test "NIST KAT test" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
inline for (.{
.{ Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547" },
.{ Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5" },
diff --git a/lib/std/crypto/pbkdf2.zig b/lib/std/crypto/pbkdf2.zig
index 126935f02c..2e0318369b 100644
--- a/lib/std/crypto/pbkdf2.zig
+++ b/lib/std/crypto/pbkdf2.zig
@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
const mem = std.mem;
const maxInt = std.math.maxInt;
const OutputTooLongError = std.crypto.errors.OutputTooLongError;
@@ -152,8 +151,6 @@ const HmacSha1 = std.crypto.auth.hmac.HmacSha1;
// RFC 6070 PBKDF2 HMAC-SHA1 Test Vectors
test "RFC 6070 one iteration" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const p = "password";
const s = "salt";
const c = 1;
@@ -169,8 +166,6 @@ test "RFC 6070 one iteration" {
}
test "RFC 6070 two iterations" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const p = "password";
const s = "salt";
const c = 2;
@@ -186,8 +181,6 @@ test "RFC 6070 two iterations" {
}
test "RFC 6070 4096 iterations" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const p = "password";
const s = "salt";
const c = 4096;
@@ -203,8 +196,6 @@ test "RFC 6070 4096 iterations" {
}
test "RFC 6070 16,777,216 iterations" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// These iteration tests are slow so we always skip them. Results have been verified.
if (true) {
return error.SkipZigTest;
@@ -225,8 +216,6 @@ test "RFC 6070 16,777,216 iterations" {
}
test "RFC 6070 multi-block salt and password" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const p = "passwordPASSWORDpassword";
const s = "saltSALTsaltSALTsaltSALTsaltSALTsalt";
const c = 4096;
@@ -242,8 +231,6 @@ test "RFC 6070 multi-block salt and password" {
}
test "RFC 6070 embedded NUL" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const p = "pass\x00word";
const s = "sa\x00lt";
const c = 4096;
@@ -259,8 +246,6 @@ test "RFC 6070 embedded NUL" {
}
test "Very large dk_len" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// This test allocates 8GB of memory and is expected to take several hours to run.
if (true) {
return error.SkipZigTest;
diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig
index 58757cec28..396e30edf5 100644
--- a/lib/std/crypto/phc_encoding.zig
+++ b/lib/std/crypto/phc_encoding.zig
@@ -351,16 +351,12 @@ test "phc format - encoding/decoding" {
}
test "phc format - empty input string" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const s = "";
const v = deserialize(struct { alg_id: []const u8 }, s);
try std.testing.expectError(Error.InvalidEncoding, v);
}
test "phc format - hash without salt" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const s = "$scrypt";
const v = deserialize(struct { alg_id: []const u8, hash: BinValue(16) }, s);
try std.testing.expectError(Error.InvalidEncoding, v);
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
index a253bf5dd9..e5d1dedd25 100644
--- a/lib/std/crypto/salsa20.zig
+++ b/lib/std/crypto/salsa20.zig
@@ -302,7 +302,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
};
}
-const SalsaImpl = if (builtin.cpu.arch == .x86_64) SalsaVecImpl else SalsaNonVecImpl;
+const SalsaImpl = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_x86_64) SalsaVecImpl else SalsaNonVecImpl;
fn keyToWords(key: [32]u8) [8]u32 {
var k: [8]u32 = undefined;
@@ -555,8 +555,6 @@ pub const SealedBox = struct {
const htest = @import("test.zig");
test "(x)salsa20" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const key = [_]u8{0x69} ** 32;
const nonce = [_]u8{0x42} ** 8;
const msg = [_]u8{0} ** 20;
@@ -571,8 +569,6 @@ test "(x)salsa20" {
}
test "xsalsa20poly1305" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var msg: [100]u8 = undefined;
var msg2: [msg.len]u8 = undefined;
var c: [msg.len]u8 = undefined;
@@ -588,8 +584,6 @@ test "xsalsa20poly1305" {
}
test "xsalsa20poly1305 secretbox" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var msg: [100]u8 = undefined;
var msg2: [msg.len]u8 = undefined;
var key: [XSalsa20Poly1305.key_length]u8 = undefined;
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index f830e8f120..8745a3b34e 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -3,7 +3,6 @@
// https://github.com/Tarsnap/scrypt
const std = @import("std");
-const builtin = @import("builtin");
const crypto = std.crypto;
const fmt = std.fmt;
const io = std.io;
@@ -684,8 +683,6 @@ test "unix-scrypt" {
}
test "crypt format" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const str = "$7$C6..../....SodiumChloride$kBGj9fHznVYFQMEn/qDCfrDevf9YDtcDdKvEqHJLV8D";
const params = try crypt_format.deserialize(crypt_format.HashResult(32), str);
var buf: [str.len]u8 = undefined;
@@ -694,8 +691,6 @@ test "crypt format" {
}
test "kdf fast" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const TestVector = struct {
password: []const u8,
salt: []const u8,
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index 4f09294a30..bbb51cc3a3 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -406,16 +406,12 @@ fn Sha2x32(comptime params: Sha2Params32) type {
}
test "sha224 single" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try htest.assertEqualHash(Sha224, "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", "");
try htest.assertEqualHash(Sha224, "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", "abc");
try htest.assertEqualHash(Sha224, "c97ca9a559850ce97a04a96def6d99a9e0e0e2ab14e6b8df265fc0b3", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}
test "sha224 streaming" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var h = Sha224.init(.{});
var out: [28]u8 = undefined;
@@ -436,16 +432,12 @@ test "sha224 streaming" {
}
test "sha256 single" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try htest.assertEqualHash(Sha256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "");
try htest.assertEqualHash(Sha256, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc");
try htest.assertEqualHash(Sha256, "cf5b16a778af8380036ce59e7b0492370b249b11e8f07a51afac45037afee9d1", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
}
test "sha256 streaming" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var h = Sha256.init(.{});
var out: [32]u8 = undefined;
@@ -466,8 +458,6 @@ test "sha256 streaming" {
}
test "sha256 aligned final" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var block = [_]u8{0} ** Sha256.block_length;
var out: [Sha256.digest_length]u8 = undefined;
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index 72fd44e02d..5b19125e02 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -505,8 +505,6 @@ test "LinearFifo(u8, .Dynamic)" {
}
test "LinearFifo" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
inline for ([_]type{ u1, u8, u16, u64 }) |T| {
inline for ([_]LinearFifoBufferType{ LinearFifoBufferType{ .Static = 32 }, .Slice, .Dynamic }) |bt| {
const FifoType = LinearFifo(T, bt);
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 611737161e..4b6514557d 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -2751,8 +2751,6 @@ test "formatType max_depth" {
}
test "positional" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try expectFmt("2 1 0", "{2} {1} {0}", .{ @as(usize, 0), @as(usize, 1), @as(usize, 2) });
try expectFmt("2 1 0", "{2} {1} {}", .{ @as(usize, 0), @as(usize, 1), @as(usize, 2) });
try expectFmt("0 0", "{0} {0}", .{@as(usize, 0)});
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index cf58251c5a..187e2d7ec1 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -3244,15 +3244,15 @@ fn copy_file(fd_in: os.fd_t, fd_out: os.fd_t, maybe_size: ?u64) CopyFileRawError
}
test {
- if (builtin.zig_backend != .stage2_x86_64) {
- if (builtin.os.tag != .wasi) {
- _ = &makeDirAbsolute;
- _ = &makeDirAbsoluteZ;
- _ = &copyFileAbsolute;
+ if (builtin.os.tag != .wasi) {
+ _ = &makeDirAbsolute;
+ _ = &makeDirAbsoluteZ;
+ _ = &copyFileAbsolute;
+ if (builtin.zig_backend != .stage2_x86_64) {
_ = &updateFileAbsolute;
}
- _ = &Dir.copyFile;
}
+ _ = &Dir.copyFile;
_ = @import("fs/test.zig");
_ = @import("fs/path.zig");
_ = @import("fs/file.zig");
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index 2d724a7c2c..97e9b8af71 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -124,8 +124,6 @@ fn testWithAllSupportedPathTypes(test_func: anytype) !void {
}
test "Dir.readLink" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
// Create some targets
@@ -163,8 +161,6 @@ fn testReadLink(dir: Dir, target_path: []const u8, symlink_path: []const u8) !vo
}
test "relative symlink to parent directory" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -182,8 +178,6 @@ test "relative symlink to parent directory" {
}
test "openDir" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
@@ -202,8 +196,6 @@ test "openDir" {
test "accessAbsolute" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -222,8 +214,6 @@ test "accessAbsolute" {
test "openDirAbsolute" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -262,8 +252,6 @@ test "openDir non-cwd parent .." {
else => {},
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -285,8 +273,6 @@ test "openDir non-cwd parent .." {
test "readLinkAbsolute" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -337,8 +323,6 @@ fn testReadLinkAbsolute(target_path: []const u8, symlink_path: []const u8) !void
}
test "Dir.Iterator" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpIterableDir(.{});
defer tmp_dir.cleanup();
@@ -369,8 +353,6 @@ test "Dir.Iterator" {
}
test "Dir.Iterator many entries" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpIterableDir(.{});
defer tmp_dir.cleanup();
@@ -406,8 +388,6 @@ test "Dir.Iterator many entries" {
}
test "Dir.Iterator twice" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpIterableDir(.{});
defer tmp_dir.cleanup();
@@ -441,8 +421,6 @@ test "Dir.Iterator twice" {
}
test "Dir.Iterator reset" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpIterableDir(.{});
defer tmp_dir.cleanup();
@@ -479,8 +457,6 @@ test "Dir.Iterator reset" {
}
test "Dir.Iterator but dir is deleted during iteration" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
@@ -523,8 +499,6 @@ fn contains(entries: *const std.ArrayList(IterableDir.Entry), el: IterableDir.En
test "Dir.realpath smoke test" {
if (!comptime std.os.isGetFdPathSupportedOnTarget(builtin.os)) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
@@ -575,8 +549,6 @@ test "Dir.realpath smoke test" {
}
test "readAllAlloc" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
@@ -613,8 +585,6 @@ test "Dir.statFile" {
// TODO: Re-enable once https://github.com/ziglang/zig/issues/17034 is solved
if (builtin.os.tag == .linux and builtin.link_libc and builtin.abi == .gnu) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_file_name = try ctx.transformPath("test_file");
@@ -630,8 +600,6 @@ test "Dir.statFile" {
}
test "directory operations on files" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_file_name = try ctx.transformPath("test_file");
@@ -661,8 +629,6 @@ test "file operations on directories" {
// TODO: fix this test on FreeBSD. https://github.com/ziglang/zig/issues/1759
if (builtin.os.tag == .freebsd) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_dir_name = try ctx.transformPath("test_dir");
@@ -698,8 +664,6 @@ test "file operations on directories" {
}
test "makeOpenPath parent dirs do not exist" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
@@ -712,8 +676,6 @@ test "makeOpenPath parent dirs do not exist" {
}
test "deleteDir" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_dir_path = try ctx.transformPath("test_dir");
@@ -735,8 +697,6 @@ test "deleteDir" {
}
test "Dir.rename files" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
// Rename on Windows can hit intermittent AccessDenied errors
@@ -779,8 +739,6 @@ test "Dir.rename files" {
}
test "Dir.rename directories" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
// Rename on Windows can hit intermittent AccessDenied errors
@@ -822,8 +780,6 @@ test "Dir.rename directory onto empty dir" {
// TODO: Fix on Windows, see https://github.com/ziglang/zig/issues/6364
if (builtin.os.tag == .windows) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_dir_path = try ctx.transformPath("test_dir");
@@ -845,8 +801,6 @@ test "Dir.rename directory onto non-empty dir" {
// TODO: Fix on Windows, see https://github.com/ziglang/zig/issues/6364
if (builtin.os.tag == .windows) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_dir_path = try ctx.transformPath("test_dir");
@@ -873,8 +827,6 @@ test "Dir.rename file <-> dir" {
// TODO: Fix on Windows, see https://github.com/ziglang/zig/issues/6364
if (builtin.os.tag == .windows) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const test_file_path = try ctx.transformPath("test_file");
@@ -890,8 +842,6 @@ test "Dir.rename file <-> dir" {
}
test "rename" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir1 = tmpDir(.{});
defer tmp_dir1.cleanup();
@@ -914,8 +864,6 @@ test "rename" {
test "renameAbsolute" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp_dir = tmpDir(.{});
defer tmp_dir.cleanup();
@@ -974,8 +922,6 @@ test "openSelfExe" {
}
test "makePath, put some files in it, deleteTree" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
@@ -992,8 +938,6 @@ test "makePath, put some files in it, deleteTree" {
}
test "makePath, put some files in it, deleteTreeMinStackSize" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
@@ -1012,8 +956,6 @@ test "makePath, put some files in it, deleteTreeMinStackSize" {
test "makePath in a directory that no longer exists" {
if (builtin.os.tag == .windows) return error.SkipZigTest; // Windows returns FileBusy if attempting to remove an open dir
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
try tmp.parent_dir.deleteTree(&tmp.sub_path);
@@ -1045,8 +987,6 @@ fn testFilenameLimits(iterable_dir: IterableDir, maxed_filename: []const u8) !vo
}
test "max file name component lengths" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpIterableDir(.{});
defer tmp.cleanup();
@@ -1068,8 +1008,6 @@ test "max file name component lengths" {
}
test "writev, readv" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1112,8 +1050,6 @@ test "writev, readv" {
}
test "pwritev, preadv" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1155,8 +1091,6 @@ test "pwritev, preadv" {
}
test "access file" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const dir_path = try ctx.transformPath("os_test_tmp");
@@ -1173,8 +1107,6 @@ test "access file" {
}
test "sendfile" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1240,8 +1172,6 @@ test "sendfile" {
}
test "copyRangeAll" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1268,8 +1198,6 @@ test "copyRangeAll" {
}
test "copyFile" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
@@ -1300,8 +1228,6 @@ fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
}
test "AtomicFile" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const allocator = ctx.arena.allocator();
@@ -1328,8 +1254,6 @@ test "AtomicFile" {
test "open file with exclusive nonblocking lock twice" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
@@ -1346,8 +1270,6 @@ test "open file with exclusive nonblocking lock twice" {
test "open file with shared and exclusive nonblocking lock" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
@@ -1364,8 +1286,6 @@ test "open file with shared and exclusive nonblocking lock" {
test "open file with exclusive and shared nonblocking lock" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const filename = try ctx.transformPath("file_nonblocking_lock_test.txt");
@@ -1380,8 +1300,6 @@ test "open file with exclusive and shared nonblocking lock" {
}
test "open file with exclusive lock twice, make sure second lock waits" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
if (builtin.single_threaded) return error.SkipZigTest;
if (std.io.is_async) {
@@ -1432,8 +1350,6 @@ test "open file with exclusive lock twice, make sure second lock waits" {
test "open file with exclusive nonblocking lock twice (absolute paths)" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var random_bytes: [12]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
@@ -1468,8 +1384,6 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {
test "walker" {
if (builtin.os.tag == .wasi and builtin.link_libc) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpIterableDir(.{});
defer tmp.cleanup();
@@ -1523,8 +1437,6 @@ test "walker" {
test "walker without fully iterating" {
if (builtin.os.tag == .wasi and builtin.link_libc) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpIterableDir(.{});
defer tmp.cleanup();
@@ -1637,8 +1549,6 @@ test "chmod" {
if (builtin.os.tag == .windows or builtin.os.tag == .wasi)
return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1661,8 +1571,6 @@ test "chown" {
if (builtin.os.tag == .windows or builtin.os.tag == .wasi)
return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1678,8 +1586,6 @@ test "chown" {
}
test "File.Metadata" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1763,8 +1669,6 @@ test "delete a read-only file on windows" {
if (builtin.os.tag != .windows)
return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@@ -1795,8 +1699,6 @@ test "delete a read-only file on windows" {
test "delete a setAsCwd directory on Windows" {
if (builtin.os.tag != .windows) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
// Set tmp dir as current working directory.
try tmp.dir.setAsCwd();
diff --git a/lib/std/hash/xxhash.zig b/lib/std/hash/xxhash.zig
index 7454abd711..6ca57973a1 100644
--- a/lib/std/hash/xxhash.zig
+++ b/lib/std/hash/xxhash.zig
@@ -862,8 +862,6 @@ test "xxhash3 iterative api" {
}
test "xxhash64" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const H = XxHash64;
try testExpect(H, 0, "", 0xef46db3751d8e999);
try testExpect(H, 0, "a", 0xd24ec4f1a98c6e5b);
@@ -875,8 +873,6 @@ test "xxhash64" {
}
test "xxhash64 smhasher" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const Test = struct {
fn do() !void {
try expectEqual(verify.smhasher(XxHash64.hash), 0x024B7CF4);
@@ -888,8 +884,6 @@ test "xxhash64 smhasher" {
}
test "xxhash64 iterative api" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const Test = struct {
fn do() !void {
try verify.iterativeApi(XxHash64);
@@ -901,8 +895,6 @@ test "xxhash64 iterative api" {
}
test "xxhash32" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const H = XxHash32;
try testExpect(H, 0, "", 0x02cc5d05);
@@ -915,8 +907,6 @@ test "xxhash32" {
}
test "xxhash32 smhasher" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const Test = struct {
fn do() !void {
try expectEqual(verify.smhasher(XxHash32.hash), 0xBA88B743);
@@ -928,8 +918,6 @@ test "xxhash32 smhasher" {
}
test "xxhash32 iterative api" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const Test = struct {
fn do() !void {
try verify.iterativeApi(XxHash32);
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index b2fe591a02..40a412bf3c 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -1873,8 +1873,6 @@ test "std.hash_map multiple removes on same metadata" {
}
test "std.hash_map put and remove loop in random order" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 235c16ed96..6dc6df3998 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -1042,8 +1042,6 @@ const TraceKind = enum {
const test_config = Config{};
test "small allocations - free in same order" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
@@ -1063,8 +1061,6 @@ test "small allocations - free in same order" {
}
test "small allocations - free in reverse order" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 2a6f12103f..478e3e8369 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -491,8 +491,6 @@ pub const Response = struct {
}
test parseInt3 {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const expectEqual = testing.expectEqual;
try expectEqual(@as(u10, 0), parseInt3("000".*));
try expectEqual(@as(u10, 418), parseInt3("418".*));
diff --git a/lib/std/io/multi_writer.zig b/lib/std/io/multi_writer.zig
index 57d58d6713..9cd4600e63 100644
--- a/lib/std/io/multi_writer.zig
+++ b/lib/std/io/multi_writer.zig
@@ -36,7 +36,6 @@ pub fn multiWriter(streams: anytype) MultiWriter(@TypeOf(streams)) {
const testing = std.testing;
test "MultiWriter" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
var f = try tmp.dir.createFile("t.txt", .{});
diff --git a/lib/std/io/test.zig b/lib/std/io/test.zig
index b51d8e8ede..46c40512e2 100644
--- a/lib/std/io/test.zig
+++ b/lib/std/io/test.zig
@@ -15,8 +15,6 @@ const native_endian = builtin.target.cpu.arch.endian();
const tmpDir = std.testing.tmpDir;
test "write a file, read it, then delete it" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -110,8 +108,6 @@ test "BitStreams with File Stream" {
}
test "File seek ops" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -139,8 +135,6 @@ test "File seek ops" {
}
test "setEndPos" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 2073be5250..fd8582bb3b 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -38,8 +38,6 @@ test parseFromSlice {
}
test Value {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var parsed = try parseFromSlice(Value, testing.allocator, "{\"anything\": \"goes\"}", .{});
defer parsed.deinit();
try testing.expectEqualSlices(u8, "goes", parsed.value.object.get("anything").?.string);
@@ -65,8 +63,6 @@ test writeStream {
}
test stringify {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var out = ArrayList(u8).init(testing.allocator);
defer out.deinit();
diff --git a/lib/std/json/JSONTestSuite_test.zig b/lib/std/json/JSONTestSuite_test.zig
index 97a00074d4..5d75e72159 100644
--- a/lib/std/json/JSONTestSuite_test.zig
+++ b/lib/std/json/JSONTestSuite_test.zig
@@ -104,8 +104,6 @@ test "i_string_utf16LE_no_BOM.json" {
try any("[\x00\"\x00\xe9\x00\"\x00]\x00");
}
test "i_structure_500_nested_arrays.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try any("[" ** 500 ++ "]" ** 500);
}
test "i_structure_UTF-8_BOM_empty_object.json" {
@@ -361,21 +359,15 @@ test "n_object_bracket_key.json" {
try err("{[: \"x\"}\n");
}
test "n_object_comma_instead_of_colon.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"x\", null}");
}
test "n_object_double_colon.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"x\"::\"b\"}");
}
test "n_object_emoji.json" {
try err("{\xf0\x9f\x87\xa8\xf0\x9f\x87\xad}");
}
test "n_object_garbage_at_end.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"a\" 123}");
}
test "n_object_key_with_single_quotes.json" {
@@ -385,26 +377,18 @@ test "n_object_lone_continuation_byte_in_key_and_trailing_comma.json" {
try err("{\"\xb9\":\"0\",}");
}
test "n_object_missing_colon.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\" b}");
}
test "n_object_missing_key.json" {
try err("{:\"b\"}");
}
test "n_object_missing_semicolon.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\" \"b\"}");
}
test "n_object_missing_value.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":");
}
test "n_object_no-colon.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\"");
}
test "n_object_non_string_key.json" {
@@ -417,59 +401,39 @@ test "n_object_repeated_null_null.json" {
try err("{null:null,null:null}");
}
test "n_object_several_trailing_commas.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"id\":0,,,,,}");
}
test "n_object_single_quote.json" {
try err("{'a':0}");
}
test "n_object_trailing_comma.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"id\":0,}");
}
test "n_object_trailing_comment.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\"}/**/");
}
test "n_object_trailing_comment_open.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\"}/**//");
}
test "n_object_trailing_comment_slash_open.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\"}//");
}
test "n_object_trailing_comment_slash_open_incomplete.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\"}/");
}
test "n_object_two_commas_in_a_row.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\",,\"c\":\"d\"}");
}
test "n_object_unquoted_key.json" {
try err("{a: \"b\"}");
}
test "n_object_unterminated-value.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"a");
}
test "n_object_with_single_string.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{ \"foo\" : \"bar\", \"a\" }");
}
test "n_object_with_trailing_garbage.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\"}#");
}
test "n_single_space.json" {
@@ -596,8 +560,6 @@ test "n_structure_close_unopened_array.json" {
try err("1]");
}
test "n_structure_comma_instead_of_closing_brace.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"x\": true,");
}
test "n_structure_double_array.json" {
@@ -628,18 +590,12 @@ test "n_structure_object_followed_by_closing_object.json" {
try err("{}}");
}
test "n_structure_object_unclosed_no_value.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"\":");
}
test "n_structure_object_with_comment.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":/*comment*/\"b\"}");
}
test "n_structure_object_with_trailing_garbage.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\": true} \"x\"");
}
test "n_structure_open_array_apostrophe.json" {
@@ -649,8 +605,6 @@ test "n_structure_open_array_comma.json" {
try err("[,");
}
test "n_structure_open_array_object.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("[{\"\":" ** 50000 ++ "\n");
}
test "n_structure_open_array_open_object.json" {
@@ -690,8 +644,6 @@ test "n_structure_single_star.json" {
try err("*");
}
test "n_structure_trailing_#.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"a\":\"b\"}#{}");
}
test "n_structure_uescaped_LF_before_string.json" {
@@ -710,8 +662,6 @@ test "n_structure_unclosed_array_unfinished_true.json" {
try err("[ false, tru");
}
test "n_structure_unclosed_object.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err("{\"asd\":\"asd\"");
}
test "n_structure_unicode-identifier.json" {
@@ -819,31 +769,21 @@ test "y_object.json" {
try ok("{\"asd\":\"sdf\", \"dfg\":\"fgh\"}");
}
test "y_object_basic.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"asd\":\"sdf\"}");
}
test "y_object_duplicated_key.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"a\":\"b\",\"a\":\"c\"}");
}
test "y_object_duplicated_key_and_value.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"a\":\"b\",\"a\":\"b\"}");
}
test "y_object_empty.json" {
try ok("{}");
}
test "y_object_empty_key.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"\":0}");
}
test "y_object_escaped_null_in_key.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"foo\\u0000bar\": 42}");
}
test "y_object_extreme_numbers.json" {
@@ -857,18 +797,12 @@ test "y_object_long_strings.json" {
try ok("{\"x\":[{\"id\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"}], \"id\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"}");
}
test "y_object_simple.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"a\":[]}");
}
test "y_object_string_unicode.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\"title\":\"\\u041f\\u043e\\u043b\\u0442\\u043e\\u0440\\u0430 \\u0417\\u0435\\u043c\\u043b\\u0435\\u043a\\u043e\\u043f\\u0430\" }");
}
test "y_object_with_newlines.json" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try ok("{\n\"a\": \"b\"\n}");
}
test "y_string_1_2_3_bytes_UTF-8_sequences.json" {
diff --git a/lib/std/json/dynamic_test.zig b/lib/std/json/dynamic_test.zig
index a3aa833abb..c3500b6826 100644
--- a/lib/std/json/dynamic_test.zig
+++ b/lib/std/json/dynamic_test.zig
@@ -125,16 +125,12 @@ fn testParse(allocator: std.mem.Allocator, json_str: []const u8) !Value {
}
test "parsing empty string gives appropriate error" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
try testing.expectError(error.UnexpectedEndOfInput, testParse(arena_allocator.allocator(), ""));
}
test "Value.array allocator should still be usable after parsing" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var parsed = try parseFromSlice(Value, std.testing.allocator, "[]", .{});
defer parsed.deinit();
@@ -195,8 +191,6 @@ test "escaped characters" {
}
test "Value.jsonStringify" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var vals = [_]Value{
.{ .integer = 1 },
.{ .integer = 2 },
@@ -263,8 +257,6 @@ test "parseFromValue(std.json.Value,...)" {
}
test "polymorphic parsing" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
if (true) return error.SkipZigTest; // See https://github.com/ziglang/zig/issues/16108
const doc =
\\{ "type": "div",
@@ -310,8 +302,6 @@ test "polymorphic parsing" {
}
test "long object value" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const value = "01234567890123456789";
const doc = "{\"key\":\"" ++ value ++ "\"}";
var fbs = std.io.fixedBufferStream(doc);
@@ -324,8 +314,6 @@ test "long object value" {
}
test "ParseOptions.max_value_len" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
diff --git a/lib/std/json/static_test.zig b/lib/std/json/static_test.zig
index c977de88e8..892ec85a50 100644
--- a/lib/std/json/static_test.zig
+++ b/lib/std/json/static_test.zig
@@ -392,8 +392,6 @@ test "parse" {
}
test "parse into enum" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const T = enum(u32) {
Foo = 42,
Bar,
@@ -478,8 +476,6 @@ test "parse into tagged union errors" {
}
test "parse into struct with no fields" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const T = struct {};
const parsed = try parseFromSlice(T, testing.allocator, "{}", .{});
defer parsed.deinit();
@@ -949,8 +945,6 @@ test "json parse allocate when streaming" {
}
test "parse at comptime" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const doc =
\\{
\\ "vals": {
diff --git a/lib/std/json/stringify_test.zig b/lib/std/json/stringify_test.zig
index d2c4104316..7a32d0e21a 100644
--- a/lib/std/json/stringify_test.zig
+++ b/lib/std/json/stringify_test.zig
@@ -100,8 +100,6 @@ fn getJsonObject(allocator: std.mem.Allocator) !Value {
}
test "stringify null optional fields" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const MyStruct = struct {
optional: ?[]const u8 = null,
required: []const u8 = "something",
@@ -123,8 +121,6 @@ test "stringify null optional fields" {
}
test "stringify basic types" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("false", false, .{});
try testStringify("true", true, .{});
try testStringify("null", @as(?u8, null), .{});
@@ -141,8 +137,6 @@ test "stringify basic types" {
}
test "stringify string" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("\"hello\"", "hello", .{});
try testStringify("\"with\\nescapes\\r\"", "with\nescapes\r", .{});
try testStringify("\"with\\nescapes\\r\"", "with\nescapes\r", .{ .escape_unicode = true });
@@ -167,16 +161,12 @@ test "stringify string" {
}
test "stringify many-item sentinel-terminated string" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("\"hello\"", @as([*:0]const u8, "hello"), .{});
try testStringify("\"with\\nescapes\\r\"", @as([*:0]const u8, "with\nescapes\r"), .{ .escape_unicode = true });
try testStringify("\"with unicode\\u0001\"", @as([*:0]const u8, "with unicode\u{1}"), .{ .escape_unicode = true });
}
test "stringify enums" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const E = enum {
foo,
bar,
@@ -186,15 +176,11 @@ test "stringify enums" {
}
test "stringify enum literals" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("\"foo\"", .foo, .{});
try testStringify("\"bar\"", .bar, .{});
}
test "stringify tagged unions" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const T = union(enum) {
nothing,
foo: u32,
@@ -206,8 +192,6 @@ test "stringify tagged unions" {
}
test "stringify struct" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("{\"foo\":42}", struct {
foo: u32,
}{ .foo = 42 }, .{});
@@ -230,8 +214,6 @@ test "emit_strings_as_arrays" {
}
test "stringify struct with indentation" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify(
\\{
\\ "foo": 42,
@@ -277,8 +259,6 @@ test "stringify struct with indentation" {
}
test "stringify struct with void field" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("{\"foo\":42}", struct {
foo: u32,
bar: void = {},
@@ -286,8 +266,6 @@ test "stringify struct with void field" {
}
test "stringify array of structs" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const MyStruct = struct {
foo: u32,
};
@@ -299,8 +277,6 @@ test "stringify array of structs" {
}
test "stringify struct with custom stringifier" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("[\"something special\",42]", struct {
foo: u32,
const Self = @This();
@@ -315,16 +291,12 @@ test "stringify struct with custom stringifier" {
}
test "stringify vector" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("[1,1]", @as(@Vector(2, u32), @splat(1)), .{});
try testStringify("\"AA\"", @as(@Vector(2, u8), @splat('A')), .{});
try testStringify("[65,65]", @as(@Vector(2, u8), @splat('A')), .{ .emit_strings_as_arrays = true });
}
test "stringify tuple" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("[\"foo\",42]", std.meta.Tuple(&.{ []const u8, usize }){ "foo", 42 }, .{});
}
@@ -411,8 +383,6 @@ fn testStringifyArbitraryDepth(expected: []const u8, value: anytype, options: St
}
test "stringify alloc" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const allocator = std.testing.allocator;
const expected =
\\{"foo":"bar","answer":42,"my_friend":"sammy"}
@@ -424,8 +394,6 @@ test "stringify alloc" {
}
test "comptime stringify" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
comptime testStringifyMaxDepth("false", false, .{}, null) catch unreachable;
comptime testStringifyMaxDepth("false", false, .{}, 0) catch unreachable;
comptime testStringifyArbitraryDepth("false", false, .{}) catch unreachable;
@@ -446,8 +414,6 @@ test "comptime stringify" {
}
test "print" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var out_buf: [1024]u8 = undefined;
var slice_stream = std.io.fixedBufferStream(&out_buf);
const out = slice_stream.writer();
@@ -479,8 +445,6 @@ test "print" {
}
test "nonportable numbers" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testStringify("9999999999999999", 9999999999999999, .{});
try testStringify("\"9999999999999999\"", 9999999999999999, .{ .emit_nonportable_numbers_as_strings = true });
}
diff --git a/lib/std/json/test.zig b/lib/std/json/test.zig
index 453a324b20..51c54a14aa 100644
--- a/lib/std/json/test.zig
+++ b/lib/std/json/test.zig
@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
const testing = std.testing;
const parseFromSlice = @import("./static.zig").parseFromSlice;
const validate = @import("./scanner.zig").validate;
@@ -35,15 +34,13 @@ fn testHighLevelDynamicParser(s: []const u8) !void {
// Additional tests not part of test JSONTestSuite.
test "y_trailing_comma_after_empty" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
try roundTrip(
\\{"1":[],"2":{},"3":"4"}
);
}
test "n_object_closed_missing_value" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try err(
\\{"a":}
);
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 57376f4d61..f110efa0af 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -903,12 +903,12 @@ pub fn mod(comptime T: type, numerator: T, denominator: T) !T {
}
test "mod" {
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+
try testMod();
try comptime testMod();
}
fn testMod() !void {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testing.expect((mod(i32, -5, 3) catch unreachable) == 1);
try testing.expect((mod(i32, 5, 3) catch unreachable) == 2);
try testing.expectError(error.NegativeDenominator, mod(i32, 10, -1));
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index d54c417dfb..b5ad916c20 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -71,8 +71,6 @@ test "big.int set negative minimum" {
}
test "big.int set double-width maximum then zero" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, maxInt(DoubleLimb));
defer a.deinit();
try a.set(@as(DoubleLimb, 0));
@@ -246,8 +244,6 @@ test "big.int fits" {
}
test "big.int string set" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.init(testing.allocator);
defer a.deinit();
@@ -264,8 +260,6 @@ test "big.int string negative" {
}
test "big.int string set number with underscores" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.init(testing.allocator);
defer a.deinit();
@@ -274,8 +268,6 @@ test "big.int string set number with underscores" {
}
test "big.int string set case insensitive number" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.init(testing.allocator);
defer a.deinit();
@@ -326,8 +318,6 @@ test "big.int twos complement limit set" {
}
test "big.int string to" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, 120317241209124781241290847124);
defer a.deinit();
@@ -368,8 +358,6 @@ test "big.int string to base 16" {
}
test "big.int neg string to" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, -123907434);
defer a.deinit();
@@ -392,8 +380,6 @@ test "big.int zero string to" {
}
test "big.int clone" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, 1234);
defer a.deinit();
var b = try a.clone();
@@ -634,8 +620,6 @@ test "big.int addWrap single-single, unsigned" {
}
test "big.int subWrap single-single, unsigned" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, 0);
defer a.deinit();
@@ -963,8 +947,6 @@ test "big.int mul multi-multi" {
}
test "big.int mul alias r with a" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 2);
@@ -976,8 +958,6 @@ test "big.int mul alias r with a" {
}
test "big.int mul alias r with b" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 2);
@@ -989,8 +969,6 @@ test "big.int mul alias r with b" {
}
test "big.int mul alias r with a and b" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
defer a.deinit();
@@ -1096,7 +1074,7 @@ test "big.int mulWrap multi-multi unsigned" {
test "big.int mulWrap multi-multi signed" {
switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
+ .stage2_c => return error.SkipZigTest,
else => {},
}
@@ -1171,8 +1149,6 @@ test "big.int div single-half with rem" {
}
test "big.int div single-single no rem" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// assumes usize is <= 64 bits.
var a = try Managed.initSet(testing.allocator, 1 << 52);
defer a.deinit();
@@ -1190,8 +1166,6 @@ test "big.int div single-single no rem" {
}
test "big.int div single-single with rem" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, (1 << 52) | (1 << 33));
defer a.deinit();
var b = try Managed.initSet(testing.allocator, (1 << 35));
@@ -1271,8 +1245,6 @@ test "big.int div multi>2-single" {
}
test "big.int div single-single q < r" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, 0x0078f432);
defer a.deinit();
var b = try Managed.initSet(testing.allocator, 0x01000000);
@@ -1317,10 +1289,7 @@ test "big.int div q=0 alias" {
}
test "big.int div multi-multi q < r" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const op1 = 0x1ffffffff0078f432;
const op2 = 0x1ffffffff01000000;
@@ -1642,8 +1611,6 @@ test "big.int div floor single-single -/-" {
}
test "big.int div floor no remainder negative quotient" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const u: i32 = -0x80000000;
const v: i32 = 1;
@@ -1743,10 +1710,7 @@ test "big.int div multi-multi no rem" {
}
test "big.int div multi-multi (2 branch)" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var a = try Managed.initSet(testing.allocator, 0x866666665555555588888887777777761111111111111111);
defer a.deinit();
@@ -1785,10 +1749,7 @@ test "big.int div multi-multi (3.1/3.3 branch)" {
}
test "big.int div multi-single zero-limb trailing" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var a = try Managed.initSet(testing.allocator, 0x60000000000000000000000000000000000000000000000000000000000000000);
defer a.deinit();
@@ -1808,10 +1769,7 @@ test "big.int div multi-single zero-limb trailing" {
}
test "big.int div multi-multi zero-limb trailing (with rem)" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var a = try Managed.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000);
defer a.deinit();
@@ -1908,8 +1866,6 @@ test "big.int div multi-multi fuzz case #1" {
}
test "big.int div multi-multi fuzz case #2" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.init(testing.allocator);
defer a.deinit();
var b = try Managed.init(testing.allocator);
@@ -2672,8 +2628,6 @@ test "big.int mutable to managed" {
}
test "big.int const to managed" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Managed.initSet(testing.allocator, 123423453456);
defer a.deinit();
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index a7b2e0b52b..f3812fe35d 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -494,8 +494,6 @@ fn extractLowBits(a: Int, comptime T: type) T {
}
test "big.rational extractLowBits" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Int.initSet(testing.allocator, 0x11112222333344441234567887654321);
defer a.deinit();
@@ -649,8 +647,6 @@ test "big.rational copy" {
}
test "big.rational negate" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Rational.init(testing.allocator);
defer a.deinit();
@@ -668,8 +664,6 @@ test "big.rational negate" {
}
test "big.rational abs" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var a = try Rational.init(testing.allocator);
defer a.deinit();
diff --git a/lib/std/math/ilogb.zig b/lib/std/math/ilogb.zig
index f3cfc5f6fe..b03d82bcbf 100644
--- a/lib/std/math/ilogb.zig
+++ b/lib/std/math/ilogb.zig
@@ -125,8 +125,6 @@ test "80" {
}
test "128" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try expect(ilogbX(f128, 0.0) == fp_ilogb0);
try expect(ilogbX(f128, 0.5) == -1);
try expect(ilogbX(f128, 0.8923) == -1);
@@ -162,8 +160,6 @@ test "64 special" {
}
test "80 special" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try expect(ilogbX(f80, math.inf(f80)) == maxInt(i32));
try expect(ilogbX(f80, -math.inf(f80)) == maxInt(i32));
try expect(ilogbX(f80, 0.0) == minInt(i32));
@@ -171,8 +167,6 @@ test "80 special" {
}
test "128 special" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try expect(ilogbX(f128, math.inf(f128)) == maxInt(i32));
try expect(ilogbX(f128, -math.inf(f128)) == maxInt(i32));
try expect(ilogbX(f128, 0.0) == minInt(i32));
diff --git a/lib/std/math/log10.zig b/lib/std/math/log10.zig
index 7544c0f9cc..0619fdbc17 100644
--- a/lib/std/math/log10.zig
+++ b/lib/std/math/log10.zig
@@ -147,7 +147,6 @@ test "oldlog10 doesn't work" {
test "log10_int vs old implementation" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/lib/std/math/log_int.zig b/lib/std/math/log_int.zig
index 066f983f08..d73e273d71 100644
--- a/lib/std/math/log_int.zig
+++ b/lib/std/math/log_int.zig
@@ -56,8 +56,6 @@ pub fn log_int(comptime T: type, base: T, x: T) Log2Int(T) {
}
test "math.log_int" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// Test all unsigned integers with 2, 3, ..., 64 bits.
// We cannot test 0 or 1 bits since base must be > 1.
inline for (2..64 + 1) |bits| {
diff --git a/lib/std/math/nextafter.zig b/lib/std/math/nextafter.zig
index 56117b36d9..9008858a62 100644
--- a/lib/std/math/nextafter.zig
+++ b/lib/std/math/nextafter.zig
@@ -1,4 +1,3 @@
-const builtin = @import("builtin");
const std = @import("../std.zig");
const math = std.math;
const assert = std.debug.assert;
@@ -103,7 +102,7 @@ fn nextAfterFloat(comptime T: type, x: T, y: T) T {
}
test "math.nextAfter.int" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
try expect(nextAfter(i0, 0, 0) == 0);
try expect(nextAfter(u0, 0, 0) == 0);
diff --git a/lib/std/math/scalbn.zig b/lib/std/math/scalbn.zig
index 4515cda5b0..2c8c3733fa 100644
--- a/lib/std/math/scalbn.zig
+++ b/lib/std/math/scalbn.zig
@@ -7,8 +7,6 @@ const expect = std.testing.expect;
pub const scalbn = @import("ldexp.zig").ldexp;
test "math.scalbn" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// Verify we are using base 2.
try expect(scalbn(@as(f16, 1.5), 4) == 24.0);
try expect(scalbn(@as(f32, 1.5), 4) == 24.0);
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index cf9369fbe4..a060db710c 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1741,8 +1741,6 @@ pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: Endian) T {
}
test "comptime read/write int" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
comptime {
var bytes: [2]u8 = undefined;
writeIntLittle(u16, &bytes, 0x1234);
@@ -2062,10 +2060,7 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
}
test "writeIntBig and writeIntLittle" {
- switch (builtin.zig_backend) {
- .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
- else => {},
- }
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var buf0: [0]u8 = undefined;
var buf1: [1]u8 = undefined;
@@ -3309,8 +3304,6 @@ test "testStringEquality" {
}
test "testReadInt" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testReadIntImpl();
try comptime testReadIntImpl();
}
@@ -4668,6 +4661,7 @@ test "read/write(Var)PackedInt" {
.stage2_c, .stage2_x86_64 => return error.SkipZigTest,
else => {},
}
+
switch (builtin.cpu.arch) {
// This test generates too much code to execute on WASI.
// LLVM backend fails with "too many locals: locals exceed maximum"
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index eca1301320..11dec78036 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -583,8 +583,6 @@ pub fn MultiArrayList(comptime T: type) type {
}
test "basic usage" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
const Foo = struct {
@@ -679,8 +677,6 @@ test "basic usage" {
// This was observed to fail on aarch64 with LLVM 11, when the capacityInBytes
// function used the @reduce code path.
test "regression test for @reduce bug" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
var list = MultiArrayList(struct {
tag: std.zig.Token.Tag,
@@ -758,8 +754,6 @@ test "regression test for @reduce bug" {
}
test "ensure capacity on empty list" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
const Foo = struct {
@@ -795,8 +789,6 @@ test "ensure capacity on empty list" {
}
test "insert elements" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
const Foo = struct {
@@ -816,8 +808,6 @@ test "insert elements" {
}
test "union" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
const Foo = union(enum) {
@@ -873,8 +863,6 @@ test "union" {
}
test "sorting a span" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var list: MultiArrayList(struct { score: u32, chr: u8 }) = .{};
defer list.deinit(testing.allocator);
@@ -915,8 +903,6 @@ test "sorting a span" {
}
test "0 sized struct field" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
const Foo = struct {
@@ -944,8 +930,6 @@ test "0 sized struct field" {
}
test "0 sized struct" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const ally = testing.allocator;
const Foo = struct {
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index 1bacfe867f..73df32b3b1 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -109,9 +109,8 @@ test "parse and render UNIX addresses" {
}
test "resolve DNS" {
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
@@ -295,8 +294,6 @@ test "listen on a unix socket, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index 91a9f1618c..915036d962 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -1741,8 +1741,6 @@ test "readv" {
test "writev/fsync/readv" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(4, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -1813,8 +1811,6 @@ test "writev/fsync/readv" {
test "write/read" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -1862,8 +1858,6 @@ test "write/read" {
test "splice/read" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(4, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -1935,8 +1929,6 @@ test "splice/read" {
test "write_fixed/read_fixed" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2002,8 +1994,6 @@ test "write_fixed/read_fixed" {
test "openat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2055,8 +2045,6 @@ test "openat" {
test "close" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2392,8 +2380,6 @@ test "accept/connect/recv/link_timeout" {
test "fallocate" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2440,8 +2426,6 @@ test "fallocate" {
test "statx" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2698,8 +2682,6 @@ test "shutdown" {
test "renameat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2769,8 +2751,6 @@ test "renameat" {
test "unlinkat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2823,8 +2803,6 @@ test "unlinkat" {
test "mkdirat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2869,8 +2847,6 @@ test "mkdirat" {
test "symlinkat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2919,8 +2895,6 @@ test "symlinkat" {
test "linkat" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index 1c2ca643b2..170bde6334 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -8,8 +8,6 @@ const expectEqual = std.testing.expectEqual;
const fs = std.fs;
test "fallocate" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
@@ -71,8 +69,6 @@ test "timer" {
}
test "statx" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
@@ -111,8 +107,6 @@ test "user and group ids" {
}
test "fadvise" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index ca3de5f264..60acfb00ec 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -88,8 +88,6 @@ test "chdir smoke test" {
test "open smoke test" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// TODO verify file attributes using `fstat`
var tmp = tmpDir(.{});
@@ -144,8 +142,6 @@ test "open smoke test" {
test "openat smoke test" {
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
// TODO verify file attributes using `fstatat`
var tmp = tmpDir(.{});
@@ -280,8 +276,6 @@ test "link with relative paths" {
test "linkat with different directories" {
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
switch (native_os) {
.wasi, .linux, .solaris, .illumos => {},
else => return error.SkipZigTest,
@@ -327,8 +321,6 @@ test "fstatat" {
// enable when `fstat` and `fstatat` are implemented on Windows
if (native_os == .windows) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -348,8 +340,6 @@ test "fstatat" {
}
test "readlinkat" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -583,8 +573,6 @@ test "mmap" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -695,8 +683,6 @@ test "fcntl" {
if (native_os == .windows or native_os == .wasi)
return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -737,8 +723,6 @@ test "sync" {
if (native_os != .linux)
return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -759,8 +743,6 @@ test "fsync" {
else => return error.SkipZigTest,
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -780,8 +762,6 @@ test "getrlimit and setrlimit" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
inline for (std.meta.fields(os.rlimit_resource)) |field| {
const resource = @as(os.rlimit_resource, @enumFromInt(field.value));
const limit = try os.getrlimit(resource);
@@ -903,8 +883,6 @@ test "dup & dup2" {
else => return error.SkipZigTest,
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -934,8 +912,6 @@ test "dup & dup2" {
test "writev longer than IOV_MAX" {
if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -953,8 +929,6 @@ test "POSIX file locking with fcntl" {
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
if (true) {
// https://github.com/ziglang/zig/issues/11074
return error.SkipZigTest;
@@ -1017,8 +991,6 @@ test "POSIX file locking with fcntl" {
test "rename smoke test" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1075,8 +1047,6 @@ test "rename smoke test" {
test "access smoke test" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1141,8 +1111,6 @@ test "timerfd" {
}
test "isatty" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1155,8 +1123,6 @@ test "isatty" {
test "read with empty buffer" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1182,8 +1148,6 @@ test "read with empty buffer" {
test "pread with empty buffer" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1209,8 +1173,6 @@ test "pread with empty buffer" {
test "write with empty buffer" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1236,8 +1198,6 @@ test "write with empty buffer" {
test "pwrite with empty buffer" {
if (native_os == .wasi) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
@@ -1263,8 +1223,6 @@ test "pwrite with empty buffer" {
test "fchmodat smoke test" {
if (!std.fs.has_executable_bit) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var tmp = tmpDir(.{});
defer tmp.cleanup();
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index b426db968d..05b754de8d 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -2491,8 +2491,6 @@ pub fn ntToWin32Namespace(path: []const u16) !PathSpace {
}
test "ntToWin32Namespace" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const L = std.unicode.utf8ToUtf16LeStringLiteral;
try testNtToWin32Namespace(L("UNC"), L("\\??\\UNC"));
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index b68bd97d05..31ae965286 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -705,8 +705,6 @@ test "std.PriorityDequeue: fromOwnedSlice trivial case 1" {
}
test "std.PriorityDequeue: fromOwnedSlice" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const queue_items = try testing.allocator.dupe(u32, items[0..]);
var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..], {});
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index 84ee9d6cdf..a568eeadcf 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -385,8 +385,6 @@ test "std.PriorityQueue: fromOwnedSlice trivial case 1" {
}
test "std.PriorityQueue: fromOwnedSlice" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const heap_items = try testing.allocator.dupe(u32, items[0..]);
var queue = PQlt.fromOwnedSlice(testing.allocator, heap_items[0..], {});
diff --git a/lib/std/rand/test.zig b/lib/std/rand/test.zig
index 39b25eff15..407e843198 100644
--- a/lib/std/rand/test.zig
+++ b/lib/std/rand/test.zig
@@ -443,8 +443,6 @@ fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void {
}
test "CSPRNG" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var secret_seed: [DefaultCsprng.secret_seed_length]u8 = undefined;
std.crypto.random.bytes(&secret_seed);
var csprng = DefaultCsprng.init(secret_seed);
diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig
index 0c73fdc982..1c9cffa766 100644
--- a/lib/std/segmented_list.zig
+++ b/lib/std/segmented_list.zig
@@ -409,8 +409,6 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
test "SegmentedList basic usage" {
- if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testSegmentedList(0);
try testSegmentedList(1);
try testSegmentedList(2);
diff --git a/lib/std/simd.zig b/lib/std/simd.zig
index 7629dfac1a..53df15aa18 100644
--- a/lib/std/simd.zig
+++ b/lib/std/simd.zig
@@ -13,7 +13,7 @@ pub fn suggestVectorSizeForCpu(comptime T: type, comptime cpu: std.Target.Cpu) ?
const vector_bit_size: u16 = blk: {
if (cpu.arch.isX86()) {
if (T == bool and std.Target.x86.featureSetHas(cpu.features, .prefer_mask_registers)) return 64;
- if (std.Target.x86.featureSetHas(cpu.features, .avx512f) and !std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .prefer_128_bit })) break :blk 512;
+ if (builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(cpu.features, .avx512f) and !std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .prefer_128_bit })) break :blk 512;
if (std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .avx2 }) and !std.Target.x86.featureSetHas(cpu.features, .prefer_128_bit)) break :blk 256;
if (std.Target.x86.featureSetHas(cpu.features, .sse)) break :blk 128;
if (std.Target.x86.featureSetHasAny(cpu.features, .{ .mmx, .@"3dnow" })) break :blk 64;
@@ -62,10 +62,15 @@ pub fn suggestVectorSize(comptime T: type) ?comptime_int {
test "suggestVectorSizeForCpu works with signed and unsigned values" {
comptime var cpu = std.Target.Cpu.baseline(std.Target.Cpu.Arch.x86_64);
comptime cpu.features.addFeature(@intFromEnum(std.Target.x86.Feature.avx512f));
+ comptime cpu.features.populateDependencies(&std.Target.x86.all_features);
+ const expected_size: usize = switch (builtin.zig_backend) {
+ .stage2_x86_64 => 8,
+ else => 16,
+ };
const signed_integer_size = suggestVectorSizeForCpu(i32, cpu).?;
const unsigned_integer_size = suggestVectorSizeForCpu(u32, cpu).?;
- try std.testing.expectEqual(@as(usize, 16), unsigned_integer_size);
- try std.testing.expectEqual(@as(usize, 16), signed_integer_size);
+ try std.testing.expectEqual(expected_size, unsigned_integer_size);
+ try std.testing.expectEqual(expected_size, signed_integer_size);
}
fn vectorLength(comptime VectorType: type) comptime_int {
diff --git a/lib/std/sort.zig b/lib/std/sort.zig
index 149f632944..e110a8beb8 100644
--- a/lib/std/sort.zig
+++ b/lib/std/sort.zig
@@ -1,6 +1,5 @@
const std = @import("std.zig");
const assert = std.debug.assert;
-const builtin = @import("builtin");
const testing = std.testing;
const mem = std.mem;
const math = std.math;
@@ -177,8 +176,6 @@ const IdAndValue = struct {
};
test "stable sort" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const expected = [_]IdAndValue{
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 1, .value = 0 },
@@ -226,8 +223,6 @@ test "stable sort" {
}
test "sort" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const u8cases = [_][]const []const u8{
&[_][]const u8{
"",
@@ -306,8 +301,6 @@ test "sort" {
}
test "sort descending" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const rev_cases = [_][]const []const i32{
&[_][]const i32{
&[_]i32{},
@@ -347,8 +340,6 @@ test "sort descending" {
}
test "sort with context in the middle of a slice" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const Context = struct {
items: []i32,
@@ -388,8 +379,6 @@ test "sort with context in the middle of a slice" {
}
test "sort fuzz testing" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var prng = std.rand.DefaultPrng.init(0x12345678);
const random = prng.random();
const test_case_count = 10;
diff --git a/lib/std/time.zig b/lib/std/time.zig
index 1948decdf8..010c1af291 100644
--- a/lib/std/time.zig
+++ b/lib/std/time.zig
@@ -321,8 +321,6 @@ pub const Timer = struct {
};
test "Timer + Instant" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const margin = ns_per_ms * 150;
var timer = try Timer.start();
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index b1a9b3db0b..e4a84b42c9 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -1,6 +1,5 @@
const std = @import("./std.zig");
const assert = std.debug.assert;
-const builtin = @import("builtin");
const testing = std.testing;
const mem = std.mem;
@@ -497,15 +496,11 @@ fn testUtf16CountCodepoints() !void {
}
test "utf16 count codepoints" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testUtf16CountCodepoints();
try comptime testUtf16CountCodepoints();
}
test "utf8 encode" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testUtf8Encode();
try testUtf8Encode();
}
@@ -532,8 +527,6 @@ fn testUtf8Encode() !void {
}
test "utf8 encode error" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testUtf8EncodeError();
try testUtf8EncodeError();
}
@@ -550,8 +543,6 @@ fn testErrorEncode(codePoint: u21, array: []u8, expectedErr: anyerror) !void {
}
test "utf8 iterator on ascii" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testUtf8IteratorOnAscii();
try testUtf8IteratorOnAscii();
}
@@ -572,8 +563,6 @@ fn testUtf8IteratorOnAscii() !void {
}
test "utf8 view bad" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testUtf8ViewBad();
try testUtf8ViewBad();
}
@@ -584,8 +573,6 @@ fn testUtf8ViewBad() !void {
}
test "utf8 view ok" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testUtf8ViewOk();
try testUtf8ViewOk();
}
@@ -606,8 +593,6 @@ fn testUtf8ViewOk() !void {
}
test "validate slice" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testValidateSlice();
try testValidateSlice();
@@ -648,8 +633,6 @@ fn testValidateSlice() !void {
}
test "valid utf8" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testValidUtf8();
try testValidUtf8();
}
@@ -669,8 +652,6 @@ fn testValidUtf8() !void {
}
test "invalid utf8 continuation bytes" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testInvalidUtf8ContinuationBytes();
try testInvalidUtf8ContinuationBytes();
}
@@ -703,8 +684,6 @@ fn testInvalidUtf8ContinuationBytes() !void {
}
test "overlong utf8 codepoint" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testOverlongUtf8Codepoint();
try testOverlongUtf8Codepoint();
}
@@ -718,8 +697,6 @@ fn testOverlongUtf8Codepoint() !void {
}
test "misc invalid utf8" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testMiscInvalidUtf8();
try testMiscInvalidUtf8();
}
@@ -735,8 +712,6 @@ fn testMiscInvalidUtf8() !void {
}
test "utf8 iterator peeking" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try comptime testUtf8Peeking();
try testUtf8Peeking();
}
@@ -821,8 +796,6 @@ pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize {
}
test "utf16leToUtf8" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var utf16le: [2]u16 = undefined;
const utf16le_as_bytes = mem.sliceAsBytes(utf16le[0..]);
@@ -935,8 +908,6 @@ pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize {
}
test "utf8ToUtf16Le" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
var utf16le: [2]u16 = [_]u16{0} ** 2;
{
const length = try utf8ToUtf16Le(utf16le[0..], "𐐷");
@@ -955,8 +926,6 @@ test "utf8ToUtf16Le" {
}
test "utf8ToUtf16LeWithNull" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
{
const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "𐐷");
defer testing.allocator.free(utf16);
@@ -1015,8 +984,6 @@ fn testCalcUtf16LeLen() !void {
}
test "calculate utf16 string length of given utf8 string in u16" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testCalcUtf16LeLen();
try comptime testCalcUtf16LeLen();
}
@@ -1050,8 +1017,6 @@ pub fn fmtUtf16le(utf16le: []const u16) std.fmt.Formatter(formatUtf16le) {
}
test "fmtUtf16le" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
const expectFmt = std.testing.expectFmt;
try expectFmt("", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral(""))});
try expectFmt("foo", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral("foo"))});
@@ -1065,8 +1030,6 @@ test "fmtUtf16le" {
}
test "utf8ToUtf16LeStringLiteral" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
{
const bytes = [_:0]u16{
mem.nativeToLittle(u16, 0x41),
@@ -1127,8 +1090,6 @@ fn testUtf8CountCodepoints() !void {
}
test "utf8 count codepoints" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testUtf8CountCodepoints();
try comptime testUtf8CountCodepoints();
}
@@ -1145,8 +1106,6 @@ fn testUtf8ValidCodepoint() !void {
}
test "utf8 valid codepoint" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testUtf8ValidCodepoint();
try comptime testUtf8ValidCodepoint();
}
diff --git a/lib/std/zig/fmt.zig b/lib/std/zig/fmt.zig
index 6804af466b..908f1e5190 100644
--- a/lib/std/zig/fmt.zig
+++ b/lib/std/zig/fmt.zig
@@ -1,5 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
const mem = std.mem;
/// Print the string as a Zig identifier escaping it with @"" syntax if needed.
@@ -96,7 +95,7 @@ pub fn fmtEscapes(bytes: []const u8) std.fmt.Formatter(stringEscape) {
}
test "escape invalid identifiers" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
const expectFmt = std.testing.expectFmt;
try expectFmt("@\"while\"", "{}", .{fmtId("while")});
diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig
index 4cbcb3af72..59dcf29e16 100644
--- a/lib/std/zig/tokenizer.zig
+++ b/lib/std/zig/tokenizer.zig
@@ -1481,8 +1481,6 @@ test "utf8" {
}
test "invalid utf8" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
-
try testTokenize("//\x80", &.{
.invalid,
});
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 02c7aaf20f..b9f352480b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -656,11 +656,14 @@ const InstTracking = struct {
fn reuse(
self: *InstTracking,
function: *Self,
- new_inst: Air.Inst.Index,
+ new_inst: ?Air.Inst.Index,
old_inst: Air.Inst.Index,
) void {
self.short = .{ .dead = function.scope_generation };
- tracking_log.debug("%{d} => {} (reuse %{d})", .{ new_inst, self.*, old_inst });
+ if (new_inst) |inst|
+ tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, self.*, old_inst })
+ else
+ tracking_log.debug("tmp => {} (reuse %{d})", .{ self.*, old_inst });
}
fn liveOut(self: *InstTracking, function: *Self, inst: Air.Inst.Index) void {
@@ -1457,7 +1460,10 @@ fn asmRegisterRegisterRegisterImmediate(
.r1 = reg1,
.r2 = reg2,
.r3 = reg3,
- .i = @as(u8, @intCast(imm.unsigned)),
+ .i = switch (imm) {
+ .signed => |s| @bitCast(@as(i8, @intCast(s))),
+ .unsigned => |u| @intCast(u),
+ },
} },
});
}
@@ -1560,24 +1566,58 @@ fn asmRegisterMemoryImmediate(
m: Memory,
imm: Immediate,
) !void {
- _ = try self.addInst(.{
- .tag = tag[1],
- .ops = switch (m) {
- .sib => .rmi_sib,
- .rip => .rmi_rip,
+ if (switch (imm) {
+ .signed => |s| if (math.cast(i16, s)) |x| @as(u16, @bitCast(x)) else null,
+ .unsigned => |u| math.cast(u16, u),
+ }) |small_imm| {
+ _ = try self.addInst(.{
+ .tag = tag[1],
+ .ops = switch (m) {
+ .sib => .rmi_sib,
+ .rip => .rmi_rip,
+ else => unreachable,
+ },
+ .data = .{ .rix = .{
+ .fixes = tag[0],
+ .r1 = reg,
+ .i = small_imm,
+ .payload = switch (m) {
+ .sib => try self.addExtra(Mir.MemorySib.encode(m)),
+ .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+ else => unreachable,
+ },
+ } },
+ });
+ } else {
+ const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
+ .signed => |s| @bitCast(s),
+ .unsigned => unreachable,
+ } });
+ assert(payload + 1 == switch (m) {
+ .sib => try self.addExtra(Mir.MemorySib.encode(m)),
+ .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
- },
- .data = .{ .rix = .{
- .fixes = tag[0],
- .r1 = reg,
- .i = @as(u8, @intCast(imm.unsigned)),
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+ });
+ _ = try self.addInst(.{
+ .tag = tag[1],
+ .ops = switch (m) {
+ .sib => switch (imm) {
+ .signed => .rmi_sib_s,
+ .unsigned => .rmi_sib_u,
+ },
+ .rip => switch (imm) {
+ .signed => .rmi_rip_s,
+ .unsigned => .rmi_rip_u,
+ },
else => unreachable,
},
- } },
- });
+ .data = .{ .rx = .{
+ .fixes = tag[0],
+ .r1 = reg,
+ .payload = payload,
+ } },
+ });
+ }
}
fn asmRegisterRegisterMemoryImmediate(
@@ -2047,7 +2087,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.get_union_tag => try self.airGetUnionTag(inst),
.clz => try self.airClz(inst),
.ctz => try self.airCtz(inst),
- .popcount => try self.airPopcount(inst),
+ .popcount => try self.airPopCount(inst),
.byte_swap => try self.airByteSwap(inst),
.bit_reverse => try self.airBitReverse(inst),
.tag_name => try self.airTagName(inst),
@@ -2708,11 +2748,11 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
},
else => unreachable,
}) {
- var callee: ["__trunc?f?f2".len]u8 = undefined;
+ var callee_buf: ["__trunc?f?f2".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = self.floatCompilerRtAbiType(dst_ty, src_ty).toIntern(),
.param_types = &.{self.floatCompilerRtAbiType(src_ty, dst_ty).toIntern()},
- .callee = std.fmt.bufPrint(&callee, "__trunc{c}f{c}f2", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "__trunc{c}f{c}f2", .{
floatCompilerRtAbiName(src_bits),
floatCompilerRtAbiName(dst_bits),
}) catch unreachable,
@@ -2740,7 +2780,7 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
.{ .v_, .cvtps2ph },
dst_reg,
mat_src_reg.to128(),
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
},
else => unreachable,
@@ -2807,11 +2847,11 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
},
else => unreachable,
}) {
- var callee: ["__extend?f?f2".len]u8 = undefined;
+ var callee_buf: ["__extend?f?f2".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = self.floatCompilerRtAbiType(dst_ty, src_ty).toIntern(),
.param_types = &.{self.floatCompilerRtAbiType(src_ty, dst_ty).toIntern()},
- .callee = std.fmt.bufPrint(&callee, "__extend{c}f{c}f2", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "__extend{c}f{c}f2", .{
floatCompilerRtAbiName(src_bits),
floatCompilerRtAbiName(dst_bits),
}) catch unreachable,
@@ -2982,11 +3022,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
else if (dst_abi_size <= 8)
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv)
else if (dst_abi_size <= 16) dst: {
- const dst_regs = try self.register_manager.allocRegs(
- 2,
- .{ inst, inst },
- abi.RegisterClass.gp,
- );
+ const dst_regs =
+ try self.register_manager.allocRegs(2, .{ inst, inst }, abi.RegisterClass.gp);
const dst_mcv: MCValue = .{ .register_pair = dst_regs };
const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);
@@ -3713,14 +3750,22 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
else => unreachable,
.mul => {},
.div => switch (tag[0]) {
- ._ => try self.asmRegisterRegister(.{ ._, .xor }, .edx, .edx),
- .i_ => switch (self.regBitSize(ty)) {
- 8 => try self.asmOpOnly(.{ ._, .cbw }),
- 16 => try self.asmOpOnly(.{ ._, .cwd }),
- 32 => try self.asmOpOnly(.{ ._, .cdq }),
- 64 => try self.asmOpOnly(.{ ._, .cqo }),
- else => unreachable,
+ ._ => {
+ const hi_reg: Register =
+ switch (self.regBitSize(ty)) {
+ 8 => .ah,
+ 16, 32, 64 => .edx,
+ else => unreachable,
+ };
+ try self.asmRegisterRegister(.{ ._, .xor }, hi_reg, hi_reg);
},
+ .i_ => try self.asmOpOnly(.{ ._, switch (self.regBitSize(ty)) {
+ 8 => .cbw,
+ 16 => .cwd,
+ 32 => .cdq,
+ 64 => .cqo,
+ else => unreachable,
+ } }),
else => unreachable,
},
}
@@ -4626,6 +4671,19 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{
src_ty.fmt(mod),
});
+ const src_bits: u32 = @intCast(src_ty.bitSize(mod));
+
+ const has_lzcnt = self.hasFeature(.lzcnt);
+ if (src_bits > 64 and !has_lzcnt) {
+ var callee_buf: ["__clz?i2".len]u8 = undefined;
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = .i32_type,
+ .param_types = &.{src_ty.toIntern()},
+ .callee = std.fmt.bufPrint(&callee_buf, "__clz{c}i2", .{
+ intCompilerRtAbiName(src_bits),
+ }) catch unreachable,
+ } }, &.{src_ty}, &.{.{ .air_ref = ty_op.operand }});
+ }
const src_mcv = try self.resolveInst(ty_op.operand);
const mat_src_mcv = switch (src_mcv) {
@@ -4643,8 +4701,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const src_bits = src_ty.bitSize(mod);
- if (self.hasFeature(.lzcnt)) {
+ if (has_lzcnt) {
if (src_bits <= 8) {
const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
try self.truncateRegister(src_ty, wide_reg);
@@ -4667,24 +4724,33 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- try self.genBinOpMir(.{ ._, .lzcnt }, Type.u64, dst_mcv, mat_src_mcv);
+ try self.genBinOpMir(
+ .{ ._, .lzcnt },
+ Type.u64,
+ dst_mcv,
+ if (mat_src_mcv.isMemory())
+ mat_src_mcv
+ else
+ .{ .register = mat_src_mcv.register_pair[0] },
+ );
try self.genBinOpMir(.{ ._, .add }, dst_ty, dst_mcv, .{ .immediate = 64 });
try self.genBinOpMir(
.{ ._, .lzcnt },
Type.u64,
tmp_mcv,
- mat_src_mcv.address().offset(8).deref(),
+ if (mat_src_mcv.isMemory())
+ mat_src_mcv.address().offset(8).deref()
+ else
+ .{ .register = mat_src_mcv.register_pair[1] },
);
try self.asmCmovccRegisterRegister(.nc, dst_reg.to32(), tmp_reg.to32());
- if (src_bits < 128) {
- try self.genBinOpMir(
- .{ ._, .sub },
- dst_ty,
- dst_mcv,
- .{ .immediate = 128 - src_bits },
- );
- }
+ if (src_bits < 128) try self.genBinOpMir(
+ .{ ._, .sub },
+ dst_ty,
+ dst_mcv,
+ .{ .immediate = 128 - src_bits },
+ );
} else return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)});
break :result dst_mcv;
}
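For integers wider than 64 bits, airClz now either calls the __clz?i2 compiler_rt routine (when lzcnt is unavailable) or, as in the hunk above, combines two lzcnt results with a cmov and subtracts the padding bits. A minimal sketch of that two-limb reduction in plain Zig, using a hypothetical clz128 helper rather than the backend's actual lowering:

const std = @import("std");

// Hypothetical helper mirroring the lzcnt + cmov sequence: count leading
// zeros of the high limb, and fall back to 64 + clz(low limb) when the
// high limb is all zero.
fn clz128(x: u128) u32 {
    const hi: u64 = @intCast(x >> 64);
    const lo: u64 = @truncate(x);
    return if (hi != 0) @clz(hi) else 64 + @as(u32, @clz(lo));
}

test "two-limb clz agrees with @clz" {
    try std.testing.expectEqual(@as(u32, @clz(@as(u128, 1))), clz128(1));
    try std.testing.expectEqual(@as(u32, @clz(@as(u128, 1) << 100)), clz128(@as(u128, 1) << 100));
    try std.testing.expectEqual(@as(u32, 128), clz128(0));
}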
@@ -4755,7 +4821,22 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const result = result: {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- const src_bits = src_ty.bitSize(mod);
+    if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airCtz for {}", .{
+ src_ty.fmt(mod),
+ });
+ const src_bits: u32 = @intCast(src_ty.bitSize(mod));
+
+ const has_bmi = self.hasFeature(.bmi);
+ if (src_bits > 64 and !has_bmi) {
+ var callee_buf: ["__ctz?i2".len]u8 = undefined;
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = .i32_type,
+ .param_types = &.{src_ty.toIntern()},
+ .callee = std.fmt.bufPrint(&callee_buf, "__ctz{c}i2", .{
+ intCompilerRtAbiName(src_bits),
+ }) catch unreachable,
+ } }, &.{src_ty}, &.{.{ .air_ref = ty_op.operand }});
+ }
const src_mcv = try self.resolveInst(ty_op.operand);
const mat_src_mcv = switch (src_mcv) {
@@ -4800,8 +4881,16 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
+ const lo_mat_src_mcv: MCValue = if (mat_src_mcv.isMemory())
+ mat_src_mcv
+ else
+ .{ .register = mat_src_mcv.register_pair[0] };
+ const hi_mat_src_mcv: MCValue = if (mat_src_mcv.isMemory())
+ mat_src_mcv.address().offset(8).deref()
+ else
+ .{ .register = mat_src_mcv.register_pair[1] };
const masked_mcv = if (src_bits < 128) masked: {
- try self.genCopy(Type.u64, dst_mcv, mat_src_mcv.address().offset(8).deref());
+ try self.genCopy(Type.u64, dst_mcv, hi_mat_src_mcv);
try self.genBinOpMir(
.{ ._, .@"or" },
Type.u64,
@@ -4809,10 +4898,10 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
.{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(src_bits - 64) },
);
break :masked dst_mcv;
- } else mat_src_mcv.address().offset(8).deref();
+ } else hi_mat_src_mcv;
try self.genBinOpMir(.{ ._, .tzcnt }, Type.u64, dst_mcv, masked_mcv);
try self.genBinOpMir(.{ ._, .add }, dst_ty, dst_mcv, .{ .immediate = 64 });
- try self.genBinOpMir(.{ ._, .tzcnt }, Type.u64, tmp_mcv, mat_src_mcv);
+ try self.genBinOpMir(.{ ._, .tzcnt }, Type.u64, tmp_mcv, lo_mat_src_mcv);
try self.asmCmovccRegisterRegister(.nc, dst_reg.to32(), tmp_reg.to32());
} else return self.fail("TODO airCtz of {}", .{src_ty.fmt(mod)});
break :result dst_mcv;
@@ -4844,155 +4933,216 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
+fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
+ try self.spillEflagsIfOccupied();
+
const src_ty = self.typeOf(ty_op.operand);
const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
- if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 8)
- return self.fail("TODO implement airPopcount for {}", .{src_ty.fmt(mod)});
+ if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 16)
+ return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(mod)});
const src_mcv = try self.resolveInst(ty_op.operand);
- if (self.hasFeature(.popcnt)) {
- const mat_src_mcv = switch (src_mcv) {
- .immediate => MCValue{ .register = try self.copyToTmpRegister(src_ty, src_mcv) },
- else => src_mcv,
- };
- const mat_src_lock = switch (mat_src_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (mat_src_lock) |lock| self.register_manager.unlockReg(lock);
+ const mat_src_mcv = switch (src_mcv) {
+ .immediate => MCValue{ .register = try self.copyToTmpRegister(src_ty, src_mcv) },
+ else => src_mcv,
+ };
+ const mat_src_lock = switch (mat_src_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (mat_src_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv: MCValue =
- if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
+ if (src_abi_size <= 8) {
+ const dst_contains_src =
+ src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv);
+ const dst_reg = if (dst_contains_src)
+ src_mcv.getReg().?
else
- .{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.gp) };
+ try self.register_manager.allocReg(inst, abi.RegisterClass.gp);
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const popcnt_ty = if (src_abi_size > 1) src_ty else Type.u16;
- try self.genBinOpMir(.{ ._, .popcnt }, popcnt_ty, dst_mcv, mat_src_mcv);
- break :result dst_mcv;
+ try self.genPopCount(dst_reg, src_ty, mat_src_mcv, dst_contains_src);
+ break :result .{ .register = dst_reg };
}
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - src_abi_size * 8);
- const imm_0_1 = Immediate.u(mask / 0b1_1);
- const imm_00_11 = Immediate.u(mask / 0b01_01);
- const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
- const imm_0000_0001 = Immediate.u(mask / 0b1111_1111);
+ assert(src_abi_size > 8 and src_abi_size <= 16);
+ const tmp_regs = try self.register_manager.allocRegs(2, .{ inst, null }, abi.RegisterClass.gp);
+ const tmp_locks = self.register_manager.lockRegsAssumeUnused(2, tmp_regs);
+ defer for (tmp_locks) |lock| self.register_manager.unlockReg(lock);
- const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
+ try self.genPopCount(tmp_regs[0], Type.usize, if (mat_src_mcv.isMemory())
+ mat_src_mcv
else
- try self.copyToRegisterWithInstTracking(inst, src_ty, src_mcv);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ .{ .register = mat_src_mcv.register_pair[0] }, false);
+ try self.genPopCount(tmp_regs[1], Type.usize, if (mat_src_mcv.isMemory())
+ mat_src_mcv.address().offset(8).deref()
+ else
+ .{ .register = mat_src_mcv.register_pair[1] }, false);
+ try self.asmRegisterRegister(.{ ._, .add }, tmp_regs[0].to8(), tmp_regs[1].to8());
+ break :result .{ .register = tmp_regs[0] };
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
- const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
- const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
- defer self.register_manager.unlockReg(tmp_lock);
+fn genPopCount(
+ self: *Self,
+ dst_reg: Register,
+ src_ty: Type,
+ src_mcv: MCValue,
+ dst_contains_src: bool,
+) !void {
+ const mod = self.bin_file.options.module.?;
- {
- const dst = registerAlias(dst_reg, src_abi_size);
- const tmp = registerAlias(tmp_reg, src_abi_size);
- const imm = if (src_abi_size > 4)
- try self.register_manager.allocReg(null, abi.RegisterClass.gp)
- else
- undefined;
-
- // dst = operand
- try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
- // tmp = operand
- try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(1));
- // tmp = operand >> 1
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1);
- try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
- } else try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0_1);
- // tmp = (operand >> 1) & 0x55...55
- try self.asmRegisterRegister(.{ ._, .sub }, dst, tmp);
- // dst = temp1 = operand - ((operand >> 1) & 0x55...55)
- try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
- // tmp = temp1
- try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(2));
- // dst = temp1 >> 2
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11);
- try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
- try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
- } else {
- try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_00_11);
- try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_00_11);
- }
- // tmp = temp1 & 0x33...33
- // dst = (temp1 >> 2) & 0x33...33
- try self.asmRegisterRegister(.{ ._, .add }, tmp, dst);
- // tmp = temp2 = (temp1 & 0x33...33) + ((temp1 >> 2) & 0x33...33)
- try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp);
- // dst = temp2
- try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(4));
- // tmp = temp2 >> 4
- try self.asmRegisterRegister(.{ ._, .add }, dst, tmp);
- // dst = temp2 + (temp2 >> 4)
- if (src_abi_size > 4) {
- try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111);
- try self.asmRegisterImmediate(.{ ._, .mov }, tmp, imm_0000_0001);
- try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
- try self.asmRegisterRegister(.{ .i_, .mul }, dst, tmp);
- } else {
- try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0000_1111);
- if (src_abi_size > 1) {
- try self.asmRegisterRegisterImmediate(.{ .i_, .mul }, dst, dst, imm_0000_0001);
- }
- }
- // dst = temp3 = (temp2 + (temp2 >> 4)) & 0x0f...0f
- // dst = temp3 * 0x01...01
- if (src_abi_size > 1) {
- try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u((src_abi_size - 1) * 8));
- }
- // dst = (temp3 * 0x01...01) >> (bits - 8)
+ const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ if (self.hasFeature(.popcnt)) return self.genBinOpMir(
+ .{ ._, .popcnt },
+ if (src_abi_size > 1) src_ty else Type.u16,
+ .{ .register = dst_reg },
+ src_mcv,
+ );
+
+ const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - src_abi_size * 8);
+ const imm_0_1 = Immediate.u(mask / 0b1_1);
+ const imm_00_11 = Immediate.u(mask / 0b01_01);
+ const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
+ const imm_0000_0001 = Immediate.u(mask / 0b1111_1111);
+
+ const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ const dst = registerAlias(dst_reg, src_abi_size);
+ const tmp = registerAlias(tmp_reg, src_abi_size);
+ const imm = if (src_abi_size > 4)
+ try self.register_manager.allocReg(null, abi.RegisterClass.gp)
+ else
+ undefined;
+
+ if (!dst_contains_src) try self.genSetReg(dst, src_ty, src_mcv);
+ // dst = operand
+ try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
+ // tmp = operand
+ try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(1));
+ // tmp = operand >> 1
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1);
+ try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
+ } else try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0_1);
+ // tmp = (operand >> 1) & 0x55...55
+ try self.asmRegisterRegister(.{ ._, .sub }, dst, tmp);
+ // dst = temp1 = operand - ((operand >> 1) & 0x55...55)
+ try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
+ // tmp = temp1
+ try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(2));
+ // dst = temp1 >> 2
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11);
+ try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
+ try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
+ } else {
+ try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_00_11);
+ try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_00_11);
+ }
+ // tmp = temp1 & 0x33...33
+ // dst = (temp1 >> 2) & 0x33...33
+ try self.asmRegisterRegister(.{ ._, .add }, tmp, dst);
+ // tmp = temp2 = (temp1 & 0x33...33) + ((temp1 >> 2) & 0x33...33)
+ try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp);
+ // dst = temp2
+ try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(4));
+ // tmp = temp2 >> 4
+ try self.asmRegisterRegister(.{ ._, .add }, dst, tmp);
+ // dst = temp2 + (temp2 >> 4)
+ if (src_abi_size > 4) {
+ try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111);
+ try self.asmRegisterImmediate(.{ ._, .mov }, tmp, imm_0000_0001);
+ try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
+ try self.asmRegisterRegister(.{ .i_, .mul }, dst, tmp);
+ } else {
+ try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0000_1111);
+ if (src_abi_size > 1) {
+ try self.asmRegisterRegisterImmediate(.{ .i_, .mul }, dst, dst, imm_0000_0001);
}
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+ // dst = temp3 = (temp2 + (temp2 >> 4)) & 0x0f...0f
+ // dst = temp3 * 0x01...01
+ if (src_abi_size > 1) {
+ try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u((src_abi_size - 1) * 8));
+ }
+ // dst = (temp3 * 0x01...01) >> (bits - 8)
}
-fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, mem_ok: bool) !MCValue {
+fn genByteSwap(
+ self: *Self,
+ inst: Air.Inst.Index,
+ src_ty: Type,
+ src_mcv: MCValue,
+ mem_ok: bool,
+) !MCValue {
const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (src_ty.zigTypeTag(mod) == .Vector) return self.fail(
- "TODO implement byteSwap for {}",
+ "TODO implement genByteSwap for {}",
.{src_ty.fmt(mod)},
);
- const src_bits = self.regBitSize(src_ty);
+ const abi_size: u32 = @intCast(src_ty.abiSize(mod));
const src_lock = switch (src_mcv) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
};
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
- switch (src_bits) {
- else => return self.fail("TODO implement byteSwap for {}", .{
+ switch (abi_size) {
+ else => return self.fail("TODO implement genByteSwap for {}", .{
src_ty.fmt(mod),
}),
- 8 => return if ((mem_ok or src_mcv.isRegister()) and
+ 1 => return if ((mem_ok or src_mcv.isRegister()) and
self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
try self.copyToRegisterWithInstTracking(inst, src_ty, src_mcv),
- 16 => if ((mem_ok or src_mcv.isRegister()) and
+ 2 => if ((mem_ok or src_mcv.isRegister()) and
self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
{
try self.genBinOpMir(.{ ._l, .ro }, src_ty, src_mcv, .{ .immediate = 8 });
return src_mcv;
},
- 32, 64 => if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
+ 3...8 => if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
try self.genUnOpMir(.{ ._, .bswap }, src_ty, src_mcv);
return src_mcv;
},
+ 9...16 => {
+ switch (src_mcv) {
+ .register_pair => |src_regs| if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) {
+ for (src_regs) |src_reg| try self.asmRegister(.{ ._, .bswap }, src_reg.to64());
+ return .{ .register_pair = .{ src_regs[1], src_regs[0] } };
+ },
+ else => {},
+ }
+
+ const dst_regs =
+ try self.register_manager.allocRegs(2, .{ inst, inst }, abi.RegisterClass.gp);
+ const dst_locks = self.register_manager.lockRegsAssumeUnused(2, dst_regs);
+ defer for (dst_locks) |lock| self.register_manager.unlockReg(lock);
+
+ if (src_mcv.isMemory()) {
+ try self.asmRegisterMemory(
+ .{ ._, .movbe },
+ dst_regs[0],
+ src_mcv.address().offset(8).deref().mem(.qword),
+ );
+ try self.asmRegisterMemory(.{ ._, .movbe }, dst_regs[1], src_mcv.mem(.qword));
+ } else for (dst_regs, src_mcv.register_pair) |dst_reg, src_reg| {
+ try self.asmRegisterRegister(.{ ._, .mov }, dst_reg.to64(), src_reg.to64());
+ try self.asmRegister(.{ ._, .bswap }, dst_reg.to64());
+ }
+ return .{ .register_pair = dst_regs };
+ },
}
if (src_mcv.isRegister()) {
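When the popcnt feature is unavailable, genPopCount above emits the SWAR bit-counting sequence that its inline comments describe: 2-bit pair sums, 4-bit sums, 8-bit sums, then a multiply that accumulates every byte into the top byte; for 9..16-byte integers the caller runs it once per 64-bit limb and adds the two counts. The same identity as a standalone Zig sketch (hypothetical swarPopCount helper, not the MIR lowering itself):

const std = @import("std");

fn swarPopCount(x: u64) u64 {
    var v = x;
    v -= (v >> 1) & 0x5555555555555555; // count bits within each 2-bit pair
    v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333); // 4-bit sums
    v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f; // 8-bit sums
    return (v *% 0x0101010101010101) >> 56; // accumulate all byte sums into the top byte
}

test "swar popcount matches @popCount" {
    var x: u64 = 0x0123456789abcdef;
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        try std.testing.expectEqual(@as(u64, @popCount(x)), swarPopCount(x));
        x = x *% 0x9e3779b97f4a7c15 +% 1;
    }
}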
@@ -5005,10 +5155,10 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
defer self.register_manager.unlockReg(dst_lock);
try self.genSetReg(dst_mcv.register, src_ty, src_mcv);
- switch (src_bits) {
+ switch (abi_size) {
else => unreachable,
- 16 => try self.genBinOpMir(.{ ._l, .ro }, src_ty, dst_mcv, .{ .immediate = 8 }),
- 32, 64 => try self.genUnOpMir(.{ ._, .bswap }, src_ty, dst_mcv),
+ 2 => try self.genBinOpMir(.{ ._l, .ro }, src_ty, dst_mcv, .{ .immediate = 8 }),
+ 3...8 => try self.genUnOpMir(.{ ._, .bswap }, src_ty, dst_mcv),
}
} else try self.genBinOpMir(.{ ._, .movbe }, src_ty, dst_mcv, src_mcv);
return dst_mcv;
@@ -5028,18 +5178,19 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.typeOf(ty_op.operand);
+ const abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const bit_size: u32 = @intCast(src_ty.bitSize(mod));
const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
- switch (self.regExtraBits(src_ty)) {
- 0 => {},
- else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
- src_ty,
- dst_mcv,
- .{ .immediate = extra },
- ),
- }
+ const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, true);
+
+ const extra_bits = abi_size * 8 - bit_size;
+ const signedness: std.builtin.Signedness =
+ if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+ if (extra_bits > 0) try self.genShiftBinOpMir(switch (signedness) {
+ .signed => .{ ._r, .sa },
+ .unsigned => .{ ._r, .sh },
+ }, src_ty, dst_mcv, .{ .immediate = extra_bits });
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
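The new 9...16 byte case in genByteSwap swaps each 64-bit limb (bswap, or movbe when loading from memory) and exchanges the pair; airByteSwap then shifts away any extra non-ABI bits. The limb-exchange identity it relies on, checked against @byteSwap:

const std = @import("std");

test "128-bit byte swap as two exchanged 64-bit swaps" {
    const x: u128 = 0x000102030405060708090a0b0c0d0e0f;
    const lo: u64 = @truncate(x);
    const hi: u64 = @intCast(x >> 64);
    // bswap each limb, then move the swapped low limb into the high position.
    const swapped = (@as(u128, @byteSwap(lo)) << 64) | @byteSwap(hi);
    try std.testing.expectEqual(@byteSwap(x), swapped);
}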
@@ -5049,37 +5200,43 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.typeOf(ty_op.operand);
- const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const abi_size: u32 = @intCast(src_ty.abiSize(mod));
+ const bit_size: u32 = @intCast(src_ty.bitSize(mod));
const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
- const dst_reg = dst_mcv.register;
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const dst_mcv = try self.genByteSwap(inst, src_ty, src_mcv, false);
+ const dst_locks: [2]?RegisterLock = switch (dst_mcv) {
+ .register => |dst_reg| .{ self.register_manager.lockReg(dst_reg), null },
+ .register_pair => |dst_regs| self.register_manager.lockRegs(2, dst_regs),
+ else => unreachable,
+ };
+ defer for (dst_locks) |dst_lock| if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const tmp_lock = self.register_manager.lockReg(tmp_reg);
defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
- {
- const dst = registerAlias(dst_reg, src_abi_size);
- const tmp = registerAlias(tmp_reg, src_abi_size);
- const imm = if (src_abi_size > 4)
- try self.register_manager.allocReg(null, abi.RegisterClass.gp)
- else
- undefined;
+ const limb_abi_size: u32 = @min(abi_size, 8);
+ const tmp = registerAlias(tmp_reg, limb_abi_size);
+ const imm = if (limb_abi_size > 4)
+ try self.register_manager.allocReg(null, abi.RegisterClass.gp)
+ else
+ undefined;
+
+ const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - limb_abi_size * 8);
+ const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
+ const imm_00_11 = Immediate.u(mask / 0b01_01);
+ const imm_0_1 = Immediate.u(mask / 0b1_1);
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - src_abi_size * 8);
- const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
- const imm_00_11 = Immediate.u(mask / 0b01_01);
- const imm_0_1 = Immediate.u(mask / 0b1_1);
+ for (dst_mcv.getRegs()) |dst_reg| {
+ const dst = registerAlias(dst_reg, limb_abi_size);
// dst = temp1 = bswap(operand)
try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst);
// tmp = temp1
try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(4));
// dst = temp1 >> 4
- if (src_abi_size > 4) {
+ if (limb_abi_size > 4) {
try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111);
try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
@@ -5097,7 +5254,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
// tmp = temp2
try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(2));
// dst = temp2 >> 2
- if (src_abi_size > 4) {
+ if (limb_abi_size > 4) {
try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11);
try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
@@ -5109,7 +5266,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
// dst = (temp2 >> 2) & 0x33...33
try self.asmRegisterMemory(
.{ ._, .lea },
- if (src_abi_size > 4) tmp.to64() else tmp.to32(),
+ if (limb_abi_size > 4) tmp.to64() else tmp.to32(),
Memory.sib(.qword, .{
.base = .{ .reg = dst.to64() },
.scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 },
@@ -5120,7 +5277,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
// dst = temp3
try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(1));
// tmp = temp3 >> 1
- if (src_abi_size > 4) {
+ if (limb_abi_size > 4) {
try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1);
try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm);
try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm);
@@ -5132,7 +5289,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
// tmp = (temp3 >> 1) & 0x55...55
try self.asmRegisterMemory(
.{ ._, .lea },
- if (src_abi_size > 4) dst.to64() else dst.to32(),
+ if (limb_abi_size > 4) dst.to64() else dst.to32(),
Memory.sib(.qword, .{
.base = .{ .reg = tmp.to64() },
.scale_index = .{ .index = dst.to64(), .scale = 1 << 1 },
@@ -5141,15 +5298,13 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
// dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
}
- switch (self.regExtraBits(src_ty)) {
- 0 => {},
- else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
- src_ty,
- dst_mcv,
- .{ .immediate = extra },
- ),
- }
+ const extra_bits = abi_size * 8 - bit_size;
+ const signedness: std.builtin.Signedness =
+ if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+ if (extra_bits > 0) try self.genShiftBinOpMir(switch (signedness) {
+ .signed => .{ ._r, .sa },
+ .unsigned => .{ ._r, .sh },
+ }, src_ty, dst_mcv, .{ .immediate = extra_bits });
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
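airBitReverse byte-swaps first (via genByteSwap), then reverses the bits inside each 64-bit limb with the shift/and/lea sequence commented above, and finally shifts out the extra non-ABI bits. Per limb this is the classic nibble, 2-bit, and 1-bit swap; a sketch with a hypothetical swarBitReverse64 helper (the emitted code combines the two disjoint halves with lea where the sketch uses a plain or):

const std = @import("std");

fn swarBitReverse64(x: u64) u64 {
    var v = @byteSwap(x); // reverse byte order first, as genByteSwap does
    v = ((v >> 4) & 0x0f0f0f0f0f0f0f0f) | ((v & 0x0f0f0f0f0f0f0f0f) << 4); // swap nibbles
    v = ((v >> 2) & 0x3333333333333333) | ((v & 0x3333333333333333) << 2); // swap 2-bit groups
    v = ((v >> 1) & 0x5555555555555555) | ((v & 0x5555555555555555) << 1); // swap adjacent bits
    return v;
}

test "swar bit reverse matches @bitReverse" {
    try std.testing.expectEqual(@bitReverse(@as(u64, 0x0123456789abcdef)), swarBitReverse64(0x0123456789abcdef));
    try std.testing.expectEqual(@bitReverse(@as(u64, 0x8000000000000001)), swarBitReverse64(0x8000000000000001));
}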
@@ -5210,13 +5365,11 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
.child = (try mod.intType(.signed, scalar_bits)).ip_index,
});
- const sign_val = switch (tag) {
+ const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = switch (tag) {
.neg => try vec_ty.minInt(mod, vec_ty),
.abs => try vec_ty.maxInt(mod, vec_ty),
else => unreachable,
- };
-
- const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = sign_val });
+ } });
const sign_mem = if (sign_mcv.isMemory())
sign_mcv.mem(Memory.PtrSize.fromSize(abi_size))
else
@@ -5285,7 +5438,6 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ty = self.typeOf(un_op);
-
return self.floatSign(inst, un_op, ty);
}
@@ -5305,7 +5457,7 @@ const RoundMode = packed struct(u5) {
precision: enum(u1) {
normal = 0b0,
inexact = 0b1,
- },
+ } = .normal,
};
fn airRound(self: *Self, inst: Air.Inst.Index, mode: RoundMode) !void {
@@ -5371,11 +5523,11 @@ fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MC
if (ty.zigTypeTag(mod) != .Float)
return self.fail("TODO implement genRound for {}", .{ty.fmt(mod)});
- var callee: ["__trunc?".len]u8 = undefined;
+ var callee_buf: ["__trunc?".len]u8 = undefined;
return try self.genCall(.{ .lib = .{
.return_type = ty.toIntern(),
.param_types = &.{ty.toIntern()},
- .callee = std.fmt.bufPrint(&callee, "{s}{s}{s}", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "{s}{s}{s}", .{
floatLibcAbiPrefix(ty),
switch (mode.mode) {
.down => "floor",
@@ -5586,11 +5738,11 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
80, 128 => true,
else => unreachable,
}) {
- var callee: ["__sqrt?".len]u8 = undefined;
+ var callee_buf: ["__sqrt?".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = ty.toIntern(),
.param_types = &.{ty.toIntern()},
- .callee = std.fmt.bufPrint(&callee, "{s}sqrt{s}", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "{s}sqrt{s}", .{
floatLibcAbiPrefix(ty),
floatLibcAbiSuffix(ty),
}) catch unreachable,
@@ -5623,7 +5775,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
break :result dst_mcv;
},
@@ -5653,7 +5805,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
break :result dst_mcv;
},
@@ -5678,7 +5830,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
.{ .v_, .cvtps2ph },
dst_reg,
wide_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
break :result dst_mcv;
},
@@ -5741,11 +5893,11 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
fn airUnaryMath(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ty = self.typeOf(un_op);
- var callee: ["__round?".len]u8 = undefined;
+ var callee_buf: ["__round?".len]u8 = undefined;
const result = try self.genCall(.{ .lib = .{
.return_type = ty.toIntern(),
.param_types = &.{ty.toIntern()},
- .callee = std.fmt.bufPrint(&callee, "{s}{s}{s}", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "{s}{s}{s}", .{
floatLibcAbiPrefix(ty),
switch (tag) {
.sin,
@@ -5782,7 +5934,7 @@ fn reuseOperandAdvanced(
operand: Air.Inst.Ref,
op_index: Liveness.OperandInt,
mcv: MCValue,
- tracked_inst: Air.Inst.Index,
+ maybe_tracked_inst: ?Air.Inst.Index,
) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
@@ -5791,11 +5943,13 @@ fn reuseOperandAdvanced(
.register, .register_pair => for (mcv.getRegs()) |reg| {
// If it's in the registers table, need to associate the register(s) with the
// new instruction.
- if (!self.register_manager.isRegFree(reg)) {
- if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
- self.register_manager.registers[index] = tracked_inst;
+ if (maybe_tracked_inst) |tracked_inst| {
+ if (!self.register_manager.isRegFree(reg)) {
+ if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
+ self.register_manager.registers[index] = tracked_inst;
+ }
}
- }
+ } else self.register_manager.freeReg(reg);
},
.load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false,
else => return false,
@@ -5804,7 +5958,7 @@ fn reuseOperandAdvanced(
// Prevent the operand deaths processing code from deallocating it.
self.liveness.clearOperandDeath(inst, op_index);
const op_inst = Air.refToIndex(operand).?;
- self.getResolvedInstValue(op_inst).reuse(self, tracked_inst, op_inst);
+ self.getResolvedInstValue(op_inst).reuse(self, maybe_tracked_inst, op_inst);
return true;
}
@@ -6934,11 +7088,11 @@ fn genMulDivBinOp(
),
else => {},
};
- var callee: ["__udiv?i3".len]u8 = undefined;
+ var callee_buf: ["__udiv?i3".len]u8 = undefined;
return try self.genCall(.{ .lib = .{
.return_type = dst_ty.toIntern(),
.param_types = &.{ src_ty.toIntern(), src_ty.toIntern() },
- .callee = std.fmt.bufPrint(&callee, "__{s}{s}{c}i3", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "__{s}{s}{c}i3", .{
if (signed) "" else "u",
switch (tag) {
.div_trunc, .div_exact => "div",
@@ -7161,43 +7315,163 @@ fn genBinOp(
const rhs_ty = self.typeOf(rhs_air);
const abi_size: u32 = @intCast(lhs_ty.abiSize(mod));
- if (lhs_ty.isRuntimeFloat() and (air_tag == .rem or switch (lhs_ty.floatBits(self.target.*)) {
- 16 => !self.hasFeature(.f16c),
- 32, 64 => false,
- 80, 128 => true,
- else => unreachable,
- })) {
- var callee: ["__mod?f3".len]u8 = undefined;
+ if (lhs_ty.isRuntimeFloat()) libcall: {
+ const float_bits = lhs_ty.floatBits(self.target.*);
+ const type_needs_libcall = switch (float_bits) {
+ 16 => !self.hasFeature(.f16c),
+ 32, 64 => false,
+ 80, 128 => true,
+ else => unreachable,
+ };
+ switch (air_tag) {
+ .rem, .mod => {},
+ else => if (!type_needs_libcall) break :libcall,
+ }
+ var callee_buf: ["__mod?f3".len]u8 = undefined;
+ const callee = switch (air_tag) {
+ .add,
+ .sub,
+ .mul,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ => std.fmt.bufPrint(&callee_buf, "__{s}{c}f3", .{
+ @tagName(air_tag)[0..3],
+ floatCompilerRtAbiName(float_bits),
+ }),
+ .rem, .mod, .min, .max => std.fmt.bufPrint(&callee_buf, "{s}f{s}{s}", .{
+ floatLibcAbiPrefix(lhs_ty),
+ switch (air_tag) {
+ .rem, .mod => "mod",
+ .min => "min",
+ .max => "max",
+ else => unreachable,
+ },
+ floatLibcAbiSuffix(lhs_ty),
+ }),
+ else => return self.fail("TODO implement genBinOp for {s} {}", .{
+ @tagName(air_tag), lhs_ty.fmt(mod),
+ }),
+ } catch unreachable;
const result = try self.genCall(.{ .lib = .{
.return_type = lhs_ty.toIntern(),
.param_types = &.{ lhs_ty.toIntern(), rhs_ty.toIntern() },
- .callee = switch (air_tag) {
- .add,
- .sub,
- .mul,
- .div_float,
- .div_trunc,
- .div_floor,
- => std.fmt.bufPrint(&callee, "__{s}{c}f3", .{
- @tagName(air_tag)[0..3],
- floatCompilerRtAbiName(lhs_ty.floatBits(self.target.*)),
- }),
- .rem, .min, .max => std.fmt.bufPrint(&callee, "{s}f{s}{s}", .{
- floatLibcAbiPrefix(lhs_ty),
- switch (air_tag) {
- .rem => "mod",
- .min => "min",
- .max => "max",
- else => unreachable,
- },
- floatLibcAbiSuffix(lhs_ty),
- }),
- else => return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
- }),
- } catch unreachable,
+ .callee = callee,
} }, &.{ lhs_ty, rhs_ty }, &.{ .{ .air_ref = lhs_air }, .{ .air_ref = rhs_air } });
return switch (air_tag) {
+ .mod => result: {
+ const adjusted: MCValue = if (type_needs_libcall) adjusted: {
+ var add_callee_buf: ["__add?f3".len]u8 = undefined;
+ break :adjusted try self.genCall(.{ .lib = .{
+ .return_type = lhs_ty.toIntern(),
+ .param_types = &.{
+ lhs_ty.toIntern(),
+ rhs_ty.toIntern(),
+ },
+ .callee = std.fmt.bufPrint(&add_callee_buf, "__add{c}f3", .{
+ floatCompilerRtAbiName(float_bits),
+ }) catch unreachable,
+ } }, &.{ lhs_ty, rhs_ty }, &.{ result, .{ .air_ref = rhs_air } });
+ } else switch (float_bits) {
+ 16, 32, 64 => adjusted: {
+ const dst_reg = switch (result) {
+ .register => |reg| reg,
+ else => if (maybe_inst) |inst|
+ (try self.copyToRegisterWithInstTracking(inst, lhs_ty, result)).register
+ else
+ try self.copyToTmpRegister(lhs_ty, result),
+ };
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+
+ const rhs_mcv = try self.resolveInst(rhs_air);
+ const src_mcv: MCValue = if (float_bits == 16) src: {
+ assert(self.hasFeature(.f16c));
+ const tmp_reg = (try self.register_manager.allocReg(
+ null,
+ abi.RegisterClass.sse,
+ )).to128();
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ if (rhs_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
+ .{ .vp_w, .insr },
+ dst_reg,
+ dst_reg,
+ rhs_mcv.mem(.word),
+ Immediate.u(1),
+ ) else try self.asmRegisterRegisterRegister(
+ .{ .vp_, .unpcklwd },
+ dst_reg,
+ dst_reg,
+ (if (rhs_mcv.isRegister())
+ rhs_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, rhs_mcv)).to128(),
+ );
+ try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg);
+ break :src .{ .register = tmp_reg };
+ } else rhs_mcv;
+
+ if (self.hasFeature(.avx)) {
+ const mir_tag: Mir.Inst.FixedTag = switch (float_bits) {
+ 16, 32 => .{ .v_ss, .add },
+ 64 => .{ .v_sd, .add },
+ else => unreachable,
+ };
+ if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
+ mir_tag,
+ dst_reg,
+ dst_reg,
+ src_mcv.mem(Memory.PtrSize.fromBitSize(float_bits)),
+ ) else try self.asmRegisterRegisterRegister(
+ mir_tag,
+ dst_reg,
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ } else {
+ const mir_tag: Mir.Inst.FixedTag = switch (float_bits) {
+ 32 => .{ ._ss, .add },
+ 64 => .{ ._sd, .add },
+ else => unreachable,
+ };
+ if (src_mcv.isMemory()) try self.asmRegisterMemory(
+ mir_tag,
+ dst_reg,
+ src_mcv.mem(Memory.PtrSize.fromBitSize(float_bits)),
+ ) else try self.asmRegisterRegister(
+ mir_tag,
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ }
+
+ if (float_bits == 16) try self.asmRegisterRegisterImmediate(
+ .{ .v_, .cvtps2ph },
+ dst_reg,
+ dst_reg,
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
+ );
+ break :adjusted .{ .register = dst_reg };
+ },
+ 80, 128 => return self.fail("TODO implement genBinOp for {s} of {}", .{
+ @tagName(air_tag), lhs_ty.fmt(mod),
+ }),
+ else => unreachable,
+ };
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = lhs_ty.toIntern(),
+ .param_types = &.{ lhs_ty.toIntern(), rhs_ty.toIntern() },
+ .callee = callee,
+ } }, &.{ lhs_ty, rhs_ty }, &.{ adjusted, .{ .air_ref = rhs_air } });
+ },
.div_trunc, .div_floor => try self.genRoundLibcall(lhs_ty, result, .{
.mode = switch (air_tag) {
.div_trunc => .zero,
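The new .mod branch in the hunk above first takes the truncated remainder (the fmod libcall), then adds the divisor back and takes the remainder once more, relying on mod(a, b) == rem(rem(a, b) + b, b). A small sketch of that identity with a hypothetical floorMod helper:

const std = @import("std");

// Floored modulo built from a truncated remainder, matching the two-step
// lowering (fmod, add rhs, fmod again).
fn floorMod(a: f64, b: f64) f64 {
    return @rem(@rem(a, b) + b, b);
}

test "mod via rem matches @mod" {
    try std.testing.expectEqual(@mod(@as(f64, -5.5), 3.0), floorMod(-5.5, 3.0));
    try std.testing.expectEqual(@mod(@as(f64, 5.5), 3.0), floorMod(5.5, 3.0));
}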
@@ -7219,6 +7493,7 @@ fn genBinOp(
const maybe_mask_reg = switch (air_tag) {
else => null,
+ .rem, .mod => unreachable,
.max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias(
if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
try self.register_manager.getReg(.xmm0, null);
@@ -7226,20 +7501,23 @@ fn genBinOp(
} else try self.register_manager.allocReg(null, abi.RegisterClass.sse),
abi_size,
) else null,
- .rem, .mod => return self.fail("TODO implement genBinOp for {s} {}", .{
- @tagName(air_tag), lhs_ty.fmt(mod),
- }),
};
const mask_lock =
if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null;
defer if (mask_lock) |lock| self.register_manager.unlockReg(lock);
- const lhs_mcv = try self.resolveInst(lhs_air);
- const rhs_mcv = try self.resolveInst(rhs_air);
+ const ordered_air = if (lhs_ty.isVector(mod) and lhs_ty.childType(mod).isAbiInt(mod) and
+ switch (air_tag) {
+ .cmp_lt, .cmp_gte => true,
+ else => false,
+ }) .{ .lhs = rhs_air, .rhs = lhs_air } else .{ .lhs = lhs_air, .rhs = rhs_air };
+
+ const lhs_mcv = try self.resolveInst(ordered_air.lhs);
+ const rhs_mcv = try self.resolveInst(ordered_air.rhs);
switch (lhs_mcv) {
.immediate => |imm| switch (imm) {
0 => switch (air_tag) {
- .sub, .sub_wrap => return self.genUnOp(maybe_inst, .neg, rhs_air),
+ .sub, .sub_wrap => return self.genUnOp(maybe_inst, .neg, ordered_air.rhs),
else => {},
},
else => {},
@@ -7288,11 +7566,15 @@ fn genBinOp(
var copied_to_dst = true;
const dst_mcv: MCValue = dst: {
if (maybe_inst) |inst| {
- if ((!vec_op or lhs_mcv.isRegister()) and self.reuseOperand(inst, lhs_air, 0, lhs_mcv)) {
+ const tracked_inst = switch (air_tag) {
+ else => inst,
+ .cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => null,
+ };
+ if ((!vec_op or lhs_mcv.isRegister()) and
+ self.reuseOperandAdvanced(inst, ordered_air.lhs, 0, lhs_mcv, tracked_inst))
break :dst lhs_mcv;
- }
if (is_commutative and (!vec_op or rhs_mcv.isRegister()) and
- self.reuseOperand(inst, rhs_air, 1, rhs_mcv))
+ self.reuseOperandAdvanced(inst, ordered_air.rhs, 1, rhs_mcv, tracked_inst))
{
flipped = true;
break :dst rhs_mcv;
@@ -7613,7 +7895,7 @@ fn genBinOp(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
return dst_mcv;
},
@@ -7657,7 +7939,10 @@ fn genBinOp(
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub },
- .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
+ .bit_and => if (self.hasFeature(.avx))
+ .{ .vp_, .@"and" }
+ else
+ .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
@@ -7688,6 +7973,20 @@ fn genBinOp(
else
null,
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx))
+ .{ .vp_b, .cmpgt }
+ else
+ .{ .p_b, .cmpgt },
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_b, .cmpeq } else .{ .p_b, .cmpeq },
else => null,
},
17...32 => switch (air_tag) {
@@ -7708,6 +8007,17 @@ fn genBinOp(
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx)) .{ .vp_b, .cmpgt } else null,
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_b, .cmpeq } else null,
else => null,
},
else => null,
@@ -7723,7 +8033,10 @@ fn genBinOp(
.mul,
.mul_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .mull } else .{ .p_d, .mull },
- .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
+ .bit_and => if (self.hasFeature(.avx))
+ .{ .vp_, .@"and" }
+ else
+ .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
@@ -7746,6 +8059,20 @@ fn genBinOp(
else
.{ .p_w, .maxu },
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx))
+ .{ .vp_w, .cmpgt }
+ else
+ .{ .p_w, .cmpgt },
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_w, .cmpeq } else .{ .p_w, .cmpeq },
else => null,
},
9...16 => switch (air_tag) {
@@ -7769,6 +8096,17 @@ fn genBinOp(
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx)) .{ .vp_w, .cmpgt } else null,
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_w, .cmpeq } else null,
else => null,
},
else => null,
@@ -7789,7 +8127,10 @@ fn genBinOp(
.{ .p_d, .mull }
else
null,
- .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
+ .bit_and => if (self.hasFeature(.avx))
+ .{ .vp_, .@"and" }
+ else
+ .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
.min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
@@ -7820,6 +8161,20 @@ fn genBinOp(
else
null,
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx))
+ .{ .vp_d, .cmpgt }
+ else
+ .{ .p_d, .cmpgt },
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else .{ .p_d, .cmpeq },
else => null,
},
5...8 => switch (air_tag) {
@@ -7843,6 +8198,17 @@ fn genBinOp(
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else null,
else => null,
},
else => null,
@@ -7855,9 +8221,33 @@ fn genBinOp(
.sub,
.sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub },
- .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
+ .bit_and => if (self.hasFeature(.avx))
+ .{ .vp_, .@"and" }
+ else
+ .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_gt,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx))
+ .{ .vp_q, .cmpgt }
+ else if (self.hasFeature(.sse4_2))
+ .{ .p_q, .cmpgt }
+ else
+ null,
+ .unsigned => null,
+ },
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx))
+ .{ .vp_q, .cmpeq }
+ else if (self.hasFeature(.sse4_1))
+ .{ .p_q, .cmpeq }
+ else
+ null,
else => null,
},
3...4 => switch (air_tag) {
@@ -7870,6 +8260,17 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
+ .cmp_eq,
+ .cmp_neq,
+ => if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else null,
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_gt,
+ .cmp_gte,
+ => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
+ .signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
+ .unsigned => null,
+ },
else => null,
},
else => null,
@@ -7923,7 +8324,7 @@ fn genBinOp(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
return dst_mcv;
},
@@ -7974,7 +8375,7 @@ fn genBinOp(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
return dst_mcv;
},
@@ -8017,7 +8418,7 @@ fn genBinOp(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg,
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
return dst_mcv;
},
@@ -8060,7 +8461,7 @@ fn genBinOp(
.{ .v_, .cvtps2ph },
dst_reg,
dst_reg.to256(),
- Immediate.u(0b1_00),
+ Immediate.u(@as(u5, @bitCast(RoundMode{ .mode = .mxcsr }))),
);
return dst_mcv;
},
@@ -8435,6 +8836,62 @@ fn genBinOp(
);
}
},
+ .cmp_lt,
+ .cmp_lte,
+ .cmp_eq,
+ .cmp_gte,
+ .cmp_gt,
+ .cmp_neq,
+ => {
+ switch (air_tag) {
+ .cmp_lt,
+ .cmp_eq,
+ .cmp_gt,
+ => {},
+ .cmp_lte,
+ .cmp_gte,
+ .cmp_neq,
+ => {
+ const unsigned_ty = try lhs_ty.toUnsigned(mod);
+ const not_mcv = try self.genTypedValue(.{
+ .ty = lhs_ty,
+ .val = try unsigned_ty.maxInt(mod, unsigned_ty),
+ });
+ const not_mem = if (not_mcv.isMemory())
+ not_mcv.mem(Memory.PtrSize.fromSize(abi_size))
+ else
+ Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{
+ .reg = try self.copyToTmpRegister(Type.usize, not_mcv.address()),
+ } });
+ switch (mir_tag[0]) {
+ .vp_b, .vp_d, .vp_q, .vp_w => try self.asmRegisterRegisterMemory(
+ .{ .vp_, .xor },
+ dst_reg,
+ dst_reg,
+ not_mem,
+ ),
+ .p_b, .p_d, .p_q, .p_w => try self.asmRegisterMemory(
+ .{ .p_, .xor },
+ dst_reg,
+ not_mem,
+ ),
+ else => unreachable,
+ }
+ },
+ else => unreachable,
+ }
+
+ const gp_reg = try self.register_manager.allocReg(maybe_inst, abi.RegisterClass.gp);
+ const gp_lock = self.register_manager.lockRegAssumeUnused(gp_reg);
+ defer self.register_manager.unlockReg(gp_lock);
+
+ try self.asmRegisterRegister(switch (mir_tag[0]) {
+ .vp_b, .vp_d, .vp_q, .vp_w => .{ .vp_b, .movmsk },
+ .p_b, .p_d, .p_q, .p_w => .{ .p_b, .movmsk },
+ else => unreachable,
+ }, gp_reg.to32(), dst_reg);
+ return .{ .register = gp_reg };
+ },
else => unreachable,
}
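SSE/AVX integer vector compares only provide signed greater-than and equality, so genBinOp swaps the operands for cmp_lt/cmp_gte (a < b becomes b > a) and, in the block above, turns cmp_lte/cmp_gte/cmp_neq into the complement by xoring the compare result with an all-ones constant before packing the lanes into a GPR via pmovmskb. The per-lane identity being used:

const std = @import("std");

test "lte/gte/neq are complements of gt/lt/eq per lane" {
    const V = @Vector(4, i32);
    const a: V = .{ 1, 5, 3, -2 };
    const b: V = .{ 4, 5, 2, -7 };
    const gt: [4]bool = a > b; // what pcmpgt computes per lane
    const lte: [4]bool = a <= b; // what cmp_lte must produce
    for (gt, lte) |g, l| try std.testing.expect(l == !g);
}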
@@ -9378,11 +9835,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
80, 128 => true,
else => unreachable,
}) {
- var callee: ["__???f2".len]u8 = undefined;
+ var callee_buf: ["__???f2".len]u8 = undefined;
const ret = try self.genCall(.{ .lib = .{
.return_type = .i32_type,
.param_types = &.{ ty.toIntern(), ty.toIntern() },
- .callee = std.fmt.bufPrint(&callee, "__{s}{c}f2", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "__{s}{c}f2", .{
switch (op) {
.eq => "eq",
.neq => "ne",
@@ -9741,8 +10198,15 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
}
fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
- return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch});
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+ const dst_mcv = try self.genBinOp(
+ inst,
+ Air.Inst.Tag.fromCmpOp(extra.compareOperator(), false),
+ extra.lhs,
+ extra.rhs,
+ );
+ return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
}
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
@@ -10507,29 +10971,41 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
},
else => return self.fail("invalid constraint: '{s}'", .{constraint}),
};
+ const is_early_clobber = constraint[1] == '&';
+ const rest = constraint[@as(usize, 1) + @intFromBool(is_early_clobber) ..];
const arg_mcv: MCValue = arg_mcv: {
- const arg_maybe_reg: ?Register = if (mem.eql(u8, constraint[1..], "r"))
- self.register_manager.tryAllocReg(maybe_inst, self.regClassForType(ty)) orelse
- return self.fail("ran out of registers lowering inline asm", .{})
- else if (mem.eql(u8, constraint[1..], "m"))
+ const arg_maybe_reg: ?Register = if (mem.eql(u8, rest, "r") or
+ mem.eql(u8, rest, "f") or mem.eql(u8, rest, "x"))
+ registerAlias(
+ self.register_manager.tryAllocReg(maybe_inst, switch (rest[0]) {
+ 'r' => abi.RegisterClass.gp,
+ 'f' => abi.RegisterClass.x87,
+ 'x' => abi.RegisterClass.sse,
+ else => unreachable,
+ }) orelse return self.fail("ran out of registers lowering inline asm", .{}),
+ @intCast(ty.abiSize(mod)),
+ )
+ else if (mem.eql(u8, rest, "m"))
if (output != .none) null else return self.fail(
"memory constraint unsupported for asm result: '{s}'",
.{constraint},
)
- else if (mem.eql(u8, constraint[1..], "g") or
- mem.eql(u8, constraint[1..], "rm") or mem.eql(u8, constraint[1..], "mr") or
- mem.eql(u8, constraint[1..], "r,m") or mem.eql(u8, constraint[1..], "m,r"))
- self.register_manager.tryAllocReg(maybe_inst, self.regClassForType(ty)) orelse
+ else if (mem.eql(u8, rest, "g") or
+ mem.eql(u8, rest, "rm") or mem.eql(u8, rest, "mr") or
+ mem.eql(u8, rest, "r,m") or mem.eql(u8, rest, "m,r"))
+ self.register_manager.tryAllocReg(maybe_inst, abi.RegisterClass.gp) orelse
if (output != .none)
null
else
return self.fail("ran out of registers lowering inline asm", .{})
- else if (mem.startsWith(u8, constraint[1..], "{") and mem.endsWith(u8, constraint[1..], "}"))
- parseRegName(constraint[1 + "{".len .. constraint.len - "}".len]) orelse
+ else if (mem.startsWith(u8, rest, "{") and mem.endsWith(u8, rest, "}"))
+ parseRegName(rest["{".len .. rest.len - "}".len]) orelse
return self.fail("invalid register constraint: '{s}'", .{constraint})
- else if (constraint.len == 2 and std.ascii.isDigit(constraint[1])) {
- const index = std.fmt.charToDigit(constraint[0], 10) catch unreachable;
- if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{constraint});
+ else if (rest.len == 1 and std.ascii.isDigit(rest[0])) {
+ const index = std.fmt.charToDigit(rest[0], 10) catch unreachable;
+ if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{
+ constraint,
+ });
break :arg_mcv args.items[index];
} else return self.fail("invalid constraint: '{s}'", .{constraint});
break :arg_mcv if (arg_maybe_reg) |reg| .{ .register = reg } else arg: {
@@ -10563,10 +11039,29 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.typeOf(input);
const input_mcv = try self.resolveInst(input);
- const arg_mcv: MCValue = if (mem.eql(u8, constraint, "r")) switch (input_mcv) {
- .register => input_mcv,
- else => .{ .register = try self.copyToTmpRegister(ty, input_mcv) },
- } else if (mem.eql(u8, constraint, "m")) arg: {
+ const arg_mcv: MCValue = if (mem.eql(u8, constraint, "r") or
+ mem.eql(u8, constraint, "f") or mem.eql(u8, constraint, "x"))
+ arg: {
+ const rc = switch (constraint[0]) {
+ 'r' => abi.RegisterClass.gp,
+ 'f' => abi.RegisterClass.x87,
+ 'x' => abi.RegisterClass.sse,
+ else => unreachable,
+ };
+ if (input_mcv.isRegister() and
+ rc.isSet(RegisterManager.indexOfRegIntoTracked(input_mcv.getReg().?).?))
+ break :arg input_mcv;
+ const reg = try self.register_manager.allocReg(null, rc);
+ try self.genSetReg(reg, ty, input_mcv);
+ break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(mod))) };
+ } else if (mem.eql(u8, constraint, "i") or mem.eql(u8, constraint, "n"))
+ switch (input_mcv) {
+ .immediate => |imm| .{ .immediate = imm },
+ else => return self.fail("immediate operand requires comptime value: '{s}'", .{
+ constraint,
+ }),
+ }
+ else if (mem.eql(u8, constraint, "m")) arg: {
switch (input_mcv) {
.memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |_|
break :arg input_mcv,
@@ -10790,6 +11285,10 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
arg_map.get(op_str["%[".len .. colon orelse op_str.len - "]".len]) orelse
return self.fail("no matching constraint: '{s}'", .{op_str})
]) {
+ .immediate => |imm| if (mem.eql(u8, modifier, "") or mem.eql(u8, modifier, "c"))
+ .{ .imm = Immediate.u(imm) }
+ else
+ return self.fail("invalid modifier: '{s}'", .{modifier}),
.register => |reg| if (mem.eql(u8, modifier, ""))
.{ .reg = reg }
else
@@ -10816,6 +11315,10 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
) }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
+ .lea_got => |sym_index| if (mem.eql(u8, modifier, "P"))
+ .{ .reg = try self.copyToTmpRegister(Type.usize, .{ .lea_got = sym_index }) }
+ else
+ return self.fail("invalid modifier: '{s}'", .{modifier}),
else => return self.fail("invalid constraint: '{s}'", .{op_str}),
};
} else if (mem.startsWith(u8, op_str, "$")) {
@@ -10918,6 +11421,13 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
.none => self.asmRegisterImmediate(mnem_fixed_tag, reg1, imm0),
.reg => |reg2| switch (ops[3]) {
.none => self.asmRegisterRegisterImmediate(mnem_fixed_tag, reg2, reg1, imm0),
+ .reg => |reg3| self.asmRegisterRegisterRegisterImmediate(
+ mnem_fixed_tag,
+ reg3,
+ reg2,
+ reg1,
+ imm0,
+ ),
else => error.InvalidInstruction,
},
.mem => |mem2| switch (ops[3]) {
@@ -11988,11 +12498,11 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
src_ty.fmt(mod), dst_ty.fmt(mod),
});
- var callee: ["__floatun?i?f".len]u8 = undefined;
+ var callee_buf: ["__floatun?i?f".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = dst_ty.toIntern(),
.param_types = &.{src_ty.toIntern()},
- .callee = std.fmt.bufPrint(&callee, "__float{s}{c}i{c}f", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "__float{s}{c}i{c}f", .{
switch (src_signedness) {
.signed => "",
.unsigned => "un",
@@ -12067,11 +12577,11 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
src_ty.fmt(mod), dst_ty.fmt(mod),
});
- var callee: ["__fixuns?f?i".len]u8 = undefined;
+ var callee_buf: ["__fixuns?f?i".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = dst_ty.toIntern(),
.param_types = &.{src_ty.toIntern()},
- .callee = std.fmt.bufPrint(&callee, "__fix{s}{c}f{c}i", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "__fix{s}{c}f{c}i", .{
switch (dst_signedness) {
.signed => "",
.unsigned => "uns",
@@ -12592,7 +13102,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
.{ .i_, .mul },
len_reg,
len_reg,
- Immediate.u(elem_abi_size),
+ Immediate.s(elem_abi_size),
);
try self.genInlineMemcpy(second_elem_ptr_mcv, ptr, len_mcv);
@@ -12645,8 +13155,23 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
- .Slice => dst_ptr.address().offset(8).deref(),
- .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
+ .Slice => len: {
+ const len_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
+ const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
+ defer self.register_manager.unlockReg(len_lock);
+
+ try self.asmRegisterMemoryImmediate(
+ .{ .i_, .mul },
+ len_reg,
+ dst_ptr.address().offset(8).deref().mem(.qword),
+ Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(mod))),
+ );
+ break :len .{ .register = len_reg };
+ },
+ .One => len: {
+ const array_ty = dst_ptr_ty.childType(mod);
+ break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(mod) };
+ },
.C, .Many => unreachable,
};
const len_lock: ?RegisterLock = switch (len) {
@@ -12999,10 +13524,60 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
}
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const reduce = self.air.instructions.items(.data)[inst].reduce;
- _ = reduce;
- return self.fail("TODO implement airReduce for x86_64", .{});
- //return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
+
+ const result: MCValue = result: {
+ const operand_ty = self.typeOf(reduce.operand);
+ if (operand_ty.isVector(mod) and operand_ty.childType(mod).toIntern() == .bool_type) {
+ try self.spillEflagsIfOccupied();
+
+ const operand_mcv = try self.resolveInst(reduce.operand);
+ const mask_len = (std.math.cast(u6, operand_ty.vectorLen(mod)) orelse
+ return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}));
+ const mask = (@as(u64, 1) << mask_len) - 1;
+ const abi_size: u32 = @intCast(operand_ty.abiSize(mod));
+ switch (reduce.operation) {
+ .Or => {
+ if (operand_mcv.isMemory()) try self.asmMemoryImmediate(
+ .{ ._, .@"test" },
+ operand_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ Immediate.u(mask),
+ ) else {
+ const operand_reg = registerAlias(if (operand_mcv.isRegister())
+ operand_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(operand_ty, operand_mcv), abi_size);
+ if (mask_len < abi_size * 8) try self.asmRegisterImmediate(
+ .{ ._, .@"test" },
+ operand_reg,
+ Immediate.u(mask),
+ ) else try self.asmRegisterRegister(
+ .{ ._, .@"test" },
+ operand_reg,
+ operand_reg,
+ );
+ }
+ break :result .{ .eflags = .nz };
+ },
+ .And => {
+ const tmp_reg = try self.copyToTmpRegister(operand_ty, operand_mcv);
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ try self.asmRegister(.{ ._, .not }, tmp_reg);
+ if (mask_len < abi_size * 8)
+ try self.asmRegisterImmediate(.{ ._, .@"test" }, tmp_reg, Immediate.u(mask))
+ else
+ try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_reg, tmp_reg);
+ break :result .{ .eflags = .z };
+ },
+ else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}),
+ }
+ }
+ return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)});
+ };
+ return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
@@ -13215,11 +13790,11 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
ty.fmt(mod),
});
- var callee: ["__fma?".len]u8 = undefined;
+ var callee_buf: ["__fma?".len]u8 = undefined;
break :result try self.genCall(.{ .lib = .{
.return_type = ty.toIntern(),
.param_types = &.{ ty.toIntern(), ty.toIntern(), ty.toIntern() },
- .callee = std.fmt.bufPrint(&callee, "{s}fma{s}", .{
+ .callee = std.fmt.bufPrint(&callee_buf, "{s}fma{s}", .{
floatLibcAbiPrefix(ty),
floatLibcAbiSuffix(ty),
}) catch unreachable,
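Note on the integer-vector compare lowering added above: x86 only provides signed packed greater-than and packed equality (pcmpgt*/pcmpeq*), so cmp_lte, cmp_gte, and cmp_neq are produced by computing the complementary predicate and inverting every lane with a pxor against an all-ones constant, after which pmovmskb collapses the lane mask into a general-purpose register. A minimal standalone sketch of the boolean identity behind that choice (illustrative only, not backend code):

const std = @import("std");

// `a <= b` is emitted as NOT(a > b): pcmpgt followed by pxor with all-ones,
// then pmovmskb to read the mask back. This only checks the identity itself.
test "lte is the complement of gt" {
    const V = @Vector(4, i32);
    const a: V = .{ 1, 5, -3, 7 };
    const b: V = .{ 2, 5, -4, 6 };
    const gt = a > b; // what pcmpgtd computes per lane
    const lte = a <= b; // what the backend must produce
    for (0..4) |i| try std.testing.expect(lte[i] == !gt[i]);
}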
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index e03b0f01b5..ea00a0b627 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -19,18 +19,18 @@ pub const Error = Lower.Error || error{
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |mir_i| {
- const mir_index = @as(Mir.Inst.Index, @intCast(mir_i));
+ const mir_index: Mir.Inst.Index = @intCast(mir_i);
try emit.code_offset_mapping.putNoClobber(
emit.lower.allocator,
mir_index,
- @as(u32, @intCast(emit.code.items.len)),
+ @intCast(emit.code.items.len),
);
const lowered = try emit.lower.lowerMir(mir_index);
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
- const start_offset = @as(u32, @intCast(emit.code.items.len));
+ const start_offset: u32 = @intCast(emit.code.items.len);
try lowered_inst.encode(emit.code.writer(), .{});
- const end_offset = @as(u32, @intCast(emit.code.items.len));
+ const end_offset: u32 = @intCast(emit.code.items.len);
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
lowered_relocs = lowered_relocs[1..];
@@ -39,7 +39,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.source = start_offset,
.target = target,
.offset = end_offset - 4,
- .length = @as(u5, @intCast(end_offset - start_offset)),
+ .length = @intCast(end_offset - start_offset),
}),
.linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
// Add relocation to the decl.
@@ -220,7 +220,7 @@ const Reloc = struct {
/// Target of the relocation.
target: Mir.Inst.Index,
/// Offset of the relocation within the instruction.
- offset: usize,
+ offset: u32,
/// Length of the instruction.
length: u5,
};
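The Emit.zig changes are mechanical: @intCast now picks up its destination type from the result location, so the @as(T, @intCast(x)) spelling is only needed where no result type is available, and the relocation offset is narrowed to u32 to match what is actually stored. A small standalone illustration of the cast style (not backend code):

const std = @import("std");

// Illustration only: with an annotated destination, @intCast needs no @as wrapper.
fn codeLen(items: []const u8) u32 {
    const len: u32 = @intCast(items.len); // was: @as(u32, @intCast(items.len))
    return len;
}

test "intCast with inferred result type" {
    try std.testing.expectEqual(@as(u32, 3), codeLen("abc"));
}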
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index 3ef835aa18..5dbd47657c 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -266,7 +266,12 @@ pub const Mnemonic = enum {
packssdw, packsswb, packuswb,
paddb, paddd, paddq, paddsb, paddsw, paddusb, paddusw, paddw,
pand, pandn, por, pxor,
+ pcmpeqb, pcmpeqd, pcmpeqw,
+ pcmpgtb, pcmpgtd, pcmpgtw,
pmulhw, pmullw,
+ pslld, psllq, psllw,
+ psrad, psraw,
+ psrld, psrlq, psrlw,
psubb, psubd, psubq, psubsb, psubsw, psubusb, psubusw, psubw,
// SSE
addps, addss,
@@ -278,11 +283,12 @@ pub const Mnemonic = enum {
maxps, maxss,
minps, minss,
movaps, movhlps, movlhps,
+ movmskps,
movss, movups,
mulps, mulss,
orps,
pextrw, pinsrw,
- pmaxsw, pmaxub, pminsw, pminub,
+ pmaxsw, pmaxub, pminsw, pminub, pmovmskb,
shufps,
sqrtps, sqrtss,
subps, subss,
@@ -301,12 +307,13 @@ pub const Mnemonic = enum {
minpd, minsd,
movapd,
movdqa, movdqu,
+ movmskpd,
//movsd,
movupd,
mulpd, mulsd,
orpd,
- pshufhw, pshuflw,
- psrld, psrlq, psrlw,
+ pshufd, pshufhw, pshuflw,
+ pslldq, psrldq,
punpckhbw, punpckhdq, punpckhqdq, punpckhwd,
punpcklbw, punpckldq, punpcklqdq, punpcklwd,
shufpd,
@@ -317,19 +324,29 @@ pub const Mnemonic = enum {
// SSE3
movddup, movshdup, movsldup,
// SSSE3
- pabsb, pabsd, pabsw,
+ pabsb, pabsd, pabsw, palignr,
// SSE4.1
blendpd, blendps, blendvpd, blendvps,
extractps,
insertps,
packusdw,
+ pcmpeqq,
pextrb, pextrd, pextrq,
pinsrb, pinsrd, pinsrq,
pmaxsb, pmaxsd, pmaxud, pmaxuw, pminsb, pminsd, pminud, pminuw,
pmulld,
roundpd, roundps, roundsd, roundss,
+ // SSE4.2
+ pcmpgtq,
+ // PCLMUL
+ pclmulqdq,
+ // AES
+ aesdec, aesdeclast, aesenc, aesenclast, aesimc, aeskeygenassist,
+ // SHA
+ sha256msg1, sha256msg2, sha256rnds2,
// AVX
vaddpd, vaddps, vaddsd, vaddss,
+ vaesdec, vaesdeclast, vaesenc, vaesenclast, vaesimc, vaeskeygenassist,
vandnpd, vandnps, vandpd, vandps,
vblendpd, vblendps, vblendvpd, vblendvps,
vbroadcastf128, vbroadcastsd, vbroadcastss,
@@ -348,6 +365,7 @@ pub const Mnemonic = enum {
vmovddup,
vmovdqa, vmovdqu,
vmovhlps, vmovlhps,
+ vmovmskpd, vmovmskps,
vmovq,
vmovsd,
vmovshdup, vmovsldup,
@@ -358,15 +376,20 @@ pub const Mnemonic = enum {
vpabsb, vpabsd, vpabsw,
vpackssdw, vpacksswb, vpackusdw, vpackuswb,
vpaddb, vpaddd, vpaddq, vpaddsb, vpaddsw, vpaddusb, vpaddusw, vpaddw,
- vpand, vpandn,
+ vpalignr, vpand, vpandn, vpclmulqdq,
+ vpcmpeqb, vpcmpeqd, vpcmpeqq, vpcmpeqw,
+ vpcmpgtb, vpcmpgtd, vpcmpgtq, vpcmpgtw,
vpextrb, vpextrd, vpextrq, vpextrw,
vpinsrb, vpinsrd, vpinsrq, vpinsrw,
vpmaxsb, vpmaxsd, vpmaxsw, vpmaxub, vpmaxud, vpmaxuw,
vpminsb, vpminsd, vpminsw, vpminub, vpminud, vpminuw,
+ vpmovmskb,
vpmulhw, vpmulld, vpmullw,
vpor,
- vpshufhw, vpshuflw,
- vpsrld, vpsrlq, vpsrlw,
+ vpshufd, vpshufhw, vpshuflw,
+ vpslld, vpslldq, vpsllq, vpsllw,
+ vpsrad, vpsraq, vpsraw,
+ vpsrld, vpsrldq, vpsrlq, vpsrlw,
vpsubb, vpsubd, vpsubq, vpsubsb, vpsubsw, vpsubusb, vpsubusw, vpsubw,
vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd,
vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd,
@@ -742,6 +765,8 @@ pub const Mode = enum {
pub const Feature = enum {
none,
+ aes,
+ @"aes avx",
avx,
avx2,
bmi,
@@ -749,12 +774,18 @@ pub const Feature = enum {
fma,
lzcnt,
movbe,
+ pclmul,
+ @"pclmul avx",
popcnt,
sse,
sse2,
sse3,
sse4_1,
+ sse4_2,
ssse3,
+ sha,
+ vaes,
+ vpclmulqdq,
x87,
};
@@ -772,7 +803,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
}
const mnemonic_to_encodings_map = init: {
- @setEvalBranchQuota(50_000);
+ @setEvalBranchQuota(60_000);
const encodings = @import("encodings.zig");
var entries = encodings.table;
std.mem.sort(encodings.Entry, &entries, {}, struct {
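The @setEvalBranchQuota bump is a direct consequence of the table growth: mnemonic_to_encodings_map sorts every encoding entry at compile time, and each added row spends backward-branch budget. A hypothetical miniature of that comptime-sorted-table pattern (the data here is made up):

const std = @import("std");

// Sorting at compile time consumes backward-branch quota, so a larger table
// needs a larger @setEvalBranchQuota in the initializing block.
const sorted_table = init: {
    @setEvalBranchQuota(2_000);
    var entries = [_]u32{ 9, 3, 7, 1, 5 };
    std.mem.sort(u32, &entries, {}, std.sort.asc(u32));
    break :init entries;
};

test "comptime-sorted table" {
    try std.testing.expectEqual(@as(u32, 1), sorted_table[0]);
    try std.testing.expectEqual(@as(u32, 9), sorted_table[4]);
}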
diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig
index ae5f86d6b0..5ac3c3a72c 100644
--- a/src/arch/x86_64/Lower.zig
+++ b/src/arch/x86_64/Lower.zig
@@ -190,7 +190,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_probe_align_ri_s => {
try lower.emit(.none, .@"test", &.{
.{ .reg = inst.data.ri.r1 },
- .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
+ .{ .imm = Immediate.s(@bitCast(inst.data.ri.i)) },
});
try lower.emit(.none, .jz, &.{
.{ .imm = lower.reloc(.{ .inst = index + 1 }) },
@@ -226,14 +226,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
}
try lower.emit(.none, .sub, &.{
.{ .reg = inst.data.ri.r1 },
- .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
+ .{ .imm = Immediate.s(@bitCast(inst.data.ri.i)) },
});
assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts);
},
.pseudo_probe_adjust_setup_rri_s => {
try lower.emit(.none, .mov, &.{
.{ .reg = inst.data.rri.r2.to32() },
- .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.rri.i))) },
+ .{ .imm = Immediate.s(@bitCast(inst.data.rri.i)) },
});
try lower.emit(.none, .sub, &.{
.{ .reg = inst.data.rri.r1 },
@@ -291,7 +291,9 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.i_s,
.mi_sib_s,
.mi_rip_s,
- => Immediate.s(@as(i32, @bitCast(i))),
+ .rmi_sib_s,
+ .rmi_rip_s,
+ => Immediate.s(@bitCast(i)),
.rrri,
.rri_u,
@@ -301,6 +303,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.mi_rip_u,
.rmi_sib,
.rmi_rip,
+ .rmi_sib_u,
+ .rmi_rip_u,
.mri_sib,
.mri_rip,
.rrm_sib,
@@ -319,6 +323,8 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
return lower.mir.resolveFrameLoc(switch (ops) {
.rm_sib,
.rmi_sib,
+ .rmi_sib_s,
+ .rmi_sib_u,
.m_sib,
.mi_sib_u,
.mi_sib_s,
@@ -335,6 +341,8 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
.rm_rip,
.rmi_rip,
+ .rmi_rip_s,
+ .rmi_rip_u,
.m_rip,
.mi_rip_u,
.mi_rip_s,
@@ -383,13 +391,29 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.rrri => inst.data.rrri.fixes,
.rri_s, .rri_u => inst.data.rri.fixes,
.ri_s, .ri_u => inst.data.ri.fixes,
- .ri64, .rm_sib, .rm_rip, .mr_sib, .mr_rip => inst.data.rx.fixes,
+ .ri64,
+ .rm_sib,
+ .rm_rip,
+ .rmi_sib_s,
+ .rmi_sib_u,
+ .rmi_rip_s,
+ .rmi_rip_u,
+ .mr_sib,
+ .mr_rip,
+ => inst.data.rx.fixes,
.mrr_sib, .mrr_rip, .rrm_sib, .rrm_rip => inst.data.rrx.fixes,
.rmi_sib, .rmi_rip, .mri_sib, .mri_rip => inst.data.rix.fixes,
.rrmi_sib, .rrmi_rip => inst.data.rrix.fixes,
.mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => inst.data.x.fixes,
.m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes,
- .extern_fn_reloc, .got_reloc, .extern_got_reloc, .direct_reloc, .direct_got_reloc, .import_reloc, .tlv_reloc => ._,
+ .extern_fn_reloc,
+ .got_reloc,
+ .extern_got_reloc,
+ .direct_reloc,
+ .direct_got_reloc,
+ .import_reloc,
+ .tlv_reloc,
+ => ._,
else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}),
};
try lower.emit(switch (fixes) {
@@ -461,7 +485,7 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.m_sib, .m_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
},
- .mi_sib_s, .mi_sib_u, .mi_rip_u, .mi_rip_s => &.{
+ .mi_sib_s, .mi_sib_u, .mi_rip_s, .mi_rip_u => &.{
.{ .mem = lower.mem(inst.ops, inst.data.x.payload + 1) },
.{ .imm = lower.imm(
inst.ops,
@@ -477,6 +501,14 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
+ .rmi_sib_s, .rmi_sib_u, .rmi_rip_s, .rmi_rip_u => &.{
+ .{ .reg = inst.data.rx.r1 },
+ .{ .mem = lower.mem(inst.ops, inst.data.rx.payload + 1) },
+ .{ .imm = lower.imm(
+ inst.ops,
+ lower.mir.extraData(Mir.Imm32, inst.data.rx.payload).data.imm,
+ ) },
+ },
.mr_sib, .mr_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
.{ .reg = inst.data.rx.r1 },
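The new rmi_sib_s/rmi_sib_u (and _rip_) forms reuse the existing immediate convention: Mir always stores the immediate as a u32, and the operand form alone decides whether imm() reinterprets it as a sign-extended i32 or keeps it unsigned. A simplified sketch of that convention (types reduced for illustration):

const std = @import("std");

// Simplified model of imm(): one u32 payload, and the operand form decides
// whether it is sign-extended (the _s forms) or kept unsigned (the _u forms).
const Imm = union(enum) { signed: i64, unsigned: u64 };

fn lowerImm(signed_form: bool, payload: u32) Imm {
    return if (signed_form)
        .{ .signed = @as(i32, @bitCast(payload)) } // rmi_sib_s, mi_rip_s, ...
    else
        .{ .unsigned = payload }; // rmi_sib_u, mi_rip_u, ...
}

test "signed forms reinterpret the stored u32" {
    const all_ones: u32 = 0xffff_ffff;
    try std.testing.expectEqual(@as(i64, -1), lowerImm(true, all_ones).signed);
    try std.testing.expectEqual(@as(u64, 0xffff_ffff), lowerImm(false, all_ones).unsigned);
}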
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 23bef3c03b..8fec05280a 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -474,6 +474,10 @@ pub const Inst = struct {
/// Bitwise logical and not of packed single-precision floating-point values
/// Bitwise logical and not of packed double-precision floating-point values
andn,
+ /// Compare packed data for equal
+ cmpeq,
+ /// Compare packed data for greater than
+ cmpgt,
/// Maximum of packed signed integers
maxs,
/// Maximum of packed unsigned integers
@@ -482,10 +486,20 @@ pub const Inst = struct {
mins,
/// Minimum of packed unsigned integers
minu,
+ /// Move byte mask
+ /// Extract packed single precision floating-point sign mask
+ /// Extract packed double precision floating-point sign mask
+ movmsk,
/// Multiply packed signed integers and store low result
mull,
/// Multiply packed signed integers and store high result
mulh,
+ /// Shift packed data left logical
+ sll,
+ /// Shift packed data right arithmetic
+ sra,
+ /// Shift packed data right logical
+ srl,
/// Subtract packed signed integers with signed saturation
subs,
/// Subtract packed unsigned integers with unsigned saturation
@@ -584,15 +598,13 @@ pub const Inst = struct {
movdqu,
/// Packed interleave shuffle of quadruplets of single-precision floating-point values
/// Packed interleave shuffle of pairs of double-precision floating-point values
+ /// Shuffle packed doublewords
+ /// Shuffle packed words
shuf,
/// Shuffle packed high words
shufh,
/// Shuffle packed low words
shufl,
- /// Shift packed data right logical
- /// Shift packed data right logical
- /// Shift packed data right logical
- srl,
/// Unpack high data
unpckhbw,
/// Unpack high data
@@ -617,6 +629,9 @@ pub const Inst = struct {
/// Replicate single floating-point values
movsldup,
+ /// Packed align right
+ alignr,
+
/// Pack with unsigned saturation
ackusd,
/// Blend packed single-precision floating-point values
@@ -640,6 +655,29 @@ pub const Inst = struct {
/// Round scalar double-precision floating-point value
round,
+ /// Carry-less multiplication quadword
+ clmulq,
+
+ /// Perform one round of an AES decryption flow
+ aesdec,
+ /// Perform last round of an AES decryption flow
+ aesdeclast,
+ /// Perform one round of an AES encryption flow
+ aesenc,
+ /// Perform last round of an AES encryption flow
+ aesenclast,
+ /// Perform the AES InvMixColumn transformation
+ aesimc,
+ /// AES round key generation assist
+ aeskeygenassist,
+
+ /// Perform an intermediate calculation for the next four SHA256 message dwords
+ sha256msg1,
+ /// Perform a final calculation for the next four SHA256 message dwords
+ sha256msg2,
+ /// Perform two rounds of SHA256 operation
+ sha256rnds2,
+
/// Load with broadcast floating-point data
broadcast,
@@ -720,9 +758,24 @@ pub const Inst = struct {
/// Register, memory (RIP) operands.
/// Uses `rx` payload.
rm_rip,
- /// Register, memory (SIB), immediate (byte) operands.
+ /// Register, memory (SIB), immediate (word) operands.
/// Uses `rix` payload with extra data of type `MemorySib`.
rmi_sib,
+ /// Register, memory (RIP), immediate (word) operands.
+ /// Uses `rix` payload with extra data of type `MemoryRip`.
+ rmi_rip,
+ /// Register, memory (SIB), immediate (signed) operands.
+ /// Uses `rx` payload with extra data of type `Imm32` followed by `MemorySib`.
+ rmi_sib_s,
+ /// Register, memory (SIB), immediate (unsigned) operands.
+ /// Uses `rx` payload with extra data of type `Imm32` followed by `MemorySib`.
+ rmi_sib_u,
+ /// Register, memory (RIP), immediate (signed) operands.
+ /// Uses `rx` payload with extra data of type `Imm32` followed by `MemoryRip`.
+ rmi_rip_s,
+ /// Register, memory (RIP), immediate (unsigned) operands.
+ /// Uses `rx` payload with extra data of type `Imm32` followed by `MemoryRip`.
+ rmi_rip_u,
/// Register, register, memory (RIP).
/// Uses `rrix` payload with extra data of type `MemoryRip`.
rrm_rip,
@@ -735,27 +788,24 @@ pub const Inst = struct {
/// Register, register, memory (SIB), immediate (byte) operands.
/// Uses `rrix` payload with extra data of type `MemorySib`.
rrmi_sib,
- /// Register, memory (RIP), immediate (byte) operands.
- /// Uses `rix` payload with extra data of type `MemoryRip`.
- rmi_rip,
/// Single memory (SIB) operand.
/// Uses `x` with extra data of type `MemorySib`.
m_sib,
/// Single memory (RIP) operand.
/// Uses `x` with extra data of type `MemoryRip`.
m_rip,
- /// Memory (SIB), immediate (unsigned) operands.
- /// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
- mi_sib_u,
- /// Memory (RIP), immediate (unsigned) operands.
- /// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
- mi_rip_u,
/// Memory (SIB), immediate (sign-extend) operands.
/// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
mi_sib_s,
+ /// Memory (SIB), immediate (unsigned) operands.
+ /// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
+ mi_sib_u,
/// Memory (RIP), immediate (sign-extend) operands.
/// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
mi_rip_s,
+ /// Memory (RIP), immediate (unsigned) operands.
+ /// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
+ mi_rip_u,
/// Memory (SIB), register operands.
/// Uses `rx` payload with extra data of type `MemorySib`.
mr_sib,
@@ -768,10 +818,10 @@ pub const Inst = struct {
/// Memory (RIP), register, register operands.
/// Uses `rrx` payload with extra data of type `MemoryRip`.
mrr_rip,
- /// Memory (SIB), register, immediate (byte) operands.
+ /// Memory (SIB), register, immediate (word) operands.
/// Uses `rix` payload with extra data of type `MemorySib`.
mri_sib,
- /// Memory (RIP), register, immediate (byte) operands.
+ /// Memory (RIP), register, immediate (word) operands.
/// Uses `rix` payload with extra data of type `MemoryRip`.
mri_rip,
/// Rax, Memory moffs.
@@ -955,7 +1005,7 @@ pub const Inst = struct {
rix: struct {
fixes: Fixes = ._,
r1: Register,
- i: u8,
+ i: u16,
payload: u32,
},
/// Register, register, byte immediate, followed by Custom payload found in extra.
@@ -1010,7 +1060,7 @@ pub const RegisterList = struct {
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
for (registers, 0..) |cpreg, i| {
- if (reg.id() == cpreg.id()) return @as(u32, @intCast(i));
+ if (reg.id() == cpreg.id()) return @intCast(i);
}
unreachable; // register not in input register list!
}
@@ -1030,7 +1080,7 @@ pub const RegisterList = struct {
}
pub fn count(self: Self) u32 {
- return @as(u32, @intCast(self.bitset.count()));
+ return @intCast(self.bitset.count());
}
};
@@ -1044,14 +1094,14 @@ pub const Imm64 = struct {
pub fn encode(v: u64) Imm64 {
return .{
- .msb = @as(u32, @truncate(v >> 32)),
- .lsb = @as(u32, @truncate(v)),
+ .msb = @truncate(v >> 32),
+ .lsb = @truncate(v),
};
}
pub fn decode(imm: Imm64) u64 {
var res: u64 = 0;
- res |= (@as(u64, @intCast(imm.msb)) << 32);
+ res |= @as(u64, @intCast(imm.msb)) << 32;
res |= @as(u64, @intCast(imm.lsb));
return res;
}
@@ -1075,7 +1125,7 @@ pub const MemorySib = struct {
assert(sib.scale_index.scale == 0 or std.math.isPowerOfTwo(sib.scale_index.scale));
return .{
.ptr_size = @intFromEnum(sib.ptr_size),
- .base_tag = @intFromEnum(@as(Memory.Base.Tag, sib.base)),
+ .base_tag = @intFromEnum(sib.base),
.base = switch (sib.base) {
.none => undefined,
.reg => |r| @intFromEnum(r),
@@ -1091,18 +1141,18 @@ pub const MemorySib = struct {
}
pub fn decode(msib: MemorySib) Memory {
- const scale = @as(u4, @truncate(msib.scale_index));
+ const scale: u4 = @truncate(msib.scale_index);
assert(scale == 0 or std.math.isPowerOfTwo(scale));
return .{ .sib = .{
- .ptr_size = @as(Memory.PtrSize, @enumFromInt(msib.ptr_size)),
+ .ptr_size = @enumFromInt(msib.ptr_size),
.base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) {
.none => .none,
- .reg => .{ .reg = @as(Register, @enumFromInt(msib.base)) },
- .frame => .{ .frame = @as(bits.FrameIndex, @enumFromInt(msib.base)) },
+ .reg => .{ .reg = @enumFromInt(msib.base) },
+ .frame => .{ .frame = @enumFromInt(msib.base) },
},
.scale_index = .{
.scale = scale,
- .index = if (scale > 0) @as(Register, @enumFromInt(msib.scale_index >> 4)) else undefined,
+ .index = if (scale > 0) @enumFromInt(msib.scale_index >> 4) else undefined,
},
.disp = msib.disp,
} };
@@ -1124,7 +1174,7 @@ pub const MemoryRip = struct {
pub fn decode(mrip: MemoryRip) Memory {
return .{ .rip = .{
- .ptr_size = @as(Memory.PtrSize, @enumFromInt(mrip.ptr_size)),
+ .ptr_size = @enumFromInt(mrip.ptr_size),
.disp = mrip.disp,
} };
}
@@ -1141,14 +1191,14 @@ pub const MemoryMoffs = struct {
pub fn encode(seg: Register, offset: u64) MemoryMoffs {
return .{
.seg = @intFromEnum(seg),
- .msb = @as(u32, @truncate(offset >> 32)),
- .lsb = @as(u32, @truncate(offset >> 0)),
+ .msb = @truncate(offset >> 32),
+ .lsb = @truncate(offset >> 0),
};
}
pub fn decode(moffs: MemoryMoffs) Memory {
return .{ .moffs = .{
- .seg = @as(Register, @enumFromInt(moffs.seg)),
+ .seg = @enumFromInt(moffs.seg),
.offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0,
} };
}
@@ -1168,7 +1218,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @as(i32, @bitCast(mir.extra[i])),
+ i32 => @bitCast(mir.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
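Several Mir payloads split 64-bit values across two 32-bit extra words (Imm64, MemoryMoffs); the encode/decode pairs above are the usual shift-and-recombine round trip. A standalone sketch of the same round trip (illustrative helper names, not the backend types):

const std = @import("std");

// Illustrative round-trip for a u64 stored as two u32 extra words,
// mirroring the Imm64/MemoryMoffs encode/decode pairs.
fn encodeHalves(v: u64) [2]u32 {
    const msb: u32 = @truncate(v >> 32);
    const lsb: u32 = @truncate(v);
    return .{ msb, lsb };
}

fn decodeHalves(halves: [2]u32) u64 {
    return @as(u64, halves[0]) << 32 | @as(u64, halves[1]);
}

test "u64 round-trips through two u32 words" {
    const v: u64 = 0x0123_4567_89ab_cdef;
    try std.testing.expectEqual(v, decodeHalves(encodeHalves(v)));
}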
diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig
index 5cffaf4fe0..695f2d585a 100644
--- a/src/arch/x86_64/bits.zig
+++ b/src/arch/x86_64/bits.zig
@@ -232,7 +232,7 @@ pub const Register = enum(u7) {
else => unreachable,
// zig fmt: on
};
- return @as(u6, @intCast(@intFromEnum(reg) - base));
+ return @intCast(@intFromEnum(reg) - base);
}
pub fn bitSize(reg: Register) u64 {
@@ -291,11 +291,11 @@ pub const Register = enum(u7) {
else => unreachable,
// zig fmt: on
};
- return @as(u4, @truncate(@intFromEnum(reg) - base));
+ return @truncate(@intFromEnum(reg) - base);
}
pub fn lowEnc(reg: Register) u3 {
- return @as(u3, @truncate(reg.enc()));
+ return @truncate(reg.enc());
}
pub fn toBitSize(reg: Register, bit_size: u64) Register {
@@ -325,19 +325,19 @@ pub const Register = enum(u7) {
}
pub fn to64(reg: Register) Register {
- return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)));
+ return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax));
}
pub fn to32(reg: Register) Register {
- return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)));
+ return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax));
}
pub fn to16(reg: Register) Register {
- return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)));
+ return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax));
}
pub fn to8(reg: Register) Register {
- return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)));
+ return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al));
}
fn sseBase(reg: Register) u7 {
@@ -350,11 +350,11 @@ pub const Register = enum(u7) {
}
pub fn to256(reg: Register) Register {
- return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)));
+ return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0));
}
pub fn to128(reg: Register) Register {
- return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)));
+ return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0));
}
/// DWARF register encoding
@@ -619,7 +619,7 @@ pub const Immediate = union(enum) {
1, 8 => @as(i8, @bitCast(@as(u8, @intCast(x)))),
16 => @as(i16, @bitCast(@as(u16, @intCast(x)))),
32 => @as(i32, @bitCast(@as(u32, @intCast(x)))),
- 64 => @as(i64, @bitCast(x)),
+ 64 => @bitCast(x),
else => unreachable,
},
};
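The bits.zig cleanups keep the width-conversion trick intact: each register size class occupies a contiguous run of enum values, so to64/to32/to128/to256 are just index arithmetic relative to the class base. A hypothetical miniature of that pattern (a made-up two-class enum, not the real Register):

const std = @import("std");

// Made-up miniature: two contiguous size classes, so changing width is
// subtracting one class base and adding the other (64-bit sources assumed).
const Reg = enum(u3) { rax, rbx, rcx, rdx, eax, ebx, ecx, edx };

fn to32(reg: Reg) Reg {
    return @enumFromInt(@intFromEnum(reg) - @intFromEnum(Reg.rax) + @intFromEnum(Reg.eax));
}

test "width conversion by enum offset" {
    try std.testing.expectEqual(Reg.ecx, to32(.rcx));
}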
diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig
index d6efb4cfc7..5c4c345895 100644
--- a/src/arch/x86_64/encodings.zig
+++ b/src/arch/x86_64/encodings.zig
@@ -905,6 +905,9 @@ pub const table = [_]Entry{
.{ .movlhps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .none, .sse },
+ .{ .movmskps, .rm, &.{ .r32, .xmm }, &.{ 0x0f, 0x50 }, 0, .none, .sse },
+ .{ .movmskps, .rm, &.{ .r64, .xmm }, &.{ 0x0f, 0x50 }, 0, .none, .sse },
+
.{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .none, .sse },
.{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .none, .sse },
@@ -917,6 +920,9 @@ pub const table = [_]Entry{
.{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .none, .sse },
+ .{ .pmovmskb, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .none, .sse },
+ .{ .pmovmskb, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .none, .sse },
+
.{ .shufps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .none, .sse },
.{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse },
@@ -1005,6 +1011,12 @@ pub const table = [_]Entry{
.{ .movdqu, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .none, .sse2 },
.{ .movdqu, .mr, &.{ .xmm_m128, .xmm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .none, .sse2 },
+ .{ .movmskpd, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .none, .sse2 },
+ .{ .movmskpd, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .none, .sse2 },
+
+ .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 },
+ .{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .none, .sse2 },
+
.{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .none, .sse2 },
.{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .none, .sse2 },
@@ -1037,6 +1049,14 @@ pub const table = [_]Entry{
.{ .pandn, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .none, .sse2 },
+ .{ .pcmpeqb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x74 }, 0, .none, .sse2 },
+ .{ .pcmpeqw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x75 }, 0, .none, .sse2 },
+ .{ .pcmpeqd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x76 }, 0, .none, .sse2 },
+
+ .{ .pcmpgtb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x64 }, 0, .none, .sse2 },
+ .{ .pcmpgtw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x65 }, 0, .none, .sse2 },
+ .{ .pcmpgtd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x66 }, 0, .none, .sse2 },
+
.{ .pextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 },
.{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 },
@@ -1055,10 +1075,26 @@ pub const table = [_]Entry{
.{ .por, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .none, .sse2 },
+ .{ .pshufd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x70 }, 0, .none, .sse2 },
+
.{ .pshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .none, .sse2 },
.{ .pshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .none, .sse2 },
+ .{ .psllw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf1 }, 0, .none, .sse2 },
+ .{ .psllw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 6, .none, .sse2 },
+ .{ .pslld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf2 }, 0, .none, .sse2 },
+ .{ .pslld, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 6, .none, .sse2 },
+ .{ .psllq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf3 }, 0, .none, .sse2 },
+ .{ .psllq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 6, .none, .sse2 },
+
+ .{ .pslldq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 7, .none, .sse2 },
+
+ .{ .psraw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe1 }, 0, .none, .sse2 },
+ .{ .psraw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 4, .none, .sse2 },
+ .{ .psrad, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe2 }, 0, .none, .sse2 },
+ .{ .psrad, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 4, .none, .sse2 },
+
.{ .psrlw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .none, .sse2 },
.{ .psrlw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .none, .sse2 },
.{ .psrld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .none, .sse2 },
@@ -1066,6 +1102,8 @@ pub const table = [_]Entry{
.{ .psrlq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .none, .sse2 },
.{ .psrlq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .none, .sse2 },
+ .{ .psrldq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 3, .none, .sse2 },
+
.{ .psubb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .none, .sse2 },
.{ .psubw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .none, .sse2 },
.{ .psubd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfa }, 0, .none, .sse2 },
@@ -1100,9 +1138,6 @@ pub const table = [_]Entry{
.{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .none, .sse2 },
- .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 },
- .{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .none, .sse2 },
-
.{ .ucomisd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x2e }, 0, .none, .sse2 },
.{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .none, .sse2 },
@@ -1122,13 +1157,17 @@ pub const table = [_]Entry{
.{ .pabsw, .rm, &.{ .mm, .mm_m64 }, &.{ 0x0f, 0x38, 0x1d }, 0, .none, .ssse3 },
.{ .pabsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1d }, 0, .none, .ssse3 },
+ .{ .palignr, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0f }, 0, .none, .ssse3 },
+
// SSE4.1
.{ .blendpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .none, .sse4_1 },
.{ .blendps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .none, .sse4_1 },
+ .{ .blendvpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x15 }, 0, .none, .sse4_1 },
.{ .blendvpd, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x66, 0x0f, 0x38, 0x15 }, 0, .none, .sse4_1 },
+ .{ .blendvps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x14 }, 0, .none, .sse4_1 },
.{ .blendvps, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x66, 0x0f, 0x38, 0x14 }, 0, .none, .sse4_1 },
.{ .extractps, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x17 }, 0, .none, .sse4_1 },
@@ -1137,6 +1176,8 @@ pub const table = [_]Entry{
.{ .packusdw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .none, .sse4_1 },
+ .{ .pcmpeqq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .none, .sse4_1 },
+
.{ .pextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .none, .sse4_1 },
.{ .pextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .none, .sse4_1 },
.{ .pextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .long, .sse4_1 },
@@ -1171,6 +1212,33 @@ pub const table = [_]Entry{
.{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 },
+ // SSE4.2
+ .{ .pcmpgtq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .none, .sse4_2 },
+
+ // PCLMUL
+ .{ .pclmulqdq, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x44 }, 0, .none, .pclmul },
+
+ // AES
+ .{ .aesdec, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xde }, 0, .none, .aes },
+
+ .{ .aesdeclast, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdf }, 0, .none, .aes },
+
+ .{ .aesenc, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdc }, 0, .none, .aes },
+
+ .{ .aesenclast, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdd }, 0, .none, .aes },
+
+ .{ .aesimc, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdb }, 0, .none, .aes },
+
+ .{ .aeskeygenassist, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0xdf }, 0, .none, .aes },
+
+ // SHA
+ .{ .sha256msg1, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x38, 0xcc }, 0, .none, .sha },
+
+ .{ .sha256msg2, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x38, 0xcd }, 0, .none, .sha },
+
+ .{ .sha256rnds2, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x38, 0xcb }, 0, .none, .sha },
+ .{ .sha256rnds2, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x0f, 0x38, 0xcb }, 0, .none, .sha },
+
// AVX
.{ .vaddpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_128_wig, .avx },
.{ .vaddpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_256_wig, .avx },
@@ -1182,6 +1250,18 @@ pub const table = [_]Entry{
.{ .vaddss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx },
+ .{ .vaesdec, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xde }, 0, .vex_128_wig, .@"aes avx" },
+
+ .{ .vaesdeclast, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdf }, 0, .vex_128_wig, .@"aes avx" },
+
+ .{ .vaesenc, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdc }, 0, .vex_128_wig, .@"aes avx" },
+
+ .{ .vaesenclast, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdd }, 0, .vex_128_wig, .@"aes avx" },
+
+ .{ .vaesimc, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdb }, 0, .vex_128_wig, .@"aes avx" },
+
+ .{ .vaeskeygenassist, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0xdf }, 0, .vex_128_wig, .@"aes avx" },
+
.{ .vandnpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .vex_128_wig, .avx },
.{ .vandnpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x55 }, 0, .vex_256_wig, .avx },
@@ -1295,6 +1375,16 @@ pub const table = [_]Entry{
.{ .vmaxss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .vex_lig_wig, .avx },
+ .{ .vmovmskps, .rm, &.{ .r32, .xmm }, &.{ 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
+ .{ .vmovmskps, .rm, &.{ .r64, .xmm }, &.{ 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
+ .{ .vmovmskps, .rm, &.{ .r32, .ymm }, &.{ 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
+ .{ .vmovmskps, .rm, &.{ .r64, .ymm }, &.{ 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
+
+ .{ .vmovmskpd, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
+ .{ .vmovmskpd, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
+ .{ .vmovmskpd, .rm, &.{ .r32, .ymm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
+ .{ .vmovmskpd, .rm, &.{ .r64, .ymm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
+
.{ .vminpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_128_wig, .avx },
.{ .vminpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_256_wig, .avx },
@@ -1404,10 +1494,26 @@ pub const table = [_]Entry{
.{ .vpaddusb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdc }, 0, .vex_128_wig, .avx },
.{ .vpaddusw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdd }, 0, .vex_128_wig, .avx },
+ .{ .vpalignr, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0f }, 0, .vex_128_wig, .avx },
+
.{ .vpand, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_128_wig, .avx },
.{ .vpandn, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_128_wig, .avx },
+ .{ .vpclmulqdq, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x44 }, 0, .vex_128_wig, .@"pclmul avx" },
+
+ .{ .vpcmpeqb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x74 }, 0, .vex_128_wig, .avx },
+ .{ .vpcmpeqw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x75 }, 0, .vex_128_wig, .avx },
+ .{ .vpcmpeqd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x76 }, 0, .vex_128_wig, .avx },
+
+ .{ .vpcmpeqq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .vex_128_wig, .avx },
+
+ .{ .vpcmpgtb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x64 }, 0, .vex_128_wig, .avx },
+ .{ .vpcmpgtw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x65 }, 0, .vex_128_wig, .avx },
+ .{ .vpcmpgtd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x66 }, 0, .vex_128_wig, .avx },
+
+ .{ .vpcmpgtq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .vex_128_wig, .avx },
+
.{ .vpextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .vex_128_w0, .avx },
.{ .vpextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w0, .avx },
.{ .vpextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w1, .avx },
@@ -1439,6 +1545,9 @@ pub const table = [_]Entry{
.{ .vpminud, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_128_wig, .avx },
+ .{ .vpmovmskb, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_128_wig, .avx },
+ .{ .vpmovmskb, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_128_wig, .avx },
+
.{ .vpmulhw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_128_wig, .avx },
.{ .vpmulld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_128_wig, .avx },
@@ -1447,6 +1556,26 @@ pub const table = [_]Entry{
.{ .vpor, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_128_wig, .avx },
+ .{ .vpshufd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x70 }, 0, .vex_128_wig, .avx },
+
+ .{ .vpshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .vex_128_wig, .avx },
+
+ .{ .vpshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .vex_128_wig, .avx },
+
+ .{ .vpsllw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf1 }, 0, .vex_128_wig, .avx },
+ .{ .vpsllw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 6, .vex_128_wig, .avx },
+ .{ .vpslld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf2 }, 0, .vex_128_wig, .avx },
+ .{ .vpslld, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 6, .vex_128_wig, .avx },
+ .{ .vpsllq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf3 }, 0, .vex_128_wig, .avx },
+ .{ .vpsllq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 6, .vex_128_wig, .avx },
+
+ .{ .vpslldq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 7, .vex_128_wig, .avx },
+
+ .{ .vpsraw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe1 }, 0, .vex_128_wig, .avx },
+ .{ .vpsraw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 4, .vex_128_wig, .avx },
+ .{ .vpsrad, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe2 }, 0, .vex_128_wig, .avx },
+ .{ .vpsrad, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 4, .vex_128_wig, .avx },
+
.{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128_wig, .avx },
.{ .vpsrlw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_128_wig, .avx },
.{ .vpsrld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_128_wig, .avx },
@@ -1454,6 +1583,8 @@ pub const table = [_]Entry{
.{ .vpsrlq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_128_wig, .avx },
.{ .vpsrlq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_128_wig, .avx },
+ .{ .vpsrldq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 3, .vex_128_wig, .avx },
+
.{ .vpsubb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .vex_128_wig, .avx },
.{ .vpsubw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .vex_128_wig, .avx },
.{ .vpsubd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfa }, 0, .vex_128_wig, .avx },
@@ -1550,6 +1681,18 @@ pub const table = [_]Entry{
.{ .vfmadd213ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_lig_w0, .fma },
.{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w0, .fma },
+ // VPCLMULQDQ
+ .{ .vpclmulqdq, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x44 }, 0, .vex_256_wig, .vpclmulqdq },
+
+ // VAES
+ .{ .vaesdec, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xde }, 0, .vex_256_wig, .vaes },
+
+ .{ .vaesdeclast, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xdf }, 0, .vex_256_wig, .vaes },
+
+ .{ .vaesenc, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xdc }, 0, .vex_256_wig, .vaes },
+
+ .{ .vaesenclast, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xdd }, 0, .vex_256_wig, .vaes },
+
// AVX2
.{ .vbroadcastss, .rm, &.{ .xmm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx2 },
.{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 },
@@ -1577,36 +1720,73 @@ pub const table = [_]Entry{
.{ .vpaddusb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdc }, 0, .vex_256_wig, .avx2 },
.{ .vpaddusw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdd }, 0, .vex_256_wig, .avx2 },
+ .{ .vpalignr, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0f }, 0, .vex_256_wig, .avx2 },
+
.{ .vpand, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_256_wig, .avx2 },
.{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 },
- .{ .vpmaxsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_256_wig, .avx },
- .{ .vpmaxsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_256_wig, .avx },
- .{ .vpmaxsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_256_wig, .avx },
+ .{ .vpcmpeqb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x74 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpcmpeqw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x75 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpcmpeqd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x76 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpcmpeqq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpcmpgtb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x64 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpcmpgtw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x65 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpcmpgtd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x66 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpcmpgtq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpmaxsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_256_wig, .avx2 },
+ .{ .vpmaxsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_256_wig, .avx2 },
+ .{ .vpmaxsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_256_wig, .avx2 },
- .{ .vpmaxub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_256_wig, .avx },
- .{ .vpmaxuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_256_wig, .avx },
+ .{ .vpmaxub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_256_wig, .avx2 },
+ .{ .vpmaxuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_256_wig, .avx2 },
- .{ .vpmaxud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_256_wig, .avx },
+ .{ .vpmaxud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_256_wig, .avx2 },
- .{ .vpminsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_256_wig, .avx },
- .{ .vpminsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_256_wig, .avx },
- .{ .vpminsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_256_wig, .avx },
+ .{ .vpminsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpminsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_256_wig, .avx2 },
+ .{ .vpminsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_256_wig, .avx2 },
- .{ .vpminub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_256_wig, .avx },
- .{ .vpminuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_256_wig, .avx },
+ .{ .vpminub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_256_wig, .avx2 },
+ .{ .vpminuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_256_wig, .avx2 },
- .{ .vpminud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_256_wig, .avx },
+ .{ .vpminud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_256_wig, .avx2 },
- .{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx },
+ .{ .vpmovmskb, .rm, &.{ .r32, .ymm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpmovmskb, .rm, &.{ .r64, .ymm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_256_wig, .avx2 },
- .{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx },
+ .{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx2 },
- .{ .vpmullw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_256_wig, .avx },
+ .{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpmullw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_256_wig, .avx2 },
.{ .vpor, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_256_wig, .avx2 },
+ .{ .vpshufd, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x70 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpshufhw, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpshuflw, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .vex_256_wig, .avx2 },
+
+ .{ .vpsllw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf1 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpsllw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 6, .vex_256_wig, .avx2 },
+ .{ .vpslld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf2 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpslld, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 6, .vex_256_wig, .avx2 },
+ .{ .vpsllq, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf3 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpsllq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 6, .vex_256_wig, .avx2 },
+
+ .{ .vpslldq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 7, .vex_256_wig, .avx2 },
+
+ .{ .vpsraw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe1 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpsraw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 4, .vex_256_wig, .avx2 },
+ .{ .vpsrad, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe2 }, 0, .vex_256_wig, .avx2 },
+ .{ .vpsrad, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 4, .vex_256_wig, .avx2 },
+
.{ .vpsrlw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_256_wig, .avx2 },
.{ .vpsrlw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_256_wig, .avx2 },
.{ .vpsrld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_256_wig, .avx2 },
@@ -1614,6 +1794,8 @@ pub const table = [_]Entry{
.{ .vpsrlq, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_256_wig, .avx2 },
.{ .vpsrlq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_256_wig, .avx2 },
+ .{ .vpsrldq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 3, .vex_128_wig, .avx2 },
+
.{ .vpsubb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .vex_256_wig, .avx2 },
.{ .vpsubw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .vex_256_wig, .avx2 },
.{ .vpsubd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfa }, 0, .vex_256_wig, .avx2 },
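A note on the shift-by-immediate rows added above: psllq, pslldq, psrlq, and psrldq (and their VEX forms) share opcodes and are told apart only by the ModR/M reg field, which is what the /digit column (6, 7, 2, 3) records. A standalone sketch of how that digit ends up in the encoded byte (not the backend's encoder):

const std = @import("std");

// Illustrative ModR/M construction: mod=0b11 (register direct), reg carries
// the /digit opcode extension, rm holds the low 3 bits of the xmm register.
fn modRmDirect(digit: u3, rm: u3) u8 {
    return 0xc0 | (@as(u8, digit) << 3) | rm;
}

test "psllq xmm1, imm8 uses /6" {
    // 66 0F 73 /6 ib with xmm1 gives ModR/M byte 0xF1.
    try std.testing.expectEqual(@as(u8, 0xf1), modRmDirect(6, 1));
}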
diff --git a/test/behavior/bitreverse.zig b/test/behavior/bitreverse.zig
index 722edef25e..b254910e46 100644
--- a/test/behavior/bitreverse.zig
+++ b/test/behavior/bitreverse.zig
@@ -11,11 +11,11 @@ test "@bitReverse large exotic integer" {
test "@bitReverse" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
try comptime testBitReverse();
try testBitReverse();
diff --git a/test/behavior/byteswap.zig b/test/behavior/byteswap.zig
index ce33834ffa..88c5372364 100644
--- a/test/behavior/byteswap.zig
+++ b/test/behavior/byteswap.zig
@@ -4,11 +4,11 @@ const expect = std.testing.expect;
test "@byteSwap integers" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
const ByteSwapIntTest = struct {
fn run() !void {
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 579e73b17d..ac7a70b744 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -1321,7 +1321,7 @@ test "remainder division" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
@@ -1401,9 +1401,9 @@ test "float modulo division using @mod" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
try comptime fmod(f16);
try comptime fmod(f32);
diff --git a/test/behavior/popcount.zig b/test/behavior/popcount.zig
index da152d4dc5..eda7346a2d 100644
--- a/test/behavior/popcount.zig
+++ b/test/behavior/popcount.zig
@@ -14,7 +14,6 @@ test "@popCount integers" {
}
test "@popCount 128bit integer" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO