author    Cody Tapscott <topolarity@tapscott.me>    2022-10-18 11:37:43 -0700
committer Cody Tapscott <topolarity@tapscott.me>    2022-10-28 08:41:04 -0700
commit    3295fee9116789f144e6406493116c451aee7c57 (patch)
tree      71f10d7a5b987b956d0811d925424fea57fddd09 /test
parent    c639c225444c9252515949786e139494fb728861 (diff)
stage2: Use mem.readPackedInt etc. for packed bitcasts
Packed memory has a well-defined layout that doesn't require conversion from an integer to read from. Let's use it :-)

This change means that for bitcasting to/from a packed value that is N layers deep, we no longer have to create N temporary big-ints and perform N copies.

Other miscellaneous improvements:
- Adds support for casting to packed enums and vectors
- Fixes bitcasting to/from vectors outside of a packed struct
- Adds a fast path for bitcasting <= u/i64
- Fixes bug when bitcasting f80 which would clear following fields

This also changes the bitcast memory layout of exotic integers on big-endian systems to match what's empirically observed on our targets. Technically, this layout is not guaranteed by LLVM so we should probably ban bitcasts that reveal these padding bits, but for now this is an improvement.
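As a rough, hypothetical sketch of the std.mem helpers the message refers to (not part of this diff; it assumes readPackedInt/writePackedInt take a type, a byte slice, a bit offset, and an endianness, with writePackedInt also taking the value), a field-sized integer can be addressed directly by bit offset within a byte buffer:

const std = @import("std");
const expect = std.testing.expect;

test "illustrative only: packed access by bit offset" {
    // Write a u9 value 5 bits into a plain byte buffer, then read it back.
    // No intermediate big-int copy of the surrounding bytes is involved.
    var buf = [_]u8{0} ** 4;
    std.mem.writePackedInt(u9, &buf, 5, 300, .Little);
    try expect(std.mem.readPackedInt(u9, &buf, 5, .Little) == 300);
}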
Diffstat (limited to 'test')
-rw-r--r--   test/behavior/bitcast.zig   87
1 file changed, 86 insertions, 1 deletion
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index c629a1a34b..b225c31858 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -63,6 +63,10 @@ fn testBitCast(comptime N: usize) !void {
    try expect(conv_iN(N, 0) == 0);
    try expect(conv_iN(N, -0) == 0);
+
+    if (N > 24) {
+        try expect(conv_uN(N, 0xf23456) == 0xf23456);
+    }
}
fn conv_iN(comptime N: usize, x: std.meta.Int(.signed, N)) std.meta.Int(.unsigned, N) {
@@ -73,6 +77,55 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
    return @bitCast(std.meta.Int(.signed, N), x);
}
+test "bitcast uX to bytes" {
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+    const bit_values = [_]usize{ 1, 48, 27, 512, 493, 293, 125, 204, 112 };
+    inline for (bit_values) |bits| {
+        try testBitCast(bits);
+        comptime try testBitCast(bits);
+        // Also exercise the byte-level checks below, so testBitCastuXToBytes
+        // does not go unreferenced (and therefore unanalyzed).
+        try testBitCastuXToBytes(bits);
+    }
+}
+
+fn testBitCastuXToBytes(comptime N: usize) !void {
+
+    // The location of padding bits in these layouts is technically not defined
+    // by LLVM, but we currently allow exotic integers to be cast (at comptime)
+    // to types that expose their padding bits anyway.
+    //
+    // This test at least makes sure those bits are matched by the runtime behavior
+    // on the platforms we target. If the above behavior is restricted after all,
+    // this test should be deleted.
+
+    const T = std.meta.Int(.unsigned, N);
+    for ([_]T{ 0, ~@as(T, 0) }) |init_value| {
+        var x: T = init_value;
+        const bytes = std.mem.asBytes(&x);
+
+        const byte_count = (N + 7) / 8;
+        // Bytes fully covered by value bits must reproduce the initialized pattern.
+        const fill: u8 = if (init_value == 0) 0x00 else 0xff;
+        switch (builtin.cpu.arch.endian()) {
+            .Little => {
+                var byte_i: usize = 0;
+                while (byte_i < (byte_count - 1)) : (byte_i += 1) {
+                    try expect(bytes[byte_i] == fill);
+                }
+                // Shift out the padding bits of the last byte before comparing.
+                try expect(((bytes[byte_i] ^ fill) << -%@truncate(u3, N)) == 0);
+            },
+            .Big => {
+                var byte_i = byte_count - 1;
+                while (byte_i > 0) : (byte_i -= 1) {
+                    try expect(bytes[byte_i] == fill);
+                }
+                try expect(((bytes[byte_i] ^ fill) << -%@truncate(u3, N)) == 0);
+            },
+        }
+    }
+}
+
test "nested bitcast" {
    const S = struct {
        fn moo(x: isize) !void {
@@ -283,7 +336,8 @@ test "@bitCast packed struct of floats" {
    comptime try S.doTheTest();
}
-test "comptime @bitCast packed struct to int" {
+test "comptime @bitCast packed struct to int and back" {
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -304,6 +358,37 @@ test "comptime @bitCast packed struct to int" {
        vectorf: @Vector(2, f16) = .{ 3.14, 2.71 },
    };
    const Int = @typeInfo(S).Struct.backing_integer.?;
+
+    // S -> Int
    var s: S = .{};
    try expectEqual(@bitCast(Int, s), comptime @bitCast(Int, S{}));
+
+    // Int -> S
+    var i: Int = 0;
+    const rt_cast = @bitCast(S, i);
+    const ct_cast = comptime @bitCast(S, @as(Int, 0));
+    inline for (@typeInfo(S).Struct.fields) |field| {
+        if (@typeInfo(field.field_type) == .Vector)
+            continue; // TODO: https://github.com/ziglang/zig/issues/13201
+
+        try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name));
+    }
+}
+
+test "comptime bitcast with fields following a float" {
+    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO: https://github.com/ziglang/zig/issues/13214
+
+    const FloatT = extern struct { f: f80, x: u128 };
+    var x: FloatT = .{ .f = 0.5, .x = 123 };
+    try expect(@bitCast(u256, x) == comptime @bitCast(u256, @as(FloatT, .{ .f = 0.5, .x = 123 })));
+}
+
+test "bitcast vector to integer and back" {
+    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO: https://github.com/ziglang/zig/issues/13220
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest; // stage1 gets the comptime cast wrong
+
+    const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14;
+    var x = @splat(16, true);
+    x[1] = false;
+    try expect(@bitCast(u16, x) == comptime @bitCast(u16, @as(@Vector(16, bool), arr)));
}