author     Cody Tapscott <topolarity@tapscott.me>   2022-10-18 11:37:43 -0700
committer  Cody Tapscott <topolarity@tapscott.me>   2022-10-28 08:41:04 -0700
commit     3295fee9116789f144e6406493116c451aee7c57 (patch)
tree       71f10d7a5b987b956d0811d925424fea57fddd09 /src/Sema.zig
parent     c639c225444c9252515949786e139494fb728861 (diff)
stage2: Use mem.readPackedInt etc. for packed bitcasts
Packed memory has a well-defined layout that doesn't require conversion from an integer to read from. Let's use it :-)

This change means that for bitcasting to/from a packed value that is N layers deep, we no longer have to create N temporary big-ints and perform N copies.

Other miscellaneous improvements:
- Adds support for casting to packed enums and vectors
- Fixes bitcasting to/from vectors outside of a packed struct
- Adds a fast path for bitcasting <= u/i64
- Fixes bug when bitcasting f80 which would clear following fields

This also changes the bitcast memory layout of exotic integers on big-endian systems to match what's empirically observed on our targets. Technically, this layout is not guaranteed by LLVM, so we should probably ban bitcasts that reveal these padding bits, but for now this is an improvement.
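As a quick illustration of the approach (not part of this patch), here is a minimal sketch that writes a bit-field into a plain byte buffer and reads it back with std.mem.writePackedInt / std.mem.readPackedInt. The buffer size, bit width, and bit offset are arbitrary, and the parameter order (type, bytes, bit offset[, value], endianness) is assumed from the std.mem API this series builds on:

    const std = @import("std");
    const builtin = @import("builtin");

    test "packed read/write goes straight through a byte buffer" {
        const endian = builtin.cpu.arch.endian();
        var buf = [_]u8{0} ** 4;

        // Write a 9-bit value starting 3 bits into the buffer, then read it
        // back; no intermediate big-int is constructed along the way.
        std.mem.writePackedInt(u9, &buf, 3, 300, endian);
        const v = std.mem.readPackedInt(u9, &buf, 3, endian);
        try std.testing.expectEqual(@as(u9, 300), v);
    }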
Diffstat (limited to 'src/Sema.zig')
-rw-r--r--  src/Sema.zig  42
1 file changed, 0 insertions(+), 42 deletions(-)
diff --git a/src/Sema.zig b/src/Sema.zig
index 9f425b7bcf..fbc8d1dd8e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -26445,48 +26445,6 @@ fn bitCastVal(
     const target = sema.mod.getTarget();
     if (old_ty.eql(new_ty, sema.mod)) return val;
 
-    // Some conversions have a bitwise definition that ignores in-memory layout,
-    // such as converting between f80 and u80.
-
-    if (old_ty.eql(Type.f80, sema.mod) and new_ty.isAbiInt()) {
-        const float = val.toFloat(f80);
-        switch (new_ty.intInfo(target).signedness) {
-            .signed => {
-                const int = @bitCast(i80, float);
-                const limbs = try sema.arena.alloc(std.math.big.Limb, 2);
-                const big_int = std.math.big.int.Mutable.init(limbs, int);
-                return Value.fromBigInt(sema.arena, big_int.toConst());
-            },
-            .unsigned => {
-                const int = @bitCast(u80, float);
-                const limbs = try sema.arena.alloc(std.math.big.Limb, 2);
-                const big_int = std.math.big.int.Mutable.init(limbs, int);
-                return Value.fromBigInt(sema.arena, big_int.toConst());
-            },
-        }
-    }
-
-    if (new_ty.eql(Type.f80, sema.mod) and old_ty.isAbiInt()) {
-        var bigint_space: Value.BigIntSpace = undefined;
-        var bigint = try val.toBigIntAdvanced(&bigint_space, target, sema.kit(block, src));
-        switch (old_ty.intInfo(target).signedness) {
-            .signed => {
-                // This conversion cannot fail because we already checked bit size before
-                // calling bitCastVal.
-                const int = bigint.to(i80) catch unreachable;
-                const float = @bitCast(f80, int);
-                return Value.Tag.float_80.create(sema.arena, float);
-            },
-            .unsigned => {
-                // This conversion cannot fail because we already checked bit size before
-                // calling bitCastVal.
-                const int = bigint.to(u80) catch unreachable;
-                const float = @bitCast(f80, int);
-                return Value.Tag.float_80.create(sema.arena, float);
-            },
-        }
-    }
-
     // For types with well-defined memory layouts, we serialize them a byte buffer,
     // then deserialize to the new type.
     const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
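For reference, the conversion the removed branches special-cased is the plain f80 <-> u80 bitcast, which now flows through the generic byte-buffer path retained above. A minimal sketch, using the same two-argument @bitCast form that appears in the removed code (pre-0.11 syntax); the expected bit pattern for 1.0 in 80-bit extended precision (sign 0, exponent 0x3FFF, explicit integer bit set, zero fraction) is a property of the format, not of this patch:

    const std = @import("std");

    test "f80 <-> u80 bitcast via the generic path" {
        // Comptime-known operands exercise bitCastVal in Sema.
        const bits = comptime @bitCast(u80, @as(f80, 1.0));
        try std.testing.expectEqual(@as(u80, 0x3FFF8000000000000000), bits);
        try std.testing.expectEqual(@as(f80, 1.0), @bitCast(f80, bits));
    }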