aboutsummaryrefslogtreecommitdiff
path: root/src/value.zig
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2022-07-01 15:52:54 -0700
committerAndrew Kelley <andrew@ziglang.org>2022-07-01 15:52:54 -0700
commitc89dd15e1be4959800dc7092d7dd4375253db7bc (patch)
treeca184ae53592efa21e67128a5f891d642d7f1118 /src/value.zig
parent5466e87fce581f2ef90ac23bb80b1dbc05836fc6 (diff)
parent2360f8c490f3ec684ed64ff28e8c1fade249070b (diff)
downloadzig-c89dd15e1be4959800dc7092d7dd4375253db7bc.tar.gz
zig-c89dd15e1be4959800dc7092d7dd4375253db7bc.zip
Merge remote-tracking branch 'origin/master' into llvm14
Diffstat (limited to 'src/value.zig')
-rw-r--r--src/value.zig3089
1 files changed, 2374 insertions, 715 deletions
diff --git a/src/value.zig b/src/value.zig
index e444e2daf1..90cdf82834 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -8,6 +8,8 @@ const Target = std.Target;
const Allocator = std.mem.Allocator;
const Module = @import("Module.zig");
const Air = @import("Air.zig");
+const TypedValue = @import("TypedValue.zig");
+const Sema = @import("Sema.zig");
/// This is the raw data, with no bookkeeping, no memory awareness,
/// no de-duplication, and no type system awareness.
@@ -20,6 +22,7 @@ pub const Value = extern union {
tag_if_small_enough: Tag,
ptr_otherwise: *Payload,
+ // Keep in sync with tools/zig-gdb.py
pub const Tag = enum(usize) {
// The first section of this enum are tags that require no payload.
u1_type,
@@ -27,6 +30,7 @@ pub const Value = extern union {
i8_type,
u16_type,
i16_type,
+ u29_type,
u32_type,
i32_type,
u64_type,
@@ -97,7 +101,6 @@ pub const Value = extern union {
bool_false,
generic_poison,
- abi_align_default,
empty_struct_value,
empty_array, // See last_no_payload_tag below.
// After this, the tag requires a payload.
@@ -118,19 +121,19 @@ pub const Value = extern union {
/// This Tag will never be seen by machine codegen backends. It is changed into a
/// `decl_ref` when a comptime variable goes out of scope.
decl_ref_mut,
+ /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value.
+ comptime_field_ptr,
/// Pointer to a specific element of an array, vector or slice.
elem_ptr,
/// Pointer to a specific field of a struct or union.
field_ptr,
/// A slice of u8 whose memory is managed externally.
bytes,
+ /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`.
+ str_lit,
/// This value is repeated some number of times. The amount of times to repeat
/// is stored externally.
repeated,
- /// Each element stored as a `Value`.
- /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
- /// so the slice length will be one more than the type's array length.
- array,
/// An array with length 0 but it has a sentinel.
empty_array_sentinel,
/// Pointer and length as sub `Value` objects.
@@ -138,6 +141,7 @@ pub const Value = extern union {
float_16,
float_32,
float_64,
+ float_80,
float_128,
enum_literal,
/// A specific enum tag, indicated by the field index (declaration order).
@@ -161,8 +165,11 @@ pub const Value = extern union {
opt_payload,
/// A pointer to the payload of an optional, based on a pointer to an optional.
opt_payload_ptr,
- /// An instance of a struct.
- @"struct",
+ /// An instance of a struct, array, or vector.
+ /// Each element/field stored as a `Value`.
+ /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
+ /// so the slice length will be one more than the type's array length.
+ aggregate,
/// An instance of a union.
@"union",
/// This is a special value that tracks a set of types that have been stored
@@ -175,6 +182,10 @@ pub const Value = extern union {
/// and refers directly to the air. It will never be referenced by the air itself.
/// TODO: This is probably a bad encoding, maybe put temp data in the sema instead.
bound_fn,
+ /// The ABI alignment of the payload type.
+ lazy_align,
+ /// The ABI alignment of the payload type.
+ lazy_size,
pub const last_no_payload_tag = Tag.empty_array;
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
@@ -186,6 +197,7 @@ pub const Value = extern union {
.i8_type,
.u16_type,
.i16_type,
+ .u29_type,
.u32_type,
.i32_type,
.u64_type,
@@ -240,7 +252,6 @@ pub const Value = extern union {
.null_value,
.bool_true,
.bool_false,
- .abi_align_default,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
@@ -262,28 +273,34 @@ pub const Value = extern union {
.int_big_negative,
=> Payload.BigInt,
- .extern_fn,
- .decl_ref,
- => Payload.Decl,
+ .extern_fn => Payload.ExternFn,
+
+ .decl_ref => Payload.Decl,
.repeated,
.eu_payload,
- .eu_payload_ptr,
.opt_payload,
- .opt_payload_ptr,
.empty_array_sentinel,
=> Payload.SubValue,
+ .eu_payload_ptr,
+ .opt_payload_ptr,
+ => Payload.PayloadPtr,
+
.bytes,
.enum_literal,
=> Payload.Bytes,
- .array => Payload.Array,
+ .str_lit => Payload.StrLit,
.slice => Payload.Slice,
.enum_field_index => Payload.U32,
- .ty => Payload.Ty,
+ .ty,
+ .lazy_align,
+ .lazy_size,
+ => Payload.Ty,
+
.int_type => Payload.IntType,
.int_u64 => Payload.U64,
.int_i64 => Payload.I64,
@@ -295,13 +312,15 @@ pub const Value = extern union {
.float_16 => Payload.Float_16,
.float_32 => Payload.Float_32,
.float_64 => Payload.Float_64,
+ .float_80 => Payload.Float_80,
.float_128 => Payload.Float_128,
.@"error" => Payload.Error,
.inferred_alloc => Payload.InferredAlloc,
.inferred_alloc_comptime => Payload.InferredAllocComptime,
- .@"struct" => Payload.Struct,
+ .aggregate => Payload.Aggregate,
.@"union" => Payload.Union,
.bound_fn => Payload.BoundFn,
+ .comptime_field_ptr => Payload.ComptimeFieldPtr,
};
}
@@ -380,6 +399,7 @@ pub const Value = extern union {
.i8_type,
.u16_type,
.i16_type,
+ .u29_type,
.u32_type,
.i32_type,
.u64_type,
@@ -434,7 +454,6 @@ pub const Value = extern union {
.bool_true,
.bool_false,
.empty_struct_value,
- .abi_align_default,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
@@ -453,8 +472,8 @@ pub const Value = extern union {
.bound_fn,
=> unreachable,
- .ty => {
- const payload = self.castTag(.ty).?;
+ .ty, .lazy_align, .lazy_size => {
+ const payload = self.cast(Payload.Ty).?;
const new_payload = try arena.create(Payload.Ty);
new_payload.* = .{
.base = payload.base,
@@ -475,10 +494,36 @@ pub const Value = extern union {
return Value{ .ptr_otherwise = &new_payload.base };
},
.function => return self.copyPayloadShallow(arena, Payload.Function),
- .extern_fn => return self.copyPayloadShallow(arena, Payload.Decl),
+ .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
.variable => return self.copyPayloadShallow(arena, Payload.Variable),
.decl_ref => return self.copyPayloadShallow(arena, Payload.Decl),
.decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut),
+ .eu_payload_ptr,
+ .opt_payload_ptr,
+ => {
+ const payload = self.cast(Payload.PayloadPtr).?;
+ const new_payload = try arena.create(Payload.PayloadPtr);
+ new_payload.* = .{
+ .base = payload.base,
+ .data = .{
+ .container_ptr = try payload.data.container_ptr.copy(arena),
+ .container_ty = try payload.data.container_ty.copy(arena),
+ },
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
+ .comptime_field_ptr => {
+ const payload = self.cast(Payload.ComptimeFieldPtr).?;
+ const new_payload = try arena.create(Payload.ComptimeFieldPtr);
+ new_payload.* = .{
+ .base = payload.base,
+ .data = .{
+ .field_val = try payload.data.field_val.copy(arena),
+ .field_ty = try payload.data.field_ty.copy(arena),
+ },
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
.elem_ptr => {
const payload = self.castTag(.elem_ptr).?;
const new_payload = try arena.create(Payload.ElemPtr);
@@ -486,6 +531,7 @@ pub const Value = extern union {
.base = payload.base,
.data = .{
.array_ptr = try payload.data.array_ptr.copy(arena),
+ .elem_ty = try payload.data.elem_ty.copy(arena),
.index = payload.data.index,
},
};
@@ -498,17 +544,25 @@ pub const Value = extern union {
.base = payload.base,
.data = .{
.container_ptr = try payload.data.container_ptr.copy(arena),
+ .container_ty = try payload.data.container_ty.copy(arena),
.field_index = payload.data.field_index,
},
};
return Value{ .ptr_otherwise = &new_payload.base };
},
- .bytes => return self.copyPayloadShallow(arena, Payload.Bytes),
+ .bytes => {
+ const bytes = self.castTag(.bytes).?.data;
+ const new_payload = try arena.create(Payload.Bytes);
+ new_payload.* = .{
+ .base = .{ .tag = .bytes },
+ .data = try arena.dupe(u8, bytes),
+ };
+ return Value{ .ptr_otherwise = &new_payload.base };
+ },
+ .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit),
.repeated,
.eu_payload,
- .eu_payload_ptr,
.opt_payload,
- .opt_payload_ptr,
.empty_array_sentinel,
=> {
const payload = self.cast(Payload.SubValue).?;
@@ -519,18 +573,6 @@ pub const Value = extern union {
};
return Value{ .ptr_otherwise = &new_payload.base };
},
- .array => {
- const payload = self.castTag(.array).?;
- const new_payload = try arena.create(Payload.Array);
- new_payload.* = .{
- .base = payload.base,
- .data = try arena.alloc(Value, payload.data.len),
- };
- for (new_payload.data) |*elem, i| {
- elem.* = try payload.data[i].copy(arena);
- }
- return Value{ .ptr_otherwise = &new_payload.base };
- },
.slice => {
const payload = self.castTag(.slice).?;
const new_payload = try arena.create(Payload.Slice);
@@ -546,6 +588,7 @@ pub const Value = extern union {
.float_16 => return self.copyPayloadShallow(arena, Payload.Float_16),
.float_32 => return self.copyPayloadShallow(arena, Payload.Float_32),
.float_64 => return self.copyPayloadShallow(arena, Payload.Float_64),
+ .float_80 => return self.copyPayloadShallow(arena, Payload.Float_80),
.float_128 => return self.copyPayloadShallow(arena, Payload.Float_128),
.enum_literal => {
const payload = self.castTag(.enum_literal).?;
@@ -559,15 +602,15 @@ pub const Value = extern union {
.enum_field_index => return self.copyPayloadShallow(arena, Payload.U32),
.@"error" => return self.copyPayloadShallow(arena, Payload.Error),
- .@"struct" => {
- const old_field_values = self.castTag(.@"struct").?.data;
- const new_payload = try arena.create(Payload.Struct);
+ .aggregate => {
+ const payload = self.castTag(.aggregate).?;
+ const new_payload = try arena.create(Payload.Aggregate);
new_payload.* = .{
- .base = .{ .tag = .@"struct" },
- .data = try arena.alloc(Value, old_field_values.len),
+ .base = payload.base,
+ .data = try arena.alloc(Value, payload.data.len),
};
- for (old_field_values) |old_field_val, i| {
- new_payload.data[i] = try old_field_val.copy(arena);
+ for (new_payload.data) |*elem, i| {
+ elem.* = try payload.data[i].copy(arena);
}
return Value{ .ptr_otherwise = &new_payload.base };
},
@@ -597,9 +640,17 @@ pub const Value = extern union {
return Value{ .ptr_otherwise = &new_payload.base };
}
- /// TODO this should become a debug dump() function. In order to print values in a meaningful way
+ pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
+ _ = val;
+ _ = fmt;
+ _ = options;
+ _ = writer;
+ @compileError("do not use format values directly; use either fmtDebug or fmtValue");
+ }
+
+ /// This is a debug function. In order to print values in a meaningful way
/// we also need access to the type.
- pub fn format(
+ pub fn dump(
start_val: Value,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
@@ -612,6 +663,7 @@ pub const Value = extern union {
.u8_type => return out_stream.writeAll("u8"),
.i8_type => return out_stream.writeAll("i8"),
.u16_type => return out_stream.writeAll("u16"),
+ .u29_type => return out_stream.writeAll("u29"),
.i16_type => return out_stream.writeAll("i16"),
.u32_type => return out_stream.writeAll("u32"),
.i32_type => return out_stream.writeAll("i32"),
@@ -670,12 +722,11 @@ pub const Value = extern union {
.prefetch_options_type => return out_stream.writeAll("std.builtin.PrefetchOptions"),
.export_options_type => return out_stream.writeAll("std.builtin.ExportOptions"),
.extern_options_type => return out_stream.writeAll("std.builtin.ExternOptions"),
- .type_info_type => return out_stream.writeAll("std.builtin.TypeInfo"),
- .abi_align_default => return out_stream.writeAll("(default ABI alignment)"),
+ .type_info_type => return out_stream.writeAll("std.builtin.Type"),
.empty_struct_value => return out_stream.writeAll("struct {}{}"),
- .@"struct" => {
- return out_stream.writeAll("(struct value)");
+ .aggregate => {
+ return out_stream.writeAll("(aggregate)");
},
.@"union" => {
return out_stream.writeAll("(union value)");
@@ -689,7 +740,17 @@ pub const Value = extern union {
.the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
.bool_true => return out_stream.writeAll("true"),
.bool_false => return out_stream.writeAll("false"),
- .ty => return val.castTag(.ty).?.data.format("", options, out_stream),
+ .ty => return val.castTag(.ty).?.data.dump("", options, out_stream),
+ .lazy_align => {
+ try out_stream.writeAll("@alignOf(");
+ try val.castTag(.lazy_align).?.data.dump("", options, out_stream);
+ return try out_stream.writeAll(")");
+ },
+ .lazy_size => {
+ try out_stream.writeAll("@sizeOf(");
+ try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
+ return try out_stream.writeAll(")");
+ },
.int_type => {
const int_type = val.castTag(.int_type).?.data;
return out_stream.print("{s}{d}", .{
@@ -701,14 +762,20 @@ pub const Value = extern union {
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
.int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
.int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
- .function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}),
+ .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
.extern_fn => return out_stream.writeAll("(extern function)"),
.variable => return out_stream.writeAll("(variable)"),
.decl_ref_mut => {
- const decl = val.castTag(.decl_ref_mut).?.data.decl;
- return out_stream.print("(decl_ref_mut '{s}')", .{decl.name});
+ const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
+ return out_stream.print("(decl_ref_mut {d})", .{decl_index});
+ },
+ .decl_ref => {
+ const decl_index = val.castTag(.decl_ref).?.data;
+ return out_stream.print("(decl_ref {d})", .{decl_index});
+ },
+ .comptime_field_ptr => {
+ return out_stream.writeAll("(comptime_field_ptr)");
},
- .decl_ref => return out_stream.writeAll("(decl ref)"),
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
try out_stream.print("&[{}] ", .{elem_ptr.index});
@@ -723,16 +790,22 @@ pub const Value = extern union {
.enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}),
.enum_field_index => return out_stream.print("(enum field {d})", .{val.castTag(.enum_field_index).?.data}),
.bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
+ .str_lit => {
+ const str_lit = val.castTag(.str_lit).?.data;
+ return out_stream.print("(.str_lit index={d} len={d})", .{
+ str_lit.index, str_lit.len,
+ });
+ },
.repeated => {
try out_stream.writeAll("(repeated) ");
val = val.castTag(.repeated).?.data;
},
- .array => return out_stream.writeAll("(array)"),
.empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"),
.slice => return out_stream.writeAll("(slice)"),
.float_16 => return out_stream.print("{}", .{val.castTag(.float_16).?.data}),
.float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}),
.float_64 => return out_stream.print("{}", .{val.castTag(.float_64).?.data}),
+ .float_80 => return out_stream.print("{}", .{val.castTag(.float_80).?.data}),
.float_128 => return out_stream.print("{}", .{val.castTag(.float_128).?.data}),
.@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
// TODO to print this it should be error{ Set, Items }!T(val), but we need the type for that
@@ -748,11 +821,11 @@ pub const Value = extern union {
.inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"),
.eu_payload_ptr => {
try out_stream.writeAll("(eu_payload_ptr)");
- val = val.castTag(.eu_payload_ptr).?.data;
+ val = val.castTag(.eu_payload_ptr).?.data.container_ptr;
},
.opt_payload_ptr => {
try out_stream.writeAll("(opt_payload_ptr)");
- val = val.castTag(.opt_payload_ptr).?.data;
+ val = val.castTag(.opt_payload_ptr).?.data.container_ptr;
},
.bound_fn => {
const bound_func = val.castTag(.bound_fn).?.data;
@@ -761,9 +834,21 @@ pub const Value = extern union {
};
}
+ pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) {
+ return .{ .data = val };
+ }
+
+ pub fn fmtValue(val: Value, ty: Type, mod: *Module) std.fmt.Formatter(TypedValue.format) {
+ return .{ .data = .{
+ .tv = .{ .ty = ty, .val = val },
+ .mod = mod,
+ } };
+ }
+
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
- pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator) ![]u8 {
+ pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
+ const target = mod.getTarget();
switch (val.tag()) {
.bytes => {
const bytes = val.castTag(.bytes).?.data;
@@ -771,30 +856,41 @@ pub const Value = extern union {
const adjusted_bytes = bytes[0..adjusted_len];
return allocator.dupe(u8, adjusted_bytes);
},
+ .str_lit => {
+ const str_lit = val.castTag(.str_lit).?.data;
+ const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ return allocator.dupe(u8, bytes);
+ },
.enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
.repeated => {
- const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt());
+ const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target));
const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
std.mem.set(u8, result, byte);
return result;
},
.decl_ref => {
- const decl = val.castTag(.decl_ref).?.data;
+ const decl_index = val.castTag(.decl_ref).?.data;
+ const decl = mod.declPtr(decl_index);
const decl_val = try decl.value();
- return decl_val.toAllocatedBytes(decl.ty, allocator);
+ return decl_val.toAllocatedBytes(decl.ty, allocator, mod);
},
.the_only_possible_value => return &[_]u8{},
- .slice => return toAllocatedBytes(val.castTag(.slice).?.data.ptr, ty, allocator),
- else => {
- const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
- var elem_value_buf: ElemValueBuffer = undefined;
- for (result) |*elem, i| {
- const elem_val = val.elemValueBuffer(i, &elem_value_buf);
- elem.* = @intCast(u8, elem_val.toUnsignedInt());
- }
- return result;
+ .slice => {
+ const slice = val.castTag(.slice).?.data;
+ return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod);
},
+ else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod),
+ }
+ }
+
+ fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
+ const result = try allocator.alloc(u8, @intCast(usize, len));
+ var elem_value_buf: ElemValueBuffer = undefined;
+ for (result) |*elem, i| {
+ const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
+ elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
}
+ return result;
}
pub const ToTypeBuffer = Type.Payload.Bits;
@@ -808,6 +904,7 @@ pub const Value = extern union {
.i8_type => Type.initTag(.i8),
.u16_type => Type.initTag(.u16),
.i16_type => Type.initTag(.i16),
+ .u29_type => Type.initTag(.u29),
.u32_type => Type.initTag(.u32),
.i32_type => Type.initTag(.i32),
.u64_type => Type.initTag(.u64),
@@ -905,6 +1002,10 @@ pub const Value = extern union {
assert(ty.enumFieldCount() == 1);
break :blk 0;
},
+ .enum_literal => i: {
+ const name = val.castTag(.enum_literal).?.data;
+ break :i ty.enumFieldIndex(name).?;
+ },
// Assume it is already an integer and return it directly.
else => return val,
};
@@ -949,8 +1050,19 @@ pub const Value = extern union {
}
/// Asserts the value is an integer.
- pub fn toBigInt(self: Value, space: *BigIntSpace) BigIntConst {
- switch (self.tag()) {
+ pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst {
+ return val.toBigIntAdvanced(space, target, null) catch unreachable;
+ }
+
+ /// Asserts the value is an integer.
+ pub fn toBigIntAdvanced(
+ val: Value,
+ space: *BigIntSpace,
+ target: Target,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!BigIntConst {
+ switch (val.tag()) {
+ .null_value,
.zero,
.bool_false,
.the_only_possible_value, // i0, u0
@@ -960,19 +1072,51 @@ pub const Value = extern union {
.bool_true,
=> return BigIntMutable.init(&space.limbs, 1).toConst(),
- .int_u64 => return BigIntMutable.init(&space.limbs, self.castTag(.int_u64).?.data).toConst(),
- .int_i64 => return BigIntMutable.init(&space.limbs, self.castTag(.int_i64).?.data).toConst(),
- .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt(),
- .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt(),
+ .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
+ .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
+ .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(),
+ .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(),
.undef => unreachable,
+
+ .lazy_align => {
+ const ty = val.castTag(.lazy_align).?.data;
+ if (sema_kit) |sk| {
+ try sk.sema.resolveTypeLayout(sk.block, sk.src, ty);
+ }
+ const x = ty.abiAlignment(target);
+ return BigIntMutable.init(&space.limbs, x).toConst();
+ },
+ .lazy_size => {
+ const ty = val.castTag(.lazy_size).?.data;
+ if (sema_kit) |sk| {
+ try sk.sema.resolveTypeLayout(sk.block, sk.src, ty);
+ }
+ const x = ty.abiSize(target);
+ return BigIntMutable.init(&space.limbs, x).toConst();
+ },
+
+ .elem_ptr => {
+ const elem_ptr = val.castTag(.elem_ptr).?.data;
+ const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(target, sema_kit)).?;
+ const elem_size = elem_ptr.elem_ty.abiSize(target);
+ const new_addr = array_addr + elem_size * elem_ptr.index;
+ return BigIntMutable.init(&space.limbs, new_addr).toConst();
+ },
+
else => unreachable,
}
}
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
- pub fn getUnsignedInt(val: Value) ?u64 {
+ pub fn getUnsignedInt(val: Value, target: Target) ?u64 {
+ return getUnsignedIntAdvanced(val, target, null) catch unreachable;
+ }
+
+ /// If the value fits in a u64, return it, otherwise null.
+ /// Asserts not undefined.
+ pub fn getUnsignedIntAdvanced(val: Value, target: Target, sema_kit: ?Module.WipAnalysis) !?u64 {
switch (val.tag()) {
.zero,
.bool_false,
@@ -989,13 +1133,31 @@ pub const Value = extern union {
.int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
.undef => unreachable,
+
+ .lazy_align => {
+ const ty = val.castTag(.lazy_align).?.data;
+ if (sema_kit) |sk| {
+ return (try ty.abiAlignmentAdvanced(target, .{ .sema_kit = sk })).scalar;
+ } else {
+ return ty.abiAlignment(target);
+ }
+ },
+ .lazy_size => {
+ const ty = val.castTag(.lazy_size).?.data;
+ if (sema_kit) |sk| {
+ return (try ty.abiSizeAdvanced(target, .{ .sema_kit = sk })).scalar;
+ } else {
+ return ty.abiSize(target);
+ }
+ },
+
else => return null,
}
}
/// Asserts the value is an integer and it fits in a u64
- pub fn toUnsignedInt(val: Value) u64 {
- return getUnsignedInt(val).?;
+ pub fn toUnsignedInt(val: Value, target: Target) u64 {
+ return getUnsignedInt(val, target).?;
}
/// Asserts the value is an integer and it fits in a i64
@@ -1028,26 +1190,39 @@ pub const Value = extern union {
};
}
- pub fn writeToMemory(val: Value, ty: Type, target: Target, buffer: []u8) void {
+ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) void {
+ const target = mod.getTarget();
+ if (val.isUndef()) {
+ const size = @intCast(usize, ty.abiSize(target));
+ std.mem.set(u8, buffer[0..size], 0xaa);
+ return;
+ }
switch (ty.zigTypeTag()) {
+ .Void => {},
+ .Bool => {
+ buffer[0] = @boolToInt(val.toBool());
+ },
.Int => {
var bigint_buffer: BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_buffer);
+ const bigint = val.toBigInt(&bigint_buffer, target);
const bits = ty.intInfo(target).bits;
- bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
+ const abi_size = @intCast(usize, ty.abiSize(target));
+ bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
},
.Enum => {
var enum_buffer: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_buffer);
var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer);
+ const bigint = int_val.toBigInt(&bigint_buffer, target);
const bits = ty.intInfo(target).bits;
- bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
+ const abi_size = @intCast(usize, ty.abiSize(target));
+ bigint.writeTwosComplement(buffer, bits, abi_size, target.cpu.arch.endian());
},
.Float => switch (ty.floatBits(target)) {
16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer),
64 => return floatWriteToMemory(f64, val.toFloat(f64), target, buffer),
+ 80 => return floatWriteToMemory(f80, val.toFloat(f80), target, buffer),
128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer),
else => unreachable,
},
@@ -1059,67 +1234,306 @@ pub const Value = extern union {
var elem_value_buf: ElemValueBuffer = undefined;
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
- const elem_val = val.elemValueBuffer(elem_i, &elem_value_buf);
- writeToMemory(elem_val, elem_ty, target, buffer[buf_off..]);
+ const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
+ writeToMemory(elem_val, elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size;
}
},
+ .Struct => switch (ty.containerLayout()) {
+ .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Extern => {
+ const fields = ty.structFields().values();
+ const field_vals = val.castTag(.aggregate).?.data;
+ for (fields) |field, i| {
+ const off = @intCast(usize, ty.structFieldOffset(i, target));
+ writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
+ }
+ },
+ .Packed => {
+ // TODO allocate enough heap space instead of using this buffer
+ // on the stack.
+ var buf: [16]std.math.big.Limb = undefined;
+ const host_int = packedStructToInt(val, ty, target, &buf);
+ const abi_size = @intCast(usize, ty.abiSize(target));
+ const bit_size = @intCast(usize, ty.bitSize(target));
+ host_int.writeTwosComplement(buffer, bit_size, abi_size, target.cpu.arch.endian());
+ },
+ },
+ .ErrorSet => {
+ // TODO revisit this when we have the concept of the error tag type
+ const Int = u16;
+ const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
+ std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), target.cpu.arch.endian());
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
- pub fn readFromMemory(ty: Type, target: Target, buffer: []const u8, arena: Allocator) !Value {
+ fn packedStructToInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst {
+ var bigint = BigIntMutable.init(buf, 0);
+ const fields = ty.structFields().values();
+ const field_vals = val.castTag(.aggregate).?.data;
+ var bits: u16 = 0;
+ // TODO allocate enough heap space instead of using this buffer
+ // on the stack.
+ var field_buf: [16]std.math.big.Limb = undefined;
+ var field_space: BigIntSpace = undefined;
+ var field_buf2: [16]std.math.big.Limb = undefined;
+ for (fields) |field, i| {
+ const field_val = field_vals[i];
+ const field_bigint_const = switch (field.ty.zigTypeTag()) {
+ .Float => switch (field.ty.floatBits(target)) {
+ 16 => bitcastFloatToBigInt(f16, field_val.toFloat(f16), &field_buf),
+ 32 => bitcastFloatToBigInt(f32, field_val.toFloat(f32), &field_buf),
+ 64 => bitcastFloatToBigInt(f64, field_val.toFloat(f64), &field_buf),
+ 80 => bitcastFloatToBigInt(f80, field_val.toFloat(f80), &field_buf),
+ 128 => bitcastFloatToBigInt(f128, field_val.toFloat(f128), &field_buf),
+ else => unreachable,
+ },
+ .Int, .Bool => field_val.toBigInt(&field_space, target),
+ .Struct => packedStructToInt(field_val, field.ty, target, &field_buf),
+ else => unreachable,
+ };
+ var field_bigint = BigIntMutable.init(&field_buf2, 0);
+ field_bigint.shiftLeft(field_bigint_const, bits);
+ bits += @intCast(u16, field.ty.bitSize(target));
+ bigint.bitOr(bigint.toConst(), field_bigint.toConst());
+ }
+ return bigint.toConst();
+ }
+
+ fn bitcastFloatToBigInt(comptime F: type, f: F, buf: []std.math.big.Limb) BigIntConst {
+ const Int = @Type(.{ .Int = .{
+ .signedness = .unsigned,
+ .bits = @typeInfo(F).Float.bits,
+ } });
+ const int = @bitCast(Int, f);
+ return BigIntMutable.init(buf, int).toConst();
+ }
+
+ pub fn readFromMemory(
+ ty: Type,
+ mod: *Module,
+ buffer: []const u8,
+ arena: Allocator,
+ ) Allocator.Error!Value {
+ const target = mod.getTarget();
switch (ty.zigTypeTag()) {
+ .Void => return Value.@"void",
+ .Bool => {
+ if (buffer[0] == 0) {
+ return Value.@"false";
+ } else {
+ return Value.@"true";
+ }
+ },
.Int => {
+ if (buffer.len == 0) return Value.zero;
const int_info = ty.intInfo(target);
const endian = target.cpu.arch.endian();
- // TODO use a correct amount of limbs
- const limbs_buffer = try arena.alloc(std.math.big.Limb, 2);
+ const Limb = std.math.big.Limb;
+ const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+ const limbs_buffer = try arena.alloc(Limb, limb_count);
+ const abi_size = @intCast(usize, ty.abiSize(target));
var bigint = BigIntMutable.init(limbs_buffer, 0);
- bigint.readTwosComplement(buffer, int_info.bits, endian, int_info.signedness);
+ bigint.readTwosComplement(buffer, int_info.bits, abi_size, endian, int_info.signedness);
return fromBigInt(arena, bigint.toConst());
},
.Float => switch (ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, floatReadFromMemory(f16, target, buffer)),
32 => return Value.Tag.float_32.create(arena, floatReadFromMemory(f32, target, buffer)),
64 => return Value.Tag.float_64.create(arena, floatReadFromMemory(f64, target, buffer)),
+ 80 => return Value.Tag.float_80.create(arena, floatReadFromMemory(f80, target, buffer)),
128 => return Value.Tag.float_128.create(arena, floatReadFromMemory(f128, target, buffer)),
else => unreachable,
},
+ .Array, .Vector => {
+ const elem_ty = ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
+ var offset: usize = 0;
+ for (elems) |*elem| {
+ elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena);
+ offset += @intCast(usize, elem_size);
+ }
+ return Tag.aggregate.create(arena, elems);
+ },
+ .Struct => switch (ty.containerLayout()) {
+ .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Extern => {
+ const fields = ty.structFields().values();
+ const field_vals = try arena.alloc(Value, fields.len);
+ for (fields) |field, i| {
+ const off = @intCast(usize, ty.structFieldOffset(i, target));
+ field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..], arena);
+ }
+ return Tag.aggregate.create(arena, field_vals);
+ },
+ .Packed => {
+ const endian = target.cpu.arch.endian();
+ const Limb = std.math.big.Limb;
+ const abi_size = @intCast(usize, ty.abiSize(target));
+ const bit_size = @intCast(usize, ty.bitSize(target));
+ const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+ const limbs_buffer = try arena.alloc(Limb, limb_count);
+ var bigint = BigIntMutable.init(limbs_buffer, 0);
+ bigint.readTwosComplement(buffer, bit_size, abi_size, endian, .unsigned);
+ return intToPackedStruct(ty, target, bigint.toConst(), arena);
+ },
+ },
+ .ErrorSet => {
+ // TODO revisit this when we have the concept of the error tag type
+ const Int = u16;
+ const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
+
+ const payload = try arena.create(Value.Payload.Error);
+ payload.* = .{
+ .base = .{ .tag = .@"error" },
+ .data = .{ .name = mod.error_name_list.items[@intCast(usize, int)] },
+ };
+ return Value.initPayload(&payload.base);
+ },
else => @panic("TODO implement readFromMemory for more types"),
}
}
+ fn intToPackedStruct(
+ ty: Type,
+ target: Target,
+ bigint: BigIntConst,
+ arena: Allocator,
+ ) Allocator.Error!Value {
+ const limbs_buffer = try arena.alloc(std.math.big.Limb, bigint.limbs.len);
+ var bigint_mut = bigint.toMutable(limbs_buffer);
+ const fields = ty.structFields().values();
+ const field_vals = try arena.alloc(Value, fields.len);
+ var bits: u16 = 0;
+ for (fields) |field, i| {
+ const field_bits = @intCast(u16, field.ty.bitSize(target));
+ bigint_mut.shiftRight(bigint, bits);
+ bigint_mut.truncate(bigint_mut.toConst(), .unsigned, field_bits);
+ bits += field_bits;
+ const field_bigint = bigint_mut.toConst();
+
+ field_vals[i] = switch (field.ty.zigTypeTag()) {
+ .Float => switch (field.ty.floatBits(target)) {
+ 16 => try bitCastBigIntToFloat(f16, .float_16, field_bigint, arena),
+ 32 => try bitCastBigIntToFloat(f32, .float_32, field_bigint, arena),
+ 64 => try bitCastBigIntToFloat(f64, .float_64, field_bigint, arena),
+ 80 => try bitCastBigIntToFloat(f80, .float_80, field_bigint, arena),
+ 128 => try bitCastBigIntToFloat(f128, .float_128, field_bigint, arena),
+ else => unreachable,
+ },
+ .Bool => makeBool(!field_bigint.eqZero()),
+ .Int => try Tag.int_big_positive.create(
+ arena,
+ try arena.dupe(std.math.big.Limb, field_bigint.limbs),
+ ),
+ .Struct => try intToPackedStruct(field.ty, target, field_bigint, arena),
+ else => unreachable,
+ };
+ }
+ return Tag.aggregate.create(arena, field_vals);
+ }
+
+ fn bitCastBigIntToFloat(
+ comptime F: type,
+ comptime float_tag: Tag,
+ bigint: BigIntConst,
+ arena: Allocator,
+ ) !Value {
+ const Int = @Type(.{ .Int = .{
+ .signedness = .unsigned,
+ .bits = @typeInfo(F).Float.bits,
+ } });
+ const int = bigint.to(Int) catch |err| switch (err) {
+ error.NegativeIntoUnsigned => unreachable,
+ error.TargetTooSmall => unreachable,
+ };
+ const f = @bitCast(F, int);
+ return float_tag.create(arena, f);
+ }
+
fn floatWriteToMemory(comptime F: type, f: F, target: Target, buffer: []u8) void {
+ const endian = target.cpu.arch.endian();
+ if (F == f80) {
+ const repr = std.math.break_f80(f);
+ std.mem.writeInt(u64, buffer[0..8], repr.fraction, endian);
+ std.mem.writeInt(u16, buffer[8..10], repr.exp, endian);
+ // TODO set the rest of the bytes to undefined. should we use 0xaa
+ // or is there a different way?
+ return;
+ }
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
} });
const int = @bitCast(Int, f);
- std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], int, target.cpu.arch.endian());
+ std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], int, endian);
}
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
+ if (F == f80) {
+ switch (target.cpu.arch) {
+ .i386, .x86_64 => return std.math.make_f80(.{
+ .fraction = std.mem.readIntLittle(u64, buffer[0..8]),
+ .exp = std.mem.readIntLittle(u16, buffer[8..10]),
+ }),
+ else => {},
+ }
+ }
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
} });
- const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
+ const int = readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
return @bitCast(F, int);
}
+ fn readInt(comptime Int: type, buffer: *const [@sizeOf(Int)]u8, endian: std.builtin.Endian) Int {
+ var result: Int = 0;
+ switch (endian) {
+ .Big => {
+ for (buffer) |byte| {
+ result <<= 8;
+ result |= byte;
+ }
+ },
+ .Little => {
+ var i: usize = buffer.len;
+ while (i != 0) {
+ i -= 1;
+ result <<= 8;
+ result |= buffer[i];
+ }
+ },
+ }
+ return result;
+ }
+
/// Asserts that the value is a float or an integer.
pub fn toFloat(val: Value, comptime T: type) T {
return switch (val.tag()) {
.float_16 => @floatCast(T, val.castTag(.float_16).?.data),
.float_32 => @floatCast(T, val.castTag(.float_32).?.data),
.float_64 => @floatCast(T, val.castTag(.float_64).?.data),
+ .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
.float_128 => @floatCast(T, val.castTag(.float_128).?.data),
.zero => 0,
.one => 1,
- .int_u64 => @intToFloat(T, val.castTag(.int_u64).?.data),
- .int_i64 => @intToFloat(T, val.castTag(.int_i64).?.data),
+ .int_u64 => {
+ if (T == f80) {
+ @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+ }
+ return @intToFloat(T, val.castTag(.int_u64).?.data);
+ },
+ .int_i64 => {
+ if (T == f80) {
+ @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+ }
+ return @intToFloat(T, val.castTag(.int_i64).?.data);
+ },
.int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
.int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
@@ -1137,7 +1551,7 @@ pub const Value = extern union {
while (i != 0) {
i -= 1;
const limb: f128 = @intToFloat(f128, limbs[i]);
- result = @mulAdd(f128, base, limb, result);
+ result = @mulAdd(f128, base, result, limb);
}
if (positive) {
return result;
@@ -1228,153 +1642,119 @@ pub const Value = extern union {
}
}
- /// Asserts the value is an integer and not undefined.
- /// Returns the number of bits the value requires to represent stored in twos complement form.
- pub fn intBitCountTwosComp(self: Value, target: Target) usize {
- switch (self.tag()) {
- .zero,
- .bool_false,
- .the_only_possible_value,
- => return 0,
+ pub fn popCount(val: Value, ty: Type, target: Target) u64 {
+ assert(!val.isUndef());
+ switch (val.tag()) {
+ .zero, .bool_false => return 0,
+ .one, .bool_true => return 1,
- .one,
- .bool_true,
- => return 1,
+ .int_u64 => return @popCount(u64, val.castTag(.int_u64).?.data),
- .int_u64 => {
- const x = self.castTag(.int_u64).?.data;
- if (x == 0) return 0;
- return @intCast(usize, std.math.log2(x) + 1);
- },
- .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
- .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
+ else => {
+ const info = ty.intInfo(target);
- .decl_ref_mut,
- .extern_fn,
- .decl_ref,
- .function,
- .variable,
- .eu_payload_ptr,
- .opt_payload_ptr,
- => return target.cpu.arch.ptrBitWidth(),
+ var buffer: Value.BigIntSpace = undefined;
+ const operand_bigint = val.toBigInt(&buffer, target);
- else => {
- var buffer: BigIntSpace = undefined;
- return self.toBigInt(&buffer).bitCountTwosComp();
+ var limbs_buffer: [4]std.math.big.Limb = undefined;
+ var result_bigint = BigIntMutable{
+ .limbs = &limbs_buffer,
+ .positive = undefined,
+ .len = undefined,
+ };
+ result_bigint.popCount(operand_bigint, info.bits);
+
+ return result_bigint.toConst().to(u64) catch unreachable;
},
}
}
- pub fn popCount(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+ pub fn bitReverse(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+ assert(!val.isUndef());
+
+ const info = ty.intInfo(target);
+
+ var buffer: Value.BigIntSpace = undefined;
+ const operand_bigint = val.toBigInt(&buffer, target);
+
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcTwosCompLimbCount(info.bits),
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);
+
+ return fromBigInt(arena, result_bigint.toConst());
+ }
+
+ pub fn byteSwap(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
assert(!val.isUndef());
const info = ty.intInfo(target);
+ // Bit count must be evenly divisible by 8
+ assert(info.bits % 8 == 0);
+
var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer);
+ const operand_bigint = val.toBigInt(&buffer, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.popCount(operand_bigint, info.bits);
+ result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);
return fromBigInt(arena, result_bigint.toConst());
}
- /// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
- pub fn intFitsInType(self: Value, ty: Type, target: Target) bool {
+ /// Asserts the value is an integer and not undefined.
+ /// Returns the number of bits the value requires to represent stored in twos complement form.
+ pub fn intBitCountTwosComp(self: Value, target: Target) usize {
switch (self.tag()) {
.zero,
- .undef,
.bool_false,
- => return true,
+ .the_only_possible_value,
+ => return 0,
.one,
.bool_true,
- => {
- const info = ty.intInfo(target);
- return switch (info.signedness) {
- .signed => info.bits >= 2,
- .unsigned => info.bits >= 1,
- };
- },
-
- .int_u64 => switch (ty.zigTypeTag()) {
- .Int => {
- const x = self.castTag(.int_u64).?.data;
- if (x == 0) return true;
- const info = ty.intInfo(target);
- const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
- return info.bits >= needed_bits;
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
- .int_i64 => switch (ty.zigTypeTag()) {
- .Int => {
- const x = self.castTag(.int_i64).?.data;
- if (x == 0) return true;
- const info = ty.intInfo(target);
- if (info.signedness == .unsigned and x < 0)
- return false;
- var buffer: BigIntSpace = undefined;
- return self.toBigInt(&buffer).fitsInTwosComp(info.signedness, info.bits);
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
- .int_big_positive => switch (ty.zigTypeTag()) {
- .Int => {
- const info = ty.intInfo(target);
- return self.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
- .int_big_negative => switch (ty.zigTypeTag()) {
- .Int => {
- const info = ty.intInfo(target);
- return self.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
- },
- .ComptimeInt => return true,
- else => unreachable,
- },
+ => return 1,
- .the_only_possible_value => {
- assert(ty.intInfo(target).bits == 0);
- return true;
+ .int_u64 => {
+ const x = self.castTag(.int_u64).?.data;
+ if (x == 0) return 0;
+ return @intCast(usize, std.math.log2(x) + 1);
},
+ .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
+ .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
.decl_ref_mut,
+ .comptime_field_ptr,
.extern_fn,
.decl_ref,
.function,
.variable,
- => {
- const info = ty.intInfo(target);
- const ptr_bits = target.cpu.arch.ptrBitWidth();
- return switch (info.signedness) {
- .signed => info.bits > ptr_bits,
- .unsigned => info.bits >= ptr_bits,
- };
- },
+ .eu_payload_ptr,
+ .opt_payload_ptr,
+ => return target.cpu.arch.ptrBitWidth(),
- else => unreachable,
+ else => {
+ var buffer: BigIntSpace = undefined;
+ return self.toBigInt(&buffer, target).bitCountTwosComp();
+ },
}
}
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
- pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type) !Value {
- switch (dest_ty.tag()) {
- .f16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
- .f32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
- .f64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
- .f128, .comptime_float, .c_longdouble => {
- return Value.Tag.float_128.create(arena, self.toFloat(f128));
- },
+ pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ switch (dest_ty.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
+ 32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
+ 64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
+ 80 => return Value.Tag.float_80.create(arena, self.toFloat(f80)),
+ 128 => return Value.Tag.float_128.create(arena, self.toFloat(f128)),
else => unreachable,
}
}
@@ -1389,34 +1769,22 @@ pub const Value = extern union {
.float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
.float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
.float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
- // .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
- .float_128 => @panic("TODO lld: error: undefined symbol: fmodl"),
+ //.float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0,
+ .float_80 => @panic("TODO implement __remx in compiler-rt"),
+ .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
else => unreachable,
};
}
- /// Asserts the value is numeric
- pub fn isZero(self: Value) bool {
- return switch (self.tag()) {
- .zero, .the_only_possible_value => true,
- .one => false,
-
- .int_u64 => self.castTag(.int_u64).?.data == 0,
- .int_i64 => self.castTag(.int_i64).?.data == 0,
-
- .float_16 => self.castTag(.float_16).?.data == 0,
- .float_32 => self.castTag(.float_32).?.data == 0,
- .float_64 => self.castTag(.float_64).?.data == 0,
- .float_128 => self.castTag(.float_128).?.data == 0,
-
- .int_big_positive => self.castTag(.int_big_positive).?.asBigInt().eqZero(),
- .int_big_negative => self.castTag(.int_big_negative).?.asBigInt().eqZero(),
- else => unreachable,
- };
+ pub fn orderAgainstZero(lhs: Value) std.math.Order {
+ return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
}
- pub fn orderAgainstZero(lhs: Value) std.math.Order {
+ pub fn orderAgainstZeroAdvanced(
+ lhs: Value,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!std.math.Order {
return switch (lhs.tag()) {
.zero,
.bool_false,
@@ -1427,6 +1795,7 @@ pub const Value = extern union {
.bool_true,
.decl_ref,
.decl_ref_mut,
+ .comptime_field_ptr,
.extern_fn,
.function,
.variable,
@@ -1437,21 +1806,60 @@ pub const Value = extern union {
.int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0),
.int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0),
+ .lazy_align => {
+ const ty = lhs.castTag(.lazy_align).?.data;
+ if (try ty.hasRuntimeBitsAdvanced(false, sema_kit)) {
+ return .gt;
+ } else {
+ return .eq;
+ }
+ },
+ .lazy_size => {
+ const ty = lhs.castTag(.lazy_size).?.data;
+ if (try ty.hasRuntimeBitsAdvanced(false, sema_kit)) {
+ return .gt;
+ } else {
+ return .eq;
+ }
+ },
+
.float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0),
.float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0),
.float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0),
+ .float_80 => std.math.order(lhs.castTag(.float_80).?.data, 0),
.float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0),
+ .elem_ptr => {
+ const elem_ptr = lhs.castTag(.elem_ptr).?.data;
+ switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(sema_kit)) {
+ .lt => unreachable,
+ .gt => return .gt,
+ .eq => {
+ if (elem_ptr.index == 0) {
+ return .eq;
+ } else {
+ return .gt;
+ }
+ },
+ }
+ },
+
else => unreachable,
};
}
/// Asserts the value is comparable.
- pub fn order(lhs: Value, rhs: Value) std.math.Order {
+ pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order {
+ return orderAdvanced(lhs, rhs, target, null) catch unreachable;
+ }
+
+ /// Asserts the value is comparable.
+ /// If sema_kit is null then this function asserts things are resolved and cannot fail.
+ pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, sema_kit: ?Module.WipAnalysis) !std.math.Order {
const lhs_tag = lhs.tag();
const rhs_tag = rhs.tag();
- const lhs_against_zero = lhs.orderAgainstZero();
- const rhs_against_zero = rhs.orderAgainstZero();
+ const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(sema_kit);
+ const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(sema_kit);
switch (lhs_against_zero) {
.lt => if (rhs_against_zero != .lt) return .lt,
.eq => return rhs_against_zero.invert(),
@@ -1471,6 +1879,7 @@ pub const Value = extern union {
.float_16 => return std.math.order(lhs.castTag(.float_16).?.data, rhs.castTag(.float_16).?.data),
.float_32 => return std.math.order(lhs.castTag(.float_32).?.data, rhs.castTag(.float_32).?.data),
.float_64 => return std.math.order(lhs.castTag(.float_64).?.data, rhs.castTag(.float_64).?.data),
+ .float_80 => return std.math.order(lhs.castTag(.float_80).?.data, rhs.castTag(.float_80).?.data),
.float_128 => return std.math.order(lhs.castTag(.float_128).?.data, rhs.castTag(.float_128).?.data),
else => unreachable,
};
@@ -1484,14 +1893,24 @@ pub const Value = extern union {
var lhs_bigint_space: BigIntSpace = undefined;
var rhs_bigint_space: BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_bigint_space);
- const rhs_bigint = rhs.toBigInt(&rhs_bigint_space);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, sema_kit);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, sema_kit);
return lhs_bigint.order(rhs_bigint);
}
/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
- pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
+ pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool {
+ return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable;
+ }
+
+ pub fn compareHeteroAdvanced(
+ lhs: Value,
+ op: std.math.CompareOperator,
+ rhs: Value,
+ target: Target,
+ sema_kit: ?Module.WipAnalysis,
+ ) !bool {
if (lhs.pointerDecl()) |lhs_decl| {
if (rhs.pointerDecl()) |rhs_decl| {
switch (op) {
@@ -1513,30 +1932,89 @@ pub const Value = extern union {
else => {},
}
}
- return order(lhs, rhs).compare(op);
+ return (try orderAdvanced(lhs, rhs, target, sema_kit)).compare(op);
+ }
+
+ /// Asserts the values are comparable. Both operands have type `ty`.
+ /// Vector results will be reduced with AND.
+ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
+ if (ty.zigTypeTag() == .Vector) {
+ var i: usize = 0;
+ while (i < ty.vectorLen()) : (i += 1) {
+ if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return compareScalar(lhs, op, rhs, ty, mod);
}
- /// Asserts the value is comparable. Both operands have type `ty`.
- pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type) bool {
+ /// Asserts the values are comparable. Both operands have type `ty`.
+ pub fn compareScalar(
+ lhs: Value,
+ op: std.math.CompareOperator,
+ rhs: Value,
+ ty: Type,
+ mod: *Module,
+ ) bool {
return switch (op) {
- .eq => lhs.eql(rhs, ty),
- .neq => !lhs.eql(rhs, ty),
- else => compareHetero(lhs, op, rhs),
+ .eq => lhs.eql(rhs, ty, mod),
+ .neq => !lhs.eql(rhs, ty, mod),
+ else => compareHetero(lhs, op, rhs, mod.getTarget()),
};
}
/// Asserts the value is comparable.
+ /// Vector results will be reduced with AND.
pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool {
- return orderAgainstZero(lhs).compare(op);
+ return compareWithZeroAdvanced(lhs, op, null) catch unreachable;
}
- pub fn eql(a: Value, b: Value, ty: Type) bool {
+ pub fn compareWithZeroAdvanced(
+ lhs: Value,
+ op: std.math.CompareOperator,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!bool {
+ switch (lhs.tag()) {
+ .repeated => return lhs.castTag(.repeated).?.data.compareWithZeroAdvanced(op, sema_kit),
+ .aggregate => {
+ for (lhs.castTag(.aggregate).?.data) |elem_val| {
+ if (!(try elem_val.compareWithZeroAdvanced(op, sema_kit))) return false;
+ }
+ return true;
+ },
+ else => {},
+ }
+ return (try orderAgainstZeroAdvanced(lhs, sema_kit)).compare(op);
+ }
+
+ /// This function is used by hash maps and so treats floating-point NaNs as equal
+ /// to each other, and not equal to other floating-point values.
+ /// Similarly, it treats `undef` as a distinct value from all other values.
+ /// This function has to be able to support implicit coercion of `a` to `ty`. That is,
+ /// `ty` will be an exactly correct Type for `b` but it may be a post-coerced Type
+ /// for `a`. This function must act *as if* `a` has been coerced to `ty`. This complication
+ /// is required in order to make generic function instantiation efficient - specifically
+ /// the insertion into the monomorphized function table.
+ pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
+ return eqlAdvanced(a, b, ty, mod, null) catch unreachable;
+ }
+
+ /// If `null` is provided for `sema_kit` then it is guaranteed no error will be returned.
+ pub fn eqlAdvanced(
+ a: Value,
+ b: Value,
+ ty: Type,
+ mod: *Module,
+ sema_kit: ?Module.WipAnalysis,
+ ) Module.CompileError!bool {
+ const target = mod.getTarget();
const a_tag = a.tag();
const b_tag = b.tag();
- assert(a_tag != .undef);
- assert(b_tag != .undef);
if (a_tag == b_tag) switch (a_tag) {
- .void_value, .null_value, .the_only_possible_value => return true,
+ .undef => return true,
+ .void_value, .null_value, .the_only_possible_value, .empty_struct_value => return true,
.enum_literal => {
const a_name = a.castTag(.enum_literal).?.data;
const b_name = b.castTag(.enum_literal).?.data;
@@ -1551,55 +2029,115 @@ pub const Value = extern union {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
var buffer: Type.Payload.ElemType = undefined;
- return eql(a_payload, b_payload, ty.optionalChild(&buffer));
+ return eqlAdvanced(a_payload, b_payload, ty.optionalChild(&buffer), mod, sema_kit);
},
.slice => {
const a_payload = a.castTag(.slice).?.data;
const b_payload = b.castTag(.slice).?.data;
- if (!eql(a_payload.len, b_payload.len, Type.usize)) return false;
+ if (!(try eqlAdvanced(a_payload.len, b_payload.len, Type.usize, mod, sema_kit))) {
+ return false;
+ }
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
- return eql(a_payload.ptr, b_payload.ptr, ptr_ty);
+ return eqlAdvanced(a_payload.ptr, b_payload.ptr, ptr_ty, mod, sema_kit);
},
- .elem_ptr => @panic("TODO: Implement more pointer eql cases"),
- .field_ptr => @panic("TODO: Implement more pointer eql cases"),
- .eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
- .opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
- .array => {
- const a_array = a.castTag(.array).?.data;
- const b_array = b.castTag(.array).?.data;
-
- if (a_array.len != b_array.len) return false;
+ .elem_ptr => {
+ const a_payload = a.castTag(.elem_ptr).?.data;
+ const b_payload = b.castTag(.elem_ptr).?.data;
+ if (a_payload.index != b_payload.index) return false;
- const elem_ty = ty.childType();
- for (a_array) |a_elem, i| {
- const b_elem = b_array[i];
+ return eqlAdvanced(a_payload.array_ptr, b_payload.array_ptr, ty, mod, sema_kit);
+ },
+ .field_ptr => {
+ const a_payload = a.castTag(.field_ptr).?.data;
+ const b_payload = b.castTag(.field_ptr).?.data;
+ if (a_payload.field_index != b_payload.field_index) return false;
- if (!eql(a_elem, b_elem, elem_ty)) return false;
- }
- return true;
+ return eqlAdvanced(a_payload.container_ptr, b_payload.container_ptr, ty, mod, sema_kit);
+ },
+ .@"error" => {
+ const a_name = a.castTag(.@"error").?.data.name;
+ const b_name = b.castTag(.@"error").?.data.name;
+ return std.mem.eql(u8, a_name, b_name);
+ },
+ .eu_payload => {
+ const a_payload = a.castTag(.eu_payload).?.data;
+ const b_payload = b.castTag(.eu_payload).?.data;
+ return eqlAdvanced(a_payload, b_payload, ty.errorUnionPayload(), mod, sema_kit);
},
+ .eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
+ .opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
.function => {
const a_payload = a.castTag(.function).?.data;
const b_payload = b.castTag(.function).?.data;
return a_payload == b_payload;
},
- .@"struct" => {
- const fields = ty.structFields().values();
- const a_field_vals = a.castTag(.@"struct").?.data;
- const b_field_vals = b.castTag(.@"struct").?.data;
+ .aggregate => {
+ const a_field_vals = a.castTag(.aggregate).?.data;
+ const b_field_vals = b.castTag(.aggregate).?.data;
assert(a_field_vals.len == b_field_vals.len);
- assert(fields.len == a_field_vals.len);
- for (fields) |field, i| {
- if (!eql(a_field_vals[i], b_field_vals[i], field.ty)) return false;
+
+ if (ty.isTupleOrAnonStruct()) {
+ const types = ty.tupleFields().types;
+ assert(types.len == a_field_vals.len);
+ for (types) |field_ty, i| {
+ if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field_ty, mod, sema_kit))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (ty.zigTypeTag() == .Struct) {
+ const fields = ty.structFields().values();
+ assert(fields.len == a_field_vals.len);
+ for (fields) |field, i| {
+ if (!(try eqlAdvanced(a_field_vals[i], b_field_vals[i], field.ty, mod, sema_kit))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ const elem_ty = ty.childType();
+ for (a_field_vals) |a_elem, i| {
+ const b_elem = b_field_vals[i];
+
+ if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
+ return false;
+ }
}
return true;
},
+ .@"union" => {
+ const a_union = a.castTag(.@"union").?.data;
+ const b_union = b.castTag(.@"union").?.data;
+ switch (ty.containerLayout()) {
+ .Packed, .Extern => {
+ const tag_ty = ty.unionTagTypeHypothetical();
+ if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
+ // In this case, we must disregard mismatching tags and compare
+ // based on the in-memory bytes of the payloads.
+ @panic("TODO comptime comparison of extern union values with mismatching tags");
+ }
+ },
+ .Auto => {
+ const tag_ty = ty.unionTagTypeHypothetical();
+ if (!(try a_union.tag.eqlAdvanced(b_union.tag, tag_ty, mod, sema_kit))) {
+ return false;
+ }
+ },
+ }
+ const active_field_ty = ty.unionFieldType(a_union.tag, mod);
+ return a_union.val.eqlAdvanced(b_union.val, active_field_ty, mod, sema_kit);
+ },
else => {},
} else if (a_tag == .null_value or b_tag == .null_value) {
return false;
+ } else if (a_tag == .undef or b_tag == .undef) {
+ return false;
}
if (a.pointerDecl()) |a_decl| {
@@ -1618,7 +2156,7 @@ pub const Value = extern union {
var buf_b: ToTypeBuffer = undefined;
const a_type = a.toType(&buf_a);
const b_type = b.toType(&buf_b);
- return a_type.eql(b_type);
+ return a_type.eql(b_type, mod);
},
.Enum => {
var buf_a: Payload.U64 = undefined;
@@ -1627,7 +2165,7 @@ pub const Value = extern union {
const b_val = b.enumToInt(ty, &buf_b);
var buf_ty: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf_ty);
- return eql(a_val, b_val, int_ty);
+ return eqlAdvanced(a_val, b_val, int_ty, mod, sema_kit);
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -1636,24 +2174,58 @@ pub const Value = extern union {
var a_buf: ElemValueBuffer = undefined;
var b_buf: ElemValueBuffer = undefined;
while (i < len) : (i += 1) {
- const a_elem = elemValueBuffer(a, i, &a_buf);
- const b_elem = elemValueBuffer(b, i, &b_buf);
- if (!eql(a_elem, b_elem, elem_ty)) return false;
+ const a_elem = elemValueBuffer(a, mod, i, &a_buf);
+ const b_elem = elemValueBuffer(b, mod, i, &b_buf);
+ if (!(try eqlAdvanced(a_elem, b_elem, elem_ty, mod, sema_kit))) {
+ return false;
+ }
}
return true;
},
.Struct => {
- // must be a struct with no fields since we checked for if
- // both have the struct tag above.
- const fields = ty.structFields().values();
- assert(fields.len == 0);
- return true;
+ // A tuple can be represented with .empty_struct_value,
+ // the_one_possible_value, .aggregate in which case we could
+ // end up here and the values are equal if the type has zero fields.
+ return ty.isTupleOrAnonStruct() and ty.structFieldCount() != 0;
+ },
+ .Float => {
+ switch (ty.floatBits(target)) {
+ 16 => return @bitCast(u16, a.toFloat(f16)) == @bitCast(u16, b.toFloat(f16)),
+ 32 => return @bitCast(u32, a.toFloat(f32)) == @bitCast(u32, b.toFloat(f32)),
+ 64 => return @bitCast(u64, a.toFloat(f64)) == @bitCast(u64, b.toFloat(f64)),
+ 80 => return @bitCast(u80, a.toFloat(f80)) == @bitCast(u80, b.toFloat(f80)),
+ 128 => return @bitCast(u128, a.toFloat(f128)) == @bitCast(u128, b.toFloat(f128)),
+ else => unreachable,
+ }
},
- else => return order(a, b).compare(.eq),
+ .ComptimeFloat => {
+ const a_float = a.toFloat(f128);
+ const b_float = b.toFloat(f128);
+
+ const a_nan = std.math.isNan(a_float);
+ const b_nan = std.math.isNan(b_float);
+ if (a_nan != b_nan) return false;
+ if (std.math.signbit(a_float) != std.math.signbit(b_float)) return false;
+ if (a_nan) return true;
+ return a_float == b_float;
+ },
+ .Optional => {
+ if (a.tag() != .opt_payload and b.tag() == .opt_payload) {
+ var buffer: Payload.SubValue = .{
+ .base = .{ .tag = .opt_payload },
+ .data = a,
+ };
+ return eqlAdvanced(Value.initPayload(&buffer.base), b, ty, mod, sema_kit);
+ }
+ },
+ else => {},
}
+ return (try orderAdvanced(a, b, target, sema_kit)).compare(.eq);
}
- pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash) void {
+ /// This function is used by hash maps and so treats floating-point NaNs as equal
+ /// to each other, and not equal to other floating-point values.
+ pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
const zig_ty_tag = ty.zigTypeTag();
std.hash.autoHash(hasher, zig_ty_tag);
if (val.isUndef()) return;
@@ -1670,24 +2242,39 @@ pub const Value = extern union {
.Type => {
var buf: ToTypeBuffer = undefined;
- return val.toType(&buf).hashWithHasher(hasher);
+ return val.toType(&buf).hashWithHasher(hasher, mod);
+ },
+ .Float => {
+ // For hash/eql purposes, we treat floats as their IEEE integer representation.
+ switch (ty.floatBits(mod.getTarget())) {
+ 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16))),
+ 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32))),
+ 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64))),
+ 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80))),
+ 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
+ else => unreachable,
+ }
},
- .Float, .ComptimeFloat => {
- // TODO double check the lang spec. should we to bitwise hashing here,
- // or a hash that normalizes the float value?
+ .ComptimeFloat => {
const float = val.toFloat(f128);
- std.hash.autoHash(hasher, @bitCast(u128, float));
+ const is_nan = std.math.isNan(float);
+ std.hash.autoHash(hasher, is_nan);
+ if (!is_nan) {
+ std.hash.autoHash(hasher, @bitCast(u128, float));
+ } else {
+ std.hash.autoHash(hasher, std.math.signbit(float));
+ }
},
.Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) {
.slice => {
const slice = val.castTag(.slice).?.data;
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
- hash(slice.ptr, ptr_ty, hasher);
- hash(slice.len, Type.usize, hasher);
+ hash(slice.ptr, ptr_ty, hasher, mod);
+ hash(slice.len, Type.usize, hasher, mod);
},
- else => return hashPtr(val, hasher),
+ else => return hashPtr(val, hasher, mod.getTarget()),
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -1695,16 +2282,33 @@ pub const Value = extern union {
var index: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
while (index < len) : (index += 1) {
- const elem_val = val.elemValueBuffer(index, &elem_value_buf);
- elem_val.hash(elem_ty, hasher);
+ const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
+ elem_val.hash(elem_ty, hasher, mod);
}
},
.Struct => {
+ if (ty.isTupleOrAnonStruct()) {
+ const fields = ty.tupleFields();
+ for (fields.values) |field_val, i| {
+ field_val.hash(fields.types[i], hasher, mod);
+ }
+ return;
+ }
const fields = ty.structFields().values();
if (fields.len == 0) return;
- const field_values = val.castTag(.@"struct").?.data;
- for (field_values) |field_val, i| {
- field_val.hash(fields[i].ty, hasher);
+ switch (val.tag()) {
+ .empty_struct_value => {
+ for (fields) |field| {
+ field.default_val.hash(field.ty, hasher, mod);
+ }
+ },
+ .aggregate => {
+ const field_values = val.castTag(.aggregate).?.data;
+ for (field_values) |field_val, i| {
+ field_val.hash(fields[i].ty, hasher, mod);
+ }
+ },
+ else => unreachable,
}
},
.Optional => {
@@ -1713,29 +2317,44 @@ pub const Value = extern union {
const sub_val = payload.data;
var buffer: Type.Payload.ElemType = undefined;
const sub_ty = ty.optionalChild(&buffer);
- sub_val.hash(sub_ty, hasher);
+ sub_val.hash(sub_ty, hasher, mod);
} else {
std.hash.autoHash(hasher, false); // non-null
}
},
.ErrorUnion => {
- @panic("TODO implement hashing error union values");
+ if (val.tag() == .@"error") {
+ std.hash.autoHash(hasher, false); // error
+ const sub_ty = ty.errorUnionSet();
+ val.hash(sub_ty, hasher, mod);
+ return;
+ }
+
+ if (val.castTag(.eu_payload)) |payload| {
+ std.hash.autoHash(hasher, true); // payload
+ const sub_ty = ty.errorUnionPayload();
+ payload.data.hash(sub_ty, hasher, mod);
+ return;
+ } else unreachable;
},
.ErrorSet => {
- @panic("TODO implement hashing error set values");
+            // Just hash the literal error value. This is the most stable
+            // thing between compiler invocations. We can't use the error
+            // int because (1) it's not stable and (2) we don't have access to mod.
+ hasher.update(val.getError().?);
},
.Enum => {
var enum_space: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_space);
- hashInt(int_val, hasher);
+ hashInt(int_val, hasher, mod.getTarget());
},
.Union => {
const union_obj = val.cast(Payload.Union).?.data;
if (ty.unionTagType()) |tag_ty| {
- union_obj.tag.hash(tag_ty, hasher);
+ union_obj.tag.hash(tag_ty, hasher, mod);
}
- const active_field_ty = ty.unionFieldType(union_obj.tag);
- union_obj.val.hash(active_field_ty, hasher);
+ const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
+ union_obj.val.hash(active_field_ty, hasher, mod);
},
.Fn => {
const func: *Module.Fn = val.castTag(.function).?.data;
@@ -1760,66 +2379,90 @@ pub const Value = extern union {
pub const ArrayHashContext = struct {
ty: Type,
+ mod: *Module,
pub fn hash(self: @This(), val: Value) u32 {
- const other_context: HashContext = .{ .ty = self.ty };
+ const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod };
return @truncate(u32, other_context.hash(val));
}
pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool {
_ = b_index;
- return a.eql(b, self.ty);
+ return a.eql(b, self.ty, self.mod);
}
};
pub const HashContext = struct {
ty: Type,
+ mod: *Module,
pub fn hash(self: @This(), val: Value) u64 {
var hasher = std.hash.Wyhash.init(0);
- val.hash(self.ty, &hasher);
+ val.hash(self.ty, &hasher, self.mod);
return hasher.final();
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
- return a.eql(b, self.ty);
+ return a.eql(b, self.ty, self.mod);
}
};
pub fn isComptimeMutablePtr(val: Value) bool {
return switch (val.tag()) {
- .decl_ref_mut => true,
+ .decl_ref_mut, .comptime_field_ptr => true,
.elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr),
.field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr),
- .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data),
- .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data),
+ .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr),
+ .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr),
else => false,
};
}
+ pub fn canMutateComptimeVarState(val: Value) bool {
+ if (val.isComptimeMutablePtr()) return true;
+ switch (val.tag()) {
+ .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(),
+ .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(),
+ .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(),
+ .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(),
+ .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(),
+ .aggregate => {
+ const fields = val.castTag(.aggregate).?.data;
+ for (fields) |field| {
+ if (field.canMutateComptimeVarState()) return true;
+ }
+ return false;
+ },
+ .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(),
+ .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(),
+ else => return false,
+ }
+ }
+
/// Gets the decl referenced by this pointer. If the pointer does not point
/// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr),
/// this function returns null.
- pub fn pointerDecl(val: Value) ?*Module.Decl {
+ pub fn pointerDecl(val: Value) ?Module.Decl.Index {
return switch (val.tag()) {
- .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl,
- .extern_fn, .decl_ref => val.cast(Payload.Decl).?.data,
+ .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index,
+ .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
.function => val.castTag(.function).?.data.owner_decl,
.variable => val.castTag(.variable).?.data.owner_decl,
+ .decl_ref => val.cast(Payload.Decl).?.data,
else => null,
};
}
- fn hashInt(int_val: Value, hasher: *std.hash.Wyhash) void {
+ fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
var buffer: BigIntSpace = undefined;
- const big = int_val.toBigInt(&buffer);
+ const big = int_val.toBigInt(&buffer, target);
std.hash.autoHash(hasher, big.positive);
for (big.limbs) |limb| {
std.hash.autoHash(hasher, limb);
}
}
- fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash) void {
+ fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
switch (ptr_val.tag()) {
.decl_ref,
.decl_ref_mut,
@@ -1827,31 +2470,34 @@ pub const Value = extern union {
.function,
.variable,
=> {
- const decl: *Module.Decl = ptr_val.pointerDecl().?;
+ const decl: Module.Decl.Index = ptr_val.pointerDecl().?;
std.hash.autoHash(hasher, decl);
},
+ .comptime_field_ptr => {
+ std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr);
+ },
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- hashPtr(elem_ptr.array_ptr, hasher);
+ hashPtr(elem_ptr.array_ptr, hasher, target);
std.hash.autoHash(hasher, Value.Tag.elem_ptr);
std.hash.autoHash(hasher, elem_ptr.index);
},
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
std.hash.autoHash(hasher, Value.Tag.field_ptr);
- hashPtr(field_ptr.container_ptr, hasher);
+ hashPtr(field_ptr.container_ptr, hasher, target);
std.hash.autoHash(hasher, field_ptr.field_index);
},
.eu_payload_ptr => {
const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
- hashPtr(err_union_ptr, hasher);
+ hashPtr(err_union_ptr.container_ptr, hasher, target);
},
.opt_payload_ptr => {
const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
- hashPtr(opt_ptr, hasher);
+ hashPtr(opt_ptr.container_ptr, hasher, target);
},
.zero,
@@ -1863,104 +2509,99 @@ pub const Value = extern union {
.bool_false,
.bool_true,
.the_only_possible_value,
- => return hashInt(ptr_val, hasher),
+ .lazy_align,
+ .lazy_size,
+ => return hashInt(ptr_val, hasher, target),
else => unreachable,
}
}
- pub fn markReferencedDeclsAlive(val: Value) void {
- switch (val.tag()) {
- .decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.markAlive(),
- .extern_fn, .decl_ref => return val.cast(Payload.Decl).?.data.markAlive(),
- .function => return val.castTag(.function).?.data.owner_decl.markAlive(),
- .variable => return val.castTag(.variable).?.data.owner_decl.markAlive(),
-
- .repeated,
- .eu_payload,
- .eu_payload_ptr,
- .opt_payload,
- .opt_payload_ptr,
- .empty_array_sentinel,
- => return markReferencedDeclsAlive(val.cast(Payload.SubValue).?.data),
-
- .array => {
- for (val.cast(Payload.Array).?.data) |elem_val| {
- markReferencedDeclsAlive(elem_val);
- }
- },
- .slice => {
- const slice = val.cast(Payload.Slice).?.data;
- markReferencedDeclsAlive(slice.ptr);
- markReferencedDeclsAlive(slice.len);
- },
-
- .elem_ptr => {
- const elem_ptr = val.cast(Payload.ElemPtr).?.data;
- return markReferencedDeclsAlive(elem_ptr.array_ptr);
- },
- .field_ptr => {
- const field_ptr = val.cast(Payload.FieldPtr).?.data;
- return markReferencedDeclsAlive(field_ptr.container_ptr);
- },
- .@"struct" => {
- for (val.cast(Payload.Struct).?.data) |field_val| {
- markReferencedDeclsAlive(field_val);
- }
- },
- .@"union" => {
- const data = val.cast(Payload.Union).?.data;
- markReferencedDeclsAlive(data.tag);
- markReferencedDeclsAlive(data.val);
- },
-
- else => {},
- }
- }
-
pub fn slicePtr(val: Value) Value {
return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr,
// TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc.
- .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr => val,
+ .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val,
else => unreachable,
};
}
- pub fn sliceLen(val: Value) u64 {
+ pub fn sliceLen(val: Value, mod: *Module) u64 {
return switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.len.toUnsignedInt(),
+ .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()),
.decl_ref => {
- const decl = val.castTag(.decl_ref).?.data;
+ const decl_index = val.castTag(.decl_ref).?.data;
+ const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag() == .Array) {
return decl.ty.arrayLen();
} else {
return 1;
}
},
+ .decl_ref_mut => {
+ const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
+ const decl = mod.declPtr(decl_index);
+ if (decl.ty.zigTypeTag() == .Array) {
+ return decl.ty.arrayLen();
+ } else {
+ return 1;
+ }
+ },
+ .comptime_field_ptr => {
+ const payload = val.castTag(.comptime_field_ptr).?.data;
+ if (payload.field_ty.zigTypeTag() == .Array) {
+ return payload.field_ty.arrayLen();
+ } else {
+ return 1;
+ }
+ },
+ else => unreachable,
+ };
+ }
+
+ /// Index into a vector-like `Value`. Asserts `index` is a valid index for `val`.
+ /// Some scalar values are considered vector-like to avoid needing to allocate
+ /// a new `repeated` each time a constant is used.
+ pub fn indexVectorlike(val: Value, index: usize) Value {
+ return switch (val.tag()) {
+ .aggregate => val.castTag(.aggregate).?.data[index],
+
+ .repeated => val.castTag(.repeated).?.data,
+ // These values will implicitly be treated as `repeated`.
+ .zero,
+ .one,
+ .bool_false,
+ .bool_true,
+ .int_i64,
+ .int_u64,
+ => val,
+
else => unreachable,
};
}
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
- pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value {
- return elemValueAdvanced(val, index, arena, undefined);
+ pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
+ return elemValueAdvanced(val, mod, index, arena, undefined);
}
pub const ElemValueBuffer = Payload.U64;
- pub fn elemValueBuffer(val: Value, index: usize, buffer: *ElemValueBuffer) Value {
- return elemValueAdvanced(val, index, null, buffer) catch unreachable;
+ pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value {
+ return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable;
}
pub fn elemValueAdvanced(
val: Value,
+ mod: *Module,
index: usize,
arena: ?Allocator,
buffer: *ElemValueBuffer,
) error{OutOfMemory}!Value {
switch (val.tag()) {
+ // This is the case of accessing an element of an undef array.
+ .undef => return Value.undef,
.empty_array => unreachable, // out of bounds array index
.empty_struct_value => unreachable, // out of bounds array index
@@ -1981,18 +2622,33 @@ pub const Value = extern union {
return initPayload(&buffer.base);
}
},
+ .str_lit => {
+ const str_lit = val.castTag(.str_lit).?.data;
+ const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ const byte = bytes[index];
+ if (arena) |a| {
+ return Tag.int_u64.create(a, byte);
+ } else {
+ buffer.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = byte,
+ };
+ return initPayload(&buffer.base);
+ }
+ },
// No matter the index; all the elements are the same!
.repeated => return val.castTag(.repeated).?.data,
- .array => return val.castTag(.array).?.data[index],
- .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(index, arena, buffer),
+ .aggregate => return val.castTag(.aggregate).?.data[index],
+ .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer),
- .decl_ref => return val.castTag(.decl_ref).?.data.val.elemValueAdvanced(index, arena, buffer),
- .decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.val.elemValueAdvanced(index, arena, buffer),
+ .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
+ .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
+ .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer),
.elem_ptr => {
const data = val.castTag(.elem_ptr).?.data;
- return data.array_ptr.elemValueAdvanced(index + data.index, arena, buffer);
+ return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
},
// The child type of arrays which have only one possible value need
@@ -2003,11 +2659,47 @@ pub const Value = extern union {
}
}
- pub fn fieldValue(val: Value, allocator: Allocator, index: usize) error{OutOfMemory}!Value {
- _ = allocator;
+ // Asserts that the provided start/end are in-bounds.
+ pub fn sliceArray(
+ val: Value,
+ mod: *Module,
+ arena: Allocator,
+ start: usize,
+ end: usize,
+ ) error{OutOfMemory}!Value {
+ return switch (val.tag()) {
+ .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array),
+ .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
+ .str_lit => {
+ const str_lit = val.castTag(.str_lit).?.data;
+ return Tag.str_lit.create(arena, .{
+ .index = @intCast(u32, str_lit.index + start),
+ .len = @intCast(u32, end - start),
+ });
+ },
+ .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]),
+ .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end),
+
+ .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end),
+ .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end),
+ .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end),
+ .elem_ptr => blk: {
+ const elem_ptr = val.castTag(.elem_ptr).?.data;
+ break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index);
+ },
+
+ .repeated,
+ .the_only_possible_value,
+ => val,
+
+ else => unreachable,
+ };
+ }
+
+ pub fn fieldValue(val: Value, ty: Type, index: usize) Value {
switch (val.tag()) {
- .@"struct" => {
- const field_values = val.castTag(.@"struct").?.data;
+ .aggregate => {
+ const field_values = val.castTag(.aggregate).?.data;
return field_values[index];
},
.@"union" => {
@@ -2015,8 +2707,17 @@ pub const Value = extern union {
// TODO assert the tag is correct
return payload.val;
},
- // Structs which have only one possible value need to consist of members which have only one possible value.
- .the_only_possible_value => return val,
+
+ .the_only_possible_value => return ty.onePossibleValue().?,
+
+ .empty_struct_value => {
+ if (ty.isTupleOrAnonStruct()) {
+ const tuple = ty.tupleFields();
+ return tuple.values[index];
+ }
+ unreachable;
+ },
+ .undef => return Value.undef,
else => unreachable,
}
@@ -2031,24 +2732,34 @@ pub const Value = extern union {
}
/// Returns a pointer to the element value at the index.
- pub fn elemPtr(self: Value, allocator: Allocator, index: usize) !Value {
- switch (self.tag()) {
- .elem_ptr => {
- const elem_ptr = self.castTag(.elem_ptr).?.data;
- return Tag.elem_ptr.create(allocator, .{
+ pub fn elemPtr(
+ val: Value,
+ ty: Type,
+ arena: Allocator,
+ index: usize,
+ mod: *Module,
+ ) Allocator.Error!Value {
+ const elem_ty = ty.elemType2();
+ const ptr_val = switch (val.tag()) {
+ .slice => val.castTag(.slice).?.data.ptr,
+ else => val,
+ };
+
+ if (ptr_val.tag() == .elem_ptr) {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ if (elem_ptr.elem_ty.eql(elem_ty, mod)) {
+ return Tag.elem_ptr.create(arena, .{
.array_ptr = elem_ptr.array_ptr,
+ .elem_ty = elem_ptr.elem_ty,
.index = elem_ptr.index + index,
});
- },
- .slice => return Tag.elem_ptr.create(allocator, .{
- .array_ptr = self.castTag(.slice).?.data.ptr,
- .index = index,
- }),
- else => return Tag.elem_ptr.create(allocator, .{
- .array_ptr = self,
- .index = index,
- }),
+ }
}
+ return Tag.elem_ptr.create(arena, .{
+ .array_ptr = ptr_val,
+ .elem_ty = elem_ty,
+ .index = index,
+ });
}
pub fn isUndef(self: Value) bool {
@@ -2129,6 +2840,15 @@ pub const Value = extern union {
};
}
+ /// Value of the optional, null if optional has no payload.
+ pub fn optionalValue(val: Value) ?Value {
+ if (val.isNull()) return null;
+
+ // Valid for optional representation to be the direct value
+ // and not use opt_payload.
+ return if (val.castTag(.opt_payload)) |p| p.data else val;
+ }
+
/// Valid for all types. Asserts the value is not undefined.
pub fn isFloat(self: Value) bool {
return switch (self.tag()) {
@@ -2139,31 +2859,43 @@ pub const Value = extern union {
.float_16,
.float_32,
.float_64,
+ .float_80,
.float_128,
=> true,
else => false,
};
}
- pub fn intToFloat(val: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value {
+ if (int_ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, int_ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intToFloatScalar(val, arena, float_ty, target);
+ }
+
+ pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target) !Value {
switch (val.tag()) {
.undef, .zero, .one => return val,
.the_only_possible_value => return Value.initTag(.zero), // for i0, u0
.int_u64 => {
- return intToFloatInner(val.castTag(.int_u64).?.data, arena, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target);
},
.int_i64 => {
- return intToFloatInner(val.castTag(.int_i64).?.data, arena, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target);
},
.int_big_positive => {
const limbs = val.castTag(.int_big_positive).?.data;
const float = bigIntToFloat(limbs, true);
- return floatToValue(float, arena, dest_ty, target);
+ return floatToValue(float, arena, float_ty, target);
},
.int_big_negative => {
const limbs = val.castTag(.int_big_negative).?.data;
const float = bigIntToFloat(limbs, false);
- return floatToValue(float, arena, dest_ty, target);
+ return floatToValue(float, arena, float_ty, target);
},
else => unreachable,
}
@@ -2174,6 +2906,7 @@ pub const Value = extern union {
16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)),
+ 80 => return Value.Tag.float_80.create(arena, @intToFloat(f80, x)),
128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)),
else => unreachable,
}
@@ -2184,111 +2917,28 @@ pub const Value = extern union {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
64 => return Value.Tag.float_64.create(arena, @floatCast(f64, float)),
+ 80 => return Value.Tag.float_80.create(arena, @floatCast(f80, float)),
128 => return Value.Tag.float_128.create(arena, float),
else => unreachable,
}
}
- pub fn floatToInt(val: Value, arena: Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
- const Limb = std.math.big.Limb;
-
- var value = val.toFloat(f64); // TODO: f128 ?
- if (std.math.isNan(value) or std.math.isInf(value)) {
- return error.FloatCannotFit;
- }
-
- const isNegative = std.math.signbit(value);
- value = std.math.fabs(value);
-
- const floored = std.math.floor(value);
-
- var rational = try std.math.big.Rational.init(arena);
- defer rational.deinit();
- rational.setFloat(f64, floored) catch |err| switch (err) {
- error.NonFiniteFloat => unreachable,
- error.OutOfMemory => return error.OutOfMemory,
- };
-
- // The float is reduced in rational.setFloat, so we assert that denominator is equal to one
- const bigOne = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
- assert(rational.q.toConst().eqAbs(bigOne));
-
- const result_limbs = try arena.dupe(Limb, rational.p.toConst().limbs);
- const result = if (isNegative)
- try Value.Tag.int_big_negative.create(arena, result_limbs)
- else
- try Value.Tag.int_big_positive.create(arena, result_limbs);
-
- if (result.intFitsInType(dest_ty, target)) {
- return result;
- } else {
- return error.FloatCannotFit;
- }
- }
-
fn calcLimbLenFloat(scalar: anytype) usize {
if (scalar == 0) {
return 1;
}
- const w_value = std.math.fabs(scalar);
+ const w_value = @fabs(scalar);
return @divFloor(@floatToInt(std.math.big.Limb, std.math.log2(w_value)), @typeInfo(std.math.big.Limb).Int.bits) + 1;
}
pub const OverflowArithmeticResult = struct {
- overflowed: bool,
+ /// TODO: Rename to `overflow_bit` and make of type `u1`.
+ overflowed: Value,
wrapped_result: Value,
};
- pub fn intAddWithOverflow(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !OverflowArithmeticResult {
- const info = ty.intInfo(target);
-
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
- const limbs = try arena.alloc(
- std.math.big.Limb,
- std.math.big.int.calcTwosCompLimbCount(info.bits),
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- const result = try fromBigInt(arena, result_bigint.toConst());
- return OverflowArithmeticResult{
- .overflowed = overflowed,
- .wrapped_result = result,
- };
- }
-
- /// Supports both floats and ints; handles undefined.
- pub fn numberAddWrap(
- lhs: Value,
- rhs: Value,
- ty: Type,
- arena: Allocator,
- target: Target,
- ) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
-
- if (ty.zigTypeTag() == .ComptimeInt) {
- return intAdd(lhs, rhs, arena);
- }
-
- if (ty.isAnyFloat()) {
- return floatAdd(lhs, rhs, ty, arena);
- }
-
- const overflow_result = try intAddWithOverflow(lhs, rhs, ty, arena, target);
- return overflow_result.wrapped_result;
- }
-
- fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
+ pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
if (big_int.positive) {
if (big_int.to(u64)) |x| {
return Value.Tag.int_u64.create(arena, x);
@@ -2304,7 +2954,7 @@ pub const Value = extern union {
}
}
- /// Supports integers only; asserts neither operand is undefined.
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intAddSat(
lhs: Value,
rhs: Value,
@@ -2312,74 +2962,62 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
- assert(!lhs.isUndef());
- assert(!rhs.isUndef());
-
- const info = ty.intInfo(target);
-
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
- const limbs = try arena.alloc(
- std.math.big.Limb,
- std.math.big.int.calcTwosCompLimbCount(info.bits),
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- return fromBigInt(arena, result_bigint.toConst());
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intAddSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intAddSatScalar(lhs, rhs, ty, arena, target);
}
- pub fn intSubWithOverflow(
+ /// Supports integers only; asserts neither operand is undefined.
+ pub fn intAddSatScalar(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
- ) !OverflowArithmeticResult {
+ ) !Value {
+ assert(!lhs.isUndef());
+ assert(!rhs.isUndef());
+
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
- const wrapped_result = try fromBigInt(arena, result_bigint.toConst());
- return OverflowArithmeticResult{
- .overflowed = overflowed,
- .wrapped_result = wrapped_result,
- };
+ result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
+ return fromBigInt(arena, result_bigint.toConst());
}
- /// Supports both floats and ints; handles undefined.
- pub fn numberSubWrap(
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
+ pub fn intSubSat(
lhs: Value,
rhs: Value,
ty: Type,
arena: Allocator,
target: Target,
) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
-
- if (ty.zigTypeTag() == .ComptimeInt) {
- return intSub(lhs, rhs, arena);
- }
-
- if (ty.isAnyFloat()) {
- return floatSub(lhs, rhs, ty, arena);
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intSubSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
}
-
- const overflow_result = try intSubWithOverflow(lhs, rhs, ty, arena, target);
- return overflow_result.wrapped_result;
+ return intSubSatScalar(lhs, rhs, ty, arena, target);
}
/// Supports integers only; asserts neither operand is undefined.
- pub fn intSubSat(
+ pub fn intSubSatScalar(
lhs: Value,
rhs: Value,
ty: Type,
@@ -2393,8 +3031,8 @@ pub const Value = extern union {
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -2411,12 +3049,35 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !OverflowArithmeticResult {
+ if (ty.zigTypeTag() == .Vector) {
+ const overflowed_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ const of_math_result = try intMulWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ overflowed_data[i] = of_math_result.overflowed;
+ scalar.* = of_math_result.wrapped_result;
+ }
+ return OverflowArithmeticResult{
+ .overflowed = try Value.Tag.aggregate.create(arena, overflowed_data),
+ .wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
+ };
+ }
+ return intMulWithOverflowScalar(lhs, rhs, ty, arena, target);
+ }
+
+ pub fn intMulWithOverflowScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2434,12 +3095,12 @@ pub const Value = extern union {
}
return OverflowArithmeticResult{
- .overflowed = overflowed,
+ .overflowed = makeBool(overflowed),
.wrapped_result = try fromBigInt(arena, result_bigint.toConst()),
};
}
- /// Supports both floats and ints; handles undefined.
+ /// Supports both (vectors of) floats and ints; handles undefined scalars.
pub fn numberMulWrap(
lhs: Value,
rhs: Value,
@@ -2447,21 +3108,39 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try numberMulWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return numberMulWrapScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// Supports both floats and ints; handles undefined.
+ pub fn numberMulWrapScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
if (ty.zigTypeTag() == .ComptimeInt) {
- return intMul(lhs, rhs, arena);
+ return intMul(lhs, rhs, ty, arena, target);
}
if (ty.isAnyFloat()) {
- return floatMul(lhs, rhs, ty, arena);
+ return floatMul(lhs, rhs, ty, arena, target);
}
const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, target);
return overflow_result.wrapped_result;
}
- /// Supports integers only; asserts neither operand is undefined.
+ /// Supports (vectors of) integers only; asserts neither operand is undefined.
pub fn intMulSat(
lhs: Value,
rhs: Value,
@@ -2469,6 +3148,24 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intMulSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return intMulSatScalar(lhs, rhs, ty, arena, target);
+ }
+
+  /// Supports integers only; asserts neither operand is undefined.
+ pub fn intMulSatScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
@@ -2476,8 +3173,8 @@ pub const Value = extern union {
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(
@@ -2497,39 +3194,55 @@ pub const Value = extern union {
}
/// Supports both floats and ints; handles undefined.
- pub fn numberMax(lhs: Value, rhs: Value) !Value {
+ pub fn numberMax(lhs: Value, rhs: Value, target: Target) Value {
if (lhs.isUndef() or rhs.isUndef()) return undef;
if (lhs.isNan()) return rhs;
if (rhs.isNan()) return lhs;
- return switch (order(lhs, rhs)) {
+ return switch (order(lhs, rhs, target)) {
.lt => rhs,
.gt, .eq => lhs,
};
}
/// Supports both floats and ints; handles undefined.
- pub fn numberMin(lhs: Value, rhs: Value) !Value {
+ pub fn numberMin(lhs: Value, rhs: Value, target: Target) Value {
if (lhs.isUndef() or rhs.isUndef()) return undef;
if (lhs.isNan()) return rhs;
if (rhs.isNan()) return lhs;
- return switch (order(lhs, rhs)) {
+ return switch (order(lhs, rhs, target)) {
.lt => lhs,
.gt, .eq => rhs,
};
}
- /// operands must be integers; handles undefined.
+ /// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseNotScalar(val.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return bitwiseNotScalar(val, ty, arena, target);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
if (val.isUndef()) return Value.initTag(.undef);
const info = ty.intInfo(target);
+ if (info.bits == 0) {
+ return val;
+ }
+
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var val_space: Value.BigIntSpace = undefined;
- const val_bigint = val.toBigInt(&val_space);
+ const val_bigint = val.toBigInt(&val_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -2540,16 +3253,28 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- /// operands must be integers; handles undefined.
- pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: Allocator) !Value {
+ /// operands must be (vectors of) integers; handles undefined scalars.
+ pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseAndScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return bitwiseAndScalar(lhs, rhs, allocator, target);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -2560,30 +3285,54 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- /// operands must be integers; handles undefined.
+ /// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseNandScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return bitwiseNandScalar(lhs, rhs, ty, arena, target);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- const anded = try bitwiseAnd(lhs, rhs, arena);
+ const anded = try bitwiseAnd(lhs, rhs, ty, arena, target);
const all_ones = if (ty.isSignedInt())
try Value.Tag.int_i64.create(arena, -1)
else
try ty.maxInt(arena, target);
- return bitwiseXor(anded, all_ones, arena);
+ return bitwiseXor(anded, all_ones, ty, arena, target);
+ }
+
+ /// operands must be (vectors of) integers; handles undefined scalars.
+ pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseOrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return bitwiseOrScalar(lhs, rhs, allocator, target);
}
- /// operands must be integers; handles undefined.
- pub fn bitwiseOr(lhs: Value, rhs: Value, arena: Allocator) !Value {
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
@@ -2593,16 +3342,28 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- /// operands must be integers; handles undefined.
- pub fn bitwiseXor(lhs: Value, rhs: Value, arena: Allocator) !Value {
+ /// operands must be (vectors of) integers; handles undefined scalars.
+ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try bitwiseXorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return bitwiseXorScalar(lhs, rhs, allocator, target);
+ }
+
+ /// operands must be integers; handles undefined.
+ pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -2613,45 +3374,24 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intAdd(lhs: Value, rhs: Value, allocator: Allocator) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
- const limbs = try allocator.alloc(
- std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.add(lhs_bigint, rhs_bigint);
- return fromBigInt(allocator, result_bigint.toConst());
- }
-
- pub fn intSub(lhs: Value, rhs: Value, allocator: Allocator) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
- const limbs = try allocator.alloc(
- std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.sub(lhs_bigint, rhs_bigint);
- return fromBigInt(allocator, result_bigint.toConst());
+ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intDivScalar(lhs, rhs, allocator, target);
}
- pub fn intDiv(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2670,13 +3410,24 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intDivFloor(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intDivFloorScalar(lhs, rhs, allocator, target);
+ }
+
+ pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2695,13 +3446,24 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intRemScalar(lhs, rhs, allocator, target);
+ }
+
+ pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2722,13 +3484,24 @@ pub const Value = extern union {
return fromBigInt(allocator, result_r.toConst());
}
- pub fn intMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intModScalar(lhs, rhs, allocator, target);
+ }
+
+ pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -2753,32 +3526,114 @@ pub const Value = extern union {
.float_16 => std.math.isNan(val.castTag(.float_16).?.data),
.float_32 => std.math.isNan(val.castTag(.float_32).?.data),
.float_64 => std.math.isNan(val.castTag(.float_64).?.data),
+ .float_80 => std.math.isNan(val.castTag(.float_80).?.data),
.float_128 => std.math.isNan(val.castTag(.float_128).?.data),
else => false,
};
}
- pub fn floatRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
- _ = lhs;
- _ = rhs;
- _ = allocator;
- @panic("TODO implement Value.floatRem");
+ pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatRemScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val));
+ },
+ 128 => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatModScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val));
+ },
+ 128 => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
}
- pub fn floatMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
- _ = lhs;
- _ = rhs;
- _ = allocator;
- @panic("TODO implement Value.floatMod");
+ pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intMulScalar(lhs, rhs, allocator, target);
}
- pub fn intMul(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -2793,9 +3648,41 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intTrunc(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
+ pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, bits, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intTruncScalar(val, allocator, signedness, bits, target);
+ }
+
+ /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
+ pub fn intTruncBitsAsValue(
+ val: Value,
+ ty: Type,
+ allocator: Allocator,
+ signedness: std.builtin.Signedness,
+ bits: Value,
+ target: Target,
+ ) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, @intCast(u16, bits.indexVectorlike(i).toUnsignedInt(target)), target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(target)), target);
+ }
+
+ pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value {
+ if (bits == 0) return Value.zero;
+
var val_space: Value.BigIntSpace = undefined;
- const val_bigint = val.toBigInt(&val_space);
+ const val_bigint = val.toBigInt(&val_space, target);
const limbs = try allocator.alloc(
std.math.big.Limb,
@@ -2807,12 +3694,23 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn shl(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shlScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return shlScalar(lhs, rhs, allocator, target);
+ }
+
+ pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const shift = @intCast(usize, rhs.toUnsignedInt());
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const shift = @intCast(usize, rhs.toUnsignedInt(target));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2833,10 +3731,33 @@ pub const Value = extern union {
allocator: Allocator,
target: Target,
) !OverflowArithmeticResult {
+ if (ty.zigTypeTag() == .Vector) {
+ const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ const of_math_result = try shlWithOverflowScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), allocator, target);
+ overflowed_data[i] = of_math_result.overflowed;
+ scalar.* = of_math_result.wrapped_result;
+ }
+ return OverflowArithmeticResult{
+ .overflowed = try Value.Tag.aggregate.create(allocator, overflowed_data),
+ .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data),
+ };
+ }
+ return shlWithOverflowScalar(lhs, rhs, ty, allocator, target);
+ }
+
+ pub fn shlWithOverflowScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ allocator: Allocator,
+ target: Target,
+ ) !OverflowArithmeticResult {
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const shift = @intCast(usize, rhs.toUnsignedInt());
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const shift = @intCast(usize, rhs.toUnsignedInt(target));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2852,7 +3773,7 @@ pub const Value = extern union {
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
- .overflowed = overflowed,
+ .overflowed = makeBool(overflowed),
.wrapped_result = try fromBigInt(allocator, result_bigint.toConst()),
};
}
@@ -2864,16 +3785,33 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shlSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return shlSatScalar(lhs, rhs, ty, arena, target);
+ }
+
+ pub fn shlSatScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
const info = ty.intInfo(target);
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const shift = @intCast(usize, rhs.toUnsignedInt());
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const shift = @intCast(usize, rhs.toUnsignedInt(target));
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.big.int.calcTwosCompLimbCount(info.bits),
+ std.math.big.int.calcTwosCompLimbCount(info.bits) + 1,
);
var result_bigint = BigIntMutable{
.limbs = limbs,
@@ -2891,18 +3829,46 @@ pub const Value = extern union {
arena: Allocator,
target: Target,
) !Value {
- const shifted = try lhs.shl(rhs, arena);
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shlTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return shlTruncScalar(lhs, rhs, ty, arena, target);
+ }
+
+ pub fn shlTruncScalar(
+ lhs: Value,
+ rhs: Value,
+ ty: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
+ const shifted = try lhs.shl(rhs, ty, arena, target);
const int_info = ty.intInfo(target);
- const truncated = try shifted.intTrunc(arena, int_info.signedness, int_info.bits);
+ const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, target);
return truncated;
}
- pub fn shr(lhs: Value, rhs: Value, allocator: Allocator) !Value {
+ pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
+ if (ty.zigTypeTag() == .Vector) {
+ const result_data = try allocator.alloc(Value, ty.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try shrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
+ }
+ return Value.Tag.aggregate.create(allocator, result_data);
+ }
+ return shrScalar(lhs, rhs, allocator, target);
+ }
+
+ pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const shift = @intCast(usize, rhs.toUnsignedInt());
+ const lhs_bigint = lhs.toBigInt(&lhs_space, target);
+ const shift = @intCast(usize, rhs.toUnsignedInt(target));
const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
if (result_limbs == 0) {
@@ -2924,91 +3890,84 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn floatAdd(
- lhs: Value,
- rhs: Value,
+ pub fn floatNeg(
+ val: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
- const lhs_val = lhs.toFloat(f16);
- const rhs_val = rhs.toFloat(f16);
- return Value.Tag.float_16.create(arena, lhs_val + rhs_val);
- },
- .f32 => {
- const lhs_val = lhs.toFloat(f32);
- const rhs_val = rhs.toFloat(f32);
- return Value.Tag.float_32.create(arena, lhs_val + rhs_val);
- },
- .f64 => {
- const lhs_val = lhs.toFloat(f64);
- const rhs_val = rhs.toFloat(f64);
- return Value.Tag.float_64.create(arena, lhs_val + rhs_val);
- },
- .f128, .comptime_float, .c_longdouble => {
- const lhs_val = lhs.toFloat(f128);
- const rhs_val = rhs.toFloat(f128);
- return Value.Tag.float_128.create(arena, lhs_val + rhs_val);
- },
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatNegScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatNegScalar(val, float_type, arena, target);
+ }
+
+ pub fn floatNegScalar(
+ val: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
+ switch (float_type.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16)),
+ 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32)),
+ 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64)),
+ 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80)),
+ 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128)),
else => unreachable,
}
}
- pub fn floatSub(
+ pub fn floatDiv(
lhs: Value,
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
- const lhs_val = lhs.toFloat(f16);
- const rhs_val = rhs.toFloat(f16);
- return Value.Tag.float_16.create(arena, lhs_val - rhs_val);
- },
- .f32 => {
- const lhs_val = lhs.toFloat(f32);
- const rhs_val = rhs.toFloat(f32);
- return Value.Tag.float_32.create(arena, lhs_val - rhs_val);
- },
- .f64 => {
- const lhs_val = lhs.toFloat(f64);
- const rhs_val = rhs.toFloat(f64);
- return Value.Tag.float_64.create(arena, lhs_val - rhs_val);
- },
- .f128, .comptime_float, .c_longdouble => {
- const lhs_val = lhs.toFloat(f128);
- const rhs_val = rhs.toFloat(f128);
- return Value.Tag.float_128.create(arena, lhs_val - rhs_val);
- },
- else => unreachable,
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
}
+ return floatDivScalar(lhs, rhs, float_type, arena, target);
}
- pub fn floatDiv(
+ pub fn floatDivScalar(
lhs: Value,
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val / rhs_val);
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val / rhs_val);
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val / rhs_val);
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, lhs_val / rhs_val);
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val / rhs_val);
@@ -3022,24 +3981,47 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatDivFloorScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatDivFloorScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
+ ) !Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val));
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val));
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val));
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val));
@@ -3053,24 +4035,47 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
+ ) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatDivTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatDivTruncScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatDivTruncScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val));
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val));
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val));
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val));
@@ -3084,24 +4089,47 @@ pub const Value = extern union {
rhs: Value,
float_type: Type,
arena: Allocator,
+ target: Target,
+ ) !Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floatMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floatMulScalar(lhs, rhs, float_type, arena, target);
+ }
+
+ pub fn floatMulScalar(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: Allocator,
+ target: Target,
) !Value {
- switch (float_type.tag()) {
- .f16 => {
+ switch (float_type.floatBits(target)) {
+ 16 => {
const lhs_val = lhs.toFloat(f16);
const rhs_val = rhs.toFloat(f16);
return Value.Tag.float_16.create(arena, lhs_val * rhs_val);
},
- .f32 => {
+ 32 => {
const lhs_val = lhs.toFloat(f32);
const rhs_val = rhs.toFloat(f32);
return Value.Tag.float_32.create(arena, lhs_val * rhs_val);
},
- .f64 => {
+ 64 => {
const lhs_val = lhs.toFloat(f64);
const rhs_val = rhs.toFloat(f64);
return Value.Tag.float_64.create(arena, lhs_val * rhs_val);
},
- .f128, .comptime_float, .c_longdouble => {
+ 80 => {
+ const lhs_val = lhs.toFloat(f80);
+ const rhs_val = rhs.toFloat(f80);
+ return Value.Tag.float_80.create(arena, lhs_val * rhs_val);
+ },
+ 128 => {
const lhs_val = lhs.toFloat(f128);
const rhs_val = rhs.toFloat(f128);
return Value.Tag.float_128.create(arena, lhs_val * rhs_val);
@@ -3110,6 +4138,592 @@ pub const Value = extern union {
}
}
+ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sqrtScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return sqrtScalar(val, float_type, arena, target);
+ }
+
+ pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @sqrt(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @sqrt(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @sqrt(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @sqrt(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @sqrt(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try sinScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return sinScalar(val, float_type, arena, target);
+ }
+
+ pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @sin(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @sin(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @sin(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @sin(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @sin(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try cosScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return cosScalar(val, float_type, arena, target);
+ }
+
+ pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @cos(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @cos(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @cos(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @cos(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @cos(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn tan(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try tanScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return tanScalar(val, float_type, arena, target);
+ }
+
+ pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @tan(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @tan(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @tan(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @tan(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @tan(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try expScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return expScalar(val, float_type, arena, target);
+ }
+
+ pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @exp(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @exp(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @exp(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @exp(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @exp(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try exp2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return exp2Scalar(val, float_type, arena, target);
+ }
+
+ pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @exp2(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @exp2(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @exp2(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @exp2(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @exp2(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try logScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return logScalar(val, float_type, arena, target);
+ }
+
+ pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @log(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @log(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @log(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @log(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @log(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try log2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return log2Scalar(val, float_type, arena, target);
+ }
+
+ pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @log2(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @log2(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @log2(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @log2(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @log2(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try log10Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return log10Scalar(val, float_type, arena, target);
+ }
+
+ pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @log10(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @log10(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @log10(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @log10(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @log10(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try fabsScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return fabsScalar(val, float_type, arena, target);
+ }
+
+ pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @fabs(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @fabs(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @fabs(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @fabs(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @fabs(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try floorScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return floorScalar(val, float_type, arena, target);
+ }
+
+ pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @floor(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @floor(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @floor(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @floor(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @floor(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try ceilScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return ceilScalar(val, float_type, arena, target);
+ }
+
+ pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @ceil(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @ceil(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @ceil(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @ceil(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @ceil(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try roundScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return roundScalar(val, float_type, arena, target);
+ }
+
+ pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @round(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @round(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @round(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @round(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @round(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try truncScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target);
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return truncScalar(val, float_type, arena, target);
+ }
+
+ pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const f = val.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @trunc(f));
+ },
+ 32 => {
+ const f = val.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @trunc(f));
+ },
+ 64 => {
+ const f = val.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @trunc(f));
+ },
+ 80 => {
+ const f = val.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @trunc(f));
+ },
+ 128 => {
+ const f = val.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @trunc(f));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn mulAdd(
+ float_type: Type,
+ mulend1: Value,
+ mulend2: Value,
+ addend: Value,
+ arena: Allocator,
+ target: Target,
+ ) Allocator.Error!Value {
+ if (float_type.zigTypeTag() == .Vector) {
+ const result_data = try arena.alloc(Value, float_type.vectorLen());
+ for (result_data) |*scalar, i| {
+ scalar.* = try mulAddScalar(
+ float_type.scalarType(),
+ mulend1.indexVectorlike(i),
+ mulend2.indexVectorlike(i),
+ addend.indexVectorlike(i),
+ arena,
+ target,
+ );
+ }
+ return Value.Tag.aggregate.create(arena, result_data);
+ }
+ return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target);
+ }
+
+ pub fn mulAddScalar(
+ float_type: Type,
+ mulend1: Value,
+ mulend2: Value,
+ addend: Value,
+ arena: Allocator,
+ target: Target,
+ ) Allocator.Error!Value {
+ switch (float_type.floatBits(target)) {
+ 16 => {
+ const m1 = mulend1.toFloat(f16);
+ const m2 = mulend2.toFloat(f16);
+ const a = addend.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @mulAdd(f16, m1, m2, a));
+ },
+ 32 => {
+ const m1 = mulend1.toFloat(f32);
+ const m2 = mulend2.toFloat(f32);
+ const a = addend.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @mulAdd(f32, m1, m2, a));
+ },
+ 64 => {
+ const m1 = mulend1.toFloat(f64);
+ const m2 = mulend2.toFloat(f64);
+ const a = addend.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @mulAdd(f64, m1, m2, a));
+ },
+ 80 => {
+ const m1 = mulend1.toFloat(f80);
+ const m2 = mulend2.toFloat(f80);
+ const a = addend.toFloat(f80);
+ return Value.Tag.float_80.create(arena, @mulAdd(f80, m1, m2, a));
+ },
+ 128 => {
+ const m1 = mulend1.toFloat(f128);
+ const m2 = mulend2.toFloat(f128);
+ const a = addend.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @mulAdd(f128, m1, m2, a));
+ },
+ else => unreachable,
+ }
+ }
+
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
@@ -3148,9 +4762,14 @@ pub const Value = extern union {
data: *Module.Fn,
};
+ pub const ExternFn = struct {
+ base: Payload,
+ data: *Module.ExternFn,
+ };
+
pub const Decl = struct {
base: Payload,
- data: *Module.Decl,
+ data: Module.Decl.Index,
};
pub const Variable = struct {
@@ -3170,17 +4789,34 @@ pub const Value = extern union {
data: Data,
pub const Data = struct {
- decl: *Module.Decl,
- runtime_index: u32,
+ decl_index: Module.Decl.Index,
+ runtime_index: RuntimeIndex,
};
};
+ pub const PayloadPtr = struct {
+ base: Payload,
+ data: struct {
+ container_ptr: Value,
+ container_ty: Type,
+ },
+ };
+
+ pub const ComptimeFieldPtr = struct {
+ base: Payload,
+ data: struct {
+ field_val: Value,
+ field_ty: Type,
+ },
+ };
+
pub const ElemPtr = struct {
pub const base_tag = Tag.elem_ptr;
base: Payload = Payload{ .tag = base_tag },
data: struct {
array_ptr: Value,
+ elem_ty: Type,
index: usize,
},
};
@@ -3191,6 +4827,7 @@ pub const Value = extern union {
base: Payload = Payload{ .tag = base_tag },
data: struct {
container_ptr: Value,
+ container_ty: Type,
field_index: usize,
},
};
@@ -3201,8 +4838,15 @@ pub const Value = extern union {
data: []const u8,
};
- pub const Array = struct {
+ pub const StrLit = struct {
+ base: Payload,
+ data: Module.StringLiteralContext.Key,
+ };
+
+ pub const Aggregate = struct {
base: Payload,
+ /// Field values. The types are according to the struct or array type.
+ /// The length is provided here so that copying a Value does not depend on the Type.
data: []Value,
};
@@ -3212,6 +4856,9 @@ pub const Value = extern union {
ptr: Value,
len: Value,
},
+
+ pub const ptr_index = 0;
+ pub const len_index = 1;
};
pub const Ty = struct {
@@ -3250,6 +4897,13 @@ pub const Value = extern union {
data: f64,
};
+ pub const Float_80 = struct {
+ pub const base_tag = Tag.float_80;
+
+ base: Payload = .{ .tag = base_tag },
+ data: f80,
+ };
+
pub const Float_128 = struct {
pub const base_tag = Tag.float_128;
@@ -3278,7 +4932,7 @@ pub const Value = extern union {
/// `Module.resolvePeerTypes`.
stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
/// 0 means ABI-aligned.
- alignment: u16,
+ alignment: u32,
},
};
@@ -3287,21 +4941,12 @@ pub const Value = extern union {
base: Payload = .{ .tag = base_tag },
data: struct {
- decl: *Module.Decl,
+ decl_index: Module.Decl.Index,
/// 0 means ABI-aligned.
- alignment: u16,
+ alignment: u32,
},
};
- pub const Struct = struct {
- pub const base_tag = Tag.@"struct";
-
- base: Payload = .{ .tag = base_tag },
- /// Field values. The types are according to the struct type.
- /// The length is provided here so that copying a Value does not depend on the Type.
- data: []Value,
- };
-
pub const Union = struct {
pub const base_tag = Tag.@"union";
@@ -3338,6 +4983,20 @@ pub const Value = extern union {
pub const @"null" = initTag(.null_value);
pub const @"false" = initTag(.bool_false);
pub const @"true" = initTag(.bool_true);
+
+ pub fn makeBool(x: bool) Value {
+ return if (x) Value.@"true" else Value.@"false";
+ }
+
+ pub const RuntimeIndex = enum(u32) {
+ zero = 0,
+ comptime_field_ptr = std.math.maxInt(u32),
+ _,
+
+ pub fn increment(ri: *RuntimeIndex) void {
+ ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1);
+ }
+ };
};
var negative_one_payload: Value.Payload.I64 = .{