author     Jacob Young <jacobly0@users.noreply.github.com>   2023-05-26 21:22:34 -0400
committer  Andrew Kelley <andrew@ziglang.org>                2023-06-10 20:47:56 -0700
commit     2d5bc0146941f4cc207c4fd23058e25a16fd40a7 (patch)
tree       64087a3ecf4d63d9e53a5f04156dff508d58bd26 /src
parent     c8b0d4d149c891ed83db57fe6986d10c5dd654af (diff)
behavior: get more test cases passing with llvm
Diffstat (limited to 'src')
-rw-r--r--  src/InternPool.zig     409
-rw-r--r--  src/Module.zig          83
-rw-r--r--  src/RangeSet.zig        58
-rw-r--r--  src/Sema.zig           671
-rw-r--r--  src/codegen.zig          2
-rw-r--r--  src/codegen/llvm.zig    34
-rw-r--r--  src/codegen/spirv.zig   50
-rw-r--r--  src/type.zig            35
-rw-r--r--  src/value.zig          200
9 files changed, 746 insertions, 796 deletions
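
The thread running through this diff is that an aggregate's interned representation may carry one extra trailing element for an array sentinel, while its logical length excludes it; the new aggregateTypeLenIncludingSentinel below makes the distinction explicit. A minimal sketch of that split, using a simplified stand-in for the real array_type key (which stores type indices, not inline values):

    const std = @import("std");

    // Hypothetical stand-in for InternPool's array_type key.
    const ArrayType = struct {
        len: u64,
        sentinel: ?u8, // `.none` in the real code; an optional here for brevity

        // Mirrors aggregateTypeLen: the sentinel is excluded.
        fn logicalLen(ty: ArrayType) u64 {
            return ty.len;
        }

        // Mirrors aggregateTypeLenIncludingSentinel: one extra stored slot.
        fn storedLen(ty: ArrayType) u64 {
            return ty.len + @boolToInt(ty.sentinel != null);
        }
    };

    test "a sentinel adds exactly one stored element" {
        const ty = ArrayType{ .len = 3, .sentinel = 0 };
        try std.testing.expectEqual(@as(u64, 3), ty.logicalLen());
        try std.testing.expectEqual(@as(u64, 4), ty.storedLen());
    }
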
diff --git a/src/InternPool.zig b/src/InternPool.zig
index fdc0a9af3a..7af91529c1 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -621,8 +621,7 @@ pub const Key = union(enum) {
pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void {
const KeyTag = @typeInfo(Key).Union.tag_type.?;
- const key_tag: KeyTag = key;
- std.hash.autoHash(hasher, key_tag);
+ std.hash.autoHash(hasher, @as(KeyTag, key));
switch (key) {
inline .int_type,
.ptr_type,
@@ -710,39 +709,58 @@ pub const Key = union(enum) {
.aggregate => |aggregate| {
std.hash.autoHash(hasher, aggregate.ty);
- switch (ip.indexToKey(aggregate.ty)) {
- .array_type => |array_type| if (array_type.child == .u8_type) {
- switch (aggregate.storage) {
- .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte),
- .elems => |elems| {
- var buffer: Key.Int.Storage.BigIntSpace = undefined;
- for (elems) |elem| std.hash.autoHash(
+ const len = ip.aggregateTypeLen(aggregate.ty);
+ const child = switch (ip.indexToKey(aggregate.ty)) {
+ .array_type => |array_type| array_type.child,
+ .vector_type => |vector_type| vector_type.child,
+ .anon_struct_type, .struct_type => .none,
+ else => unreachable,
+ };
+
+ if (child == .u8_type) {
+ switch (aggregate.storage) {
+ .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| {
+ std.hash.autoHash(hasher, KeyTag.int);
+ std.hash.autoHash(hasher, byte);
+ },
+ .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| {
+ const elem_key = ip.indexToKey(elem);
+ std.hash.autoHash(hasher, @as(KeyTag, elem_key));
+ switch (elem_key) {
+ .undef => {},
+ .int => |int| std.hash.autoHash(
hasher,
- ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
- unreachable,
- );
- },
- .repeated_elem => |elem| {
- const len = ip.aggregateTypeLen(aggregate.ty);
- var buffer: Key.Int.Storage.BigIntSpace = undefined;
- const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
- unreachable;
- var i: u64 = 0;
- while (i < len) : (i += 1) std.hash.autoHash(hasher, byte);
- },
- }
- return;
- },
- else => {},
+ @intCast(u8, int.storage.u64),
+ ),
+ else => unreachable,
+ }
+ },
+ .repeated_elem => |elem| {
+ const elem_key = ip.indexToKey(elem);
+ var remaining = len;
+ while (remaining > 0) : (remaining -= 1) {
+ std.hash.autoHash(hasher, @as(KeyTag, elem_key));
+ switch (elem_key) {
+ .undef => {},
+ .int => |int| std.hash.autoHash(
+ hasher,
+ @intCast(u8, int.storage.u64),
+ ),
+ else => unreachable,
+ }
+ }
+ },
+ }
+ return;
}
switch (aggregate.storage) {
.bytes => unreachable,
- .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem),
+ .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem|
+ std.hash.autoHash(hasher, elem),
.repeated_elem => |elem| {
- const len = ip.aggregateTypeLen(aggregate.ty);
- var i: u64 = 0;
- while (i < len) : (i += 1) std.hash.autoHash(hasher, elem);
+ var remaining = len;
+ while (remaining > 0) : (remaining -= 1) std.hash.autoHash(hasher, elem);
},
}
},
@@ -960,9 +978,10 @@ pub const Key = union(enum) {
const b_info = b.aggregate;
if (a_info.ty != b_info.ty) return false;
+ const len = ip.aggregateTypeLen(a_info.ty);
const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?;
if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
- for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| {
+ for (0..@intCast(usize, len)) |elem_index| {
const a_elem = switch (a_info.storage) {
.bytes => |bytes| ip.getIfExists(.{ .int = .{
.ty = .u8_type,
@@ -987,11 +1006,19 @@ pub const Key = union(enum) {
switch (a_info.storage) {
.bytes => |a_bytes| {
const b_bytes = b_info.storage.bytes;
- return std.mem.eql(u8, a_bytes, b_bytes);
+ return std.mem.eql(
+ u8,
+ a_bytes[0..@intCast(usize, len)],
+ b_bytes[0..@intCast(usize, len)],
+ );
},
.elems => |a_elems| {
const b_elems = b_info.storage.elems;
- return std.mem.eql(Index, a_elems, b_elems);
+ return std.mem.eql(
+ Index,
+ a_elems[0..@intCast(usize, len)],
+ b_elems[0..@intCast(usize, len)],
+ );
},
.repeated_elem => |a_elem| {
const b_elem = b_info.storage.repeated_elem;
@@ -2691,7 +2718,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.bytes => {
const extra = ip.extraData(Bytes, data);
- const len = @intCast(u32, ip.aggregateTypeLen(extra.ty));
+ const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty));
return .{ .aggregate = .{
.ty = extra.ty,
.storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] },
@@ -2699,7 +2726,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.aggregate => {
const extra = ip.extraDataTrail(Aggregate, data);
- const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty));
+ const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]);
return .{ .aggregate = .{
.ty = extra.data.ty,
@@ -3145,7 +3172,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}),
}),
.int => |int| {
- assert(int != .none);
+ assert(ip.typeOf(int) == .usize_type);
ip.items.appendAssumeCapacity(.{
.tag = .ptr_int,
.data = try ip.addExtra(gpa, PtrAddr{
@@ -3452,7 +3479,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.enum_tag => |enum_tag| {
assert(ip.isEnumType(enum_tag.ty));
- assert(ip.indexToKey(enum_tag.int) == .int);
+ switch (ip.indexToKey(enum_tag.ty)) {
+ .simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))),
+ .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty),
+ else => unreachable,
+ }
ip.items.appendAssumeCapacity(.{
.tag = .enum_tag,
.data = try ip.addExtra(gpa, enum_tag),
@@ -3501,21 +3532,43 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.aggregate => |aggregate| {
const ty_key = ip.indexToKey(aggregate.ty);
- const aggregate_len = ip.aggregateTypeLen(aggregate.ty);
+ const len = ip.aggregateTypeLen(aggregate.ty);
+ const child = switch (ty_key) {
+ .array_type => |array_type| array_type.child,
+ .vector_type => |vector_type| vector_type.child,
+ .anon_struct_type, .struct_type => .none,
+ else => unreachable,
+ };
+ const sentinel = switch (ty_key) {
+ .array_type => |array_type| array_type.sentinel,
+ .vector_type, .anon_struct_type, .struct_type => .none,
+ else => unreachable,
+ };
+ const len_including_sentinel = len + @boolToInt(sentinel != .none);
switch (aggregate.storage) {
.bytes => |bytes| {
- assert(ty_key.array_type.child == .u8_type);
- assert(bytes.len == aggregate_len);
+ assert(child == .u8_type);
+ if (bytes.len != len) {
+ assert(bytes.len == len_including_sentinel);
+ assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64);
+ unreachable;
+ }
},
.elems => |elems| {
- assert(elems.len == aggregate_len);
+ if (elems.len != len) {
+ assert(elems.len == len_including_sentinel);
+ assert(elems[len] == sentinel);
+ unreachable;
+ }
+ },
+ .repeated_elem => |elem| {
+ assert(sentinel == .none or elem == sentinel);
},
- .repeated_elem => {},
}
switch (ty_key) {
- inline .array_type, .vector_type => |seq_type| {
+ .array_type, .vector_type => {
for (aggregate.storage.values()) |elem| {
- assert(ip.typeOf(elem) == seq_type.child);
+ assert(ip.typeOf(elem) == child);
}
},
.struct_type => |struct_type| {
@@ -3534,7 +3587,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
else => unreachable,
}
- if (aggregate_len == 0) {
+ if (len == 0) {
ip.items.appendAssumeCapacity(.{
.tag = .only_possible_value,
.data = @enumToInt(aggregate.ty),
@@ -3543,41 +3596,43 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
switch (ty_key) {
- .anon_struct_type => |anon_struct_type| {
- if (switch (aggregate.storage) {
+ .anon_struct_type => |anon_struct_type| opv: {
+ switch (aggregate.storage) {
.bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| {
if (value != ip.getIfExists(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
- } })) break false;
- } else true,
- .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems),
+ } })) break :opv;
+ },
+ .elems => |elems| if (!std.mem.eql(
+ Index,
+ anon_struct_type.values,
+ elems,
+ )) break :opv,
.repeated_elem => |elem| for (anon_struct_type.values) |value| {
- if (value != elem) break false;
- } else true,
- }) {
- // This encoding works thanks to the fact that, as we just verified,
- // the type itself contains a slice of values that can be provided
- // in the aggregate fields.
- ip.items.appendAssumeCapacity(.{
- .tag = .only_possible_value,
- .data = @enumToInt(aggregate.ty),
- });
- return @intToEnum(Index, ip.items.len - 1);
+ if (value != elem) break :opv;
+ },
}
+ // This encoding works thanks to the fact that, as we just verified,
+ // the type itself contains a slice of values that can be provided
+ // in the aggregate fields.
+ ip.items.appendAssumeCapacity(.{
+ .tag = .only_possible_value,
+ .data = @enumToInt(aggregate.ty),
+ });
+ return @intToEnum(Index, ip.items.len - 1);
},
else => {},
}
- if (switch (aggregate.storage) {
- .bytes => |bytes| for (bytes[1..]) |byte| {
- if (byte != bytes[0]) break false;
- } else true,
- .elems => |elems| for (elems[1..]) |elem| {
- if (elem != elems[0]) break false;
- } else true,
- .repeated_elem => true,
- }) {
+ repeated: {
+ switch (aggregate.storage) {
+ .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte|
+ if (byte != bytes[0]) break :repeated,
+ .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem|
+ if (elem != elems[0]) break :repeated,
+ .repeated_elem => {},
+ }
const elem = switch (aggregate.storage) {
.bytes => |bytes| elem: {
_ = ip.map.pop();
@@ -3607,42 +3662,48 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
return @intToEnum(Index, ip.items.len - 1);
}
- switch (ty_key) {
- .array_type => |array_type| if (array_type.child == .u8_type) {
- const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none);
- try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1);
- try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
- var buffer: Key.Int.Storage.BigIntSpace = undefined;
- switch (aggregate.storage) {
- .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
- .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity(
- ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
+ if (child == .u8_type) bytes: {
+ const string_bytes_index = ip.string_bytes.items.len;
+ try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1);
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
+ switch (aggregate.storage) {
+ .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
+ .elems => |elems| for (elems) |elem| switch (ip.indexToKey(elem)) {
+ .undef => {
+ ip.string_bytes.shrinkRetainingCapacity(string_bytes_index);
+ break :bytes;
+ },
+ .int => |int| ip.string_bytes.appendAssumeCapacity(
+ @intCast(u8, int.storage.u64),
),
- .repeated_elem => |elem| @memset(
- ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len),
- ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
+ else => unreachable,
+ },
+ .repeated_elem => |elem| switch (ip.indexToKey(elem)) {
+ .undef => break :bytes,
+ .int => |int| @memset(
+ ip.string_bytes.addManyAsSliceAssumeCapacity(len),
+ @intCast(u8, int.storage.u64),
),
- }
- if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity(
- ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch
- unreachable,
- );
- const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel);
- ip.items.appendAssumeCapacity(.{
- .tag = .bytes,
- .data = ip.addExtraAssumeCapacity(Bytes{
- .ty = aggregate.ty,
- .bytes = bytes.toString(),
- }),
- });
- return @intToEnum(Index, ip.items.len - 1);
- },
- else => {},
+ else => unreachable,
+ },
+ }
+ if (sentinel != .none) ip.string_bytes.appendAssumeCapacity(
+ @intCast(u8, ip.indexToKey(sentinel).int.storage.u64),
+ );
+ const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .bytes,
+ .data = ip.addExtraAssumeCapacity(Bytes{
+ .ty = aggregate.ty,
+ .bytes = bytes.toString(),
+ }),
+ });
+ return @intToEnum(Index, ip.items.len - 1);
}
try ip.extra.ensureUnusedCapacity(
gpa,
- @typeInfo(Aggregate).Struct.fields.len + aggregate_len,
+ @typeInfo(Aggregate).Struct.fields.len + len_including_sentinel,
);
ip.items.appendAssumeCapacity(.{
.tag = .aggregate,
@@ -3651,6 +3712,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems));
+ if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel));
},
.un => |un| {
@@ -4183,10 +4245,12 @@ pub fn sliceLen(ip: InternPool, i: Index) Index {
/// Given an existing value, returns the same value but with the supplied type.
/// Only some combinations are allowed:
/// * identity coercion
+/// * undef => any
/// * int <=> int
/// * int <=> enum
/// * enum_literal => enum
/// * ptr <=> ptr
+/// * int => ptr
/// * null_value => opt
/// * payload => opt
/// * error set <=> error set
@@ -4194,68 +4258,93 @@ pub fn sliceLen(ip: InternPool, i: Index) Index {
/// * error set => error union
/// * payload => error union
/// * fn <=> fn
+/// * array <=> array
+/// * array <=> vector
+/// * vector <=> vector
pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
const old_ty = ip.typeOf(val);
if (old_ty == new_ty) return val;
- switch (ip.indexToKey(val)) {
- .extern_func => |extern_func| if (ip.isFunctionType(new_ty))
- return ip.get(gpa, .{ .extern_func = .{
- .ty = new_ty,
- .decl = extern_func.decl,
- .lib_name = extern_func.lib_name,
- } }),
- .func => |func| if (ip.isFunctionType(new_ty))
- return ip.get(gpa, .{ .func = .{
- .ty = new_ty,
- .index = func.index,
- } }),
- .int => |int| if (ip.isIntegerType(new_ty))
- return getCoercedInts(ip, gpa, int, new_ty)
- else if (ip.isEnumType(new_ty))
- return ip.get(gpa, .{ .enum_tag = .{
+ switch (val) {
+ .undef => return ip.get(gpa, .{ .undef = new_ty }),
+ .null_value => if (ip.isOptionalType(new_ty))
+ return ip.get(gpa, .{ .opt = .{
.ty = new_ty,
- .int = val,
+ .val = .none,
} }),
- .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
- return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
- .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
- .enum_type => |enum_type| {
- const index = enum_type.nameIndex(ip, enum_literal).?;
+ else => switch (ip.indexToKey(val)) {
+ .undef => return ip.get(gpa, .{ .undef = new_ty }),
+ .extern_func => |extern_func| if (ip.isFunctionType(new_ty))
+ return ip.get(gpa, .{ .extern_func = .{
+ .ty = new_ty,
+ .decl = extern_func.decl,
+ .lib_name = extern_func.lib_name,
+ } }),
+ .func => |func| if (ip.isFunctionType(new_ty))
+ return ip.get(gpa, .{ .func = .{
+ .ty = new_ty,
+ .index = func.index,
+ } }),
+ .int => |int| if (ip.isIntegerType(new_ty))
+ return getCoercedInts(ip, gpa, int, new_ty)
+ else if (ip.isEnumType(new_ty))
return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
- .int = if (enum_type.values.len != 0)
- enum_type.values[index]
- else
- try ip.get(gpa, .{ .int = .{
- .ty = enum_type.tag_ty,
- .storage = .{ .u64 = index },
- } }),
- } });
+ .int = val,
+ } })
+ else if (ip.isPointerType(new_ty))
+ return ip.get(gpa, .{ .ptr = .{
+ .ty = new_ty,
+ .addr = .{ .int = val },
+ } }),
+ .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
+ return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
+ .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
+ .enum_type => |enum_type| {
+ const index = enum_type.nameIndex(ip, enum_literal).?;
+ return ip.get(gpa, .{ .enum_tag = .{
+ .ty = new_ty,
+ .int = if (enum_type.values.len != 0)
+ enum_type.values[index]
+ else
+ try ip.get(gpa, .{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = index },
+ } }),
+ } });
+ },
+ else => {},
},
- else => {},
- },
- .ptr => |ptr| if (ip.isPointerType(new_ty))
- return ip.get(gpa, .{ .ptr = .{
- .ty = new_ty,
- .addr = ptr.addr,
- .len = ptr.len,
- } }),
- .err => |err| if (ip.isErrorSetType(new_ty))
- return ip.get(gpa, .{ .err = .{
- .ty = new_ty,
- .name = err.name,
- } })
- else if (ip.isErrorUnionType(new_ty))
- return ip.get(gpa, .{ .error_union = .{
- .ty = new_ty,
- .val = .{ .err_name = err.name },
- } }),
- .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
- return ip.get(gpa, .{ .error_union = .{
+ .ptr => |ptr| if (ip.isPointerType(new_ty))
+ return ip.get(gpa, .{ .ptr = .{
+ .ty = new_ty,
+ .addr = ptr.addr,
+ .len = ptr.len,
+ } }),
+ .err => |err| if (ip.isErrorSetType(new_ty))
+ return ip.get(gpa, .{ .err = .{
+ .ty = new_ty,
+ .name = err.name,
+ } })
+ else if (ip.isErrorUnionType(new_ty))
+ return ip.get(gpa, .{ .error_union = .{
+ .ty = new_ty,
+ .val = .{ .err_name = err.name },
+ } }),
+ .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
+ return ip.get(gpa, .{ .error_union = .{
+ .ty = new_ty,
+ .val = error_union.val,
+ } }),
+ .aggregate => |aggregate| return ip.get(gpa, .{ .aggregate = .{
.ty = new_ty,
- .val = error_union.val,
+ .storage = switch (aggregate.storage) {
+ .bytes => |bytes| .{ .bytes = bytes[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] },
+ .elems => |elems| .{ .elems = elems[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] },
+ .repeated_elem => |elem| .{ .repeated_elem = elem },
+ },
} }),
- else => {},
+ else => {},
+ },
}
switch (ip.indexToKey(new_ty)) {
.opt_type => |child_type| switch (val) {
@@ -4527,7 +4616,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.type_function => b: {
const info = ip.extraData(TypeFunction, data);
- break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len);
+ break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len);
},
.undef => 0,
@@ -4570,14 +4659,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.bytes => b: {
const info = ip.extraData(Bytes, data);
- const len = @intCast(u32, ip.aggregateTypeLen(info.ty));
+ const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
break :b @sizeOf(Bytes) + len +
@boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0);
},
.aggregate => b: {
const info = ip.extraData(Aggregate, data);
- const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty));
- break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len);
+ const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
+ break :b @sizeOf(Aggregate) + (@sizeOf(Index) * fields_len);
},
.repeated => @sizeOf(Repeated),
@@ -4889,6 +4978,16 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
};
}
+pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 {
+ return switch (ip.indexToKey(ty)) {
+ .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
+ .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
+ .array_type => |array_type| array_type.len + @boolToInt(array_type.sentinel != .none),
+ .vector_type => |vector_type| vector_type.len,
+ else => unreachable,
+ };
+}
+
pub fn isNoReturn(ip: InternPool, ty: Index) bool {
return switch (ty) {
.noreturn_type => true,
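
A key constraint in the hashWithHasher rewrite above is that the storage forms of a u8 aggregate (bytes, elems, repeated_elem) must hash identically, since all of them can intern the same value. A minimal sketch of that invariant, assuming a two-variant storage and plain bytes in place of interned int keys:

    const std = @import("std");

    const Storage = union(enum) {
        bytes: []const u8,
        repeated_elem: u8,
    };

    // Hash every logical element the same way regardless of storage form.
    fn hashStorage(hasher: *std.hash.Wyhash, storage: Storage, len: usize) void {
        switch (storage) {
            .bytes => |bytes| for (bytes[0..len]) |byte| std.hash.autoHash(hasher, byte),
            .repeated_elem => |elem| {
                var remaining = len;
                while (remaining > 0) : (remaining -= 1) std.hash.autoHash(hasher, elem);
            },
        }
    }

    test "equal aggregates hash equal across storage forms" {
        var a = std.hash.Wyhash.init(0);
        hashStorage(&a, .{ .bytes = "aaaa" }, 4);
        var b = std.hash.Wyhash.init(0);
        hashStorage(&b, .{ .repeated_elem = 'a' }, 4);
        try std.testing.expectEqual(a.final(), b.final());
    }
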
diff --git a/src/Module.zig b/src/Module.zig
index 1605bffdd9..314e636bab 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -99,6 +99,7 @@ monomorphed_funcs: MonomorphedFuncsSet = .{},
/// The set of all comptime function calls that have been cached so that future calls
/// with the same parameters will get the same return value.
memoized_calls: MemoizedCallSet = .{},
+memoized_call_args: MemoizedCall.Args = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
@@ -230,46 +231,30 @@ pub const MemoizedCallSet = std.HashMapUnmanaged(
);
pub const MemoizedCall = struct {
- module: *Module,
+ args: *const Args,
+
+ pub const Args = std.ArrayListUnmanaged(InternPool.Index);
pub const Key = struct {
func: Fn.Index,
- args: []TypedValue,
- };
+ args_index: u32,
+ args_count: u32,
- pub const Result = struct {
- val: Value,
- arena: std.heap.ArenaAllocator.State,
+ pub fn args(key: Key, ctx: MemoizedCall) []InternPool.Index {
+ return ctx.args.items[key.args_index..][0..key.args_count];
+ }
};
- pub fn eql(ctx: @This(), a: Key, b: Key) bool {
- if (a.func != b.func) return false;
-
- assert(a.args.len == b.args.len);
- for (a.args, 0..) |a_arg, arg_i| {
- const b_arg = b.args[arg_i];
- if (!a_arg.eql(b_arg, ctx.module)) {
- return false;
- }
- }
+ pub const Result = InternPool.Index;
- return true;
+ pub fn eql(ctx: MemoizedCall, a: Key, b: Key) bool {
+ return a.func == b.func and mem.eql(InternPool.Index, a.args(ctx), b.args(ctx));
}
- /// Must match `Sema.GenericCallAdapter.hash`.
- pub fn hash(ctx: @This(), key: Key) u64 {
+ pub fn hash(ctx: MemoizedCall, key: Key) u64 {
var hasher = std.hash.Wyhash.init(0);
-
- // The generic function Decl is guaranteed to be the first dependency
- // of each of its instantiations.
std.hash.autoHash(&hasher, key.func);
-
- // This logic must be kept in sync with the logic in `analyzeCall` that
- // computes the hash.
- for (key.args) |arg| {
- arg.hash(&hasher, ctx.module);
- }
-
+ std.hash.autoHashStrat(&hasher, key.args(ctx), .Deep);
return hasher.final();
}
};
@@ -883,6 +868,10 @@ pub const Decl = struct {
return decl.ty.abiAlignment(mod);
}
}
+
+ pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
+ decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+ }
};
/// This state is attached to every Decl when Module emit_h is non-null.
@@ -3325,15 +3314,8 @@ pub fn deinit(mod: *Module) void {
mod.test_functions.deinit(gpa);
mod.align_stack_fns.deinit(gpa);
mod.monomorphed_funcs.deinit(gpa);
-
- {
- var it = mod.memoized_calls.iterator();
- while (it.next()) |entry| {
- gpa.free(entry.key_ptr.args);
- entry.value_ptr.arena.promote(gpa).deinit();
- }
- mod.memoized_calls.deinit(gpa);
- }
+ mod.memoized_call_args.deinit(gpa);
+ mod.memoized_calls.deinit(gpa);
mod.decls_free_list.deinit(gpa);
mod.allocated_decls.deinit(gpa);
@@ -5894,6 +5876,7 @@ pub fn initNewAnonDecl(
typed_value: TypedValue,
name: [:0]u8,
) !void {
+ assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern()));
errdefer mod.gpa.free(name);
const new_decl = mod.declPtr(new_decl_index);
@@ -6645,7 +6628,7 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void {
if (decl.alive) return;
decl.alive = true;
- decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+ try decl.intern(mod);
// This is the first time we are marking this Decl alive. We must
// therefore recurse into its value and mark any Decl it references
@@ -6749,15 +6732,19 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
}
}
- // Canonicalize host_size. If it matches the bit size of the pointee type,
- // we change it to 0 here. If this causes an assertion trip, the pointee type
- // needs to be resolved before calling this ptr() function.
- if (info.host_size != 0) {
- const elem_bit_size = info.elem_type.toType().bitSize(mod);
- assert(info.bit_offset + elem_bit_size <= info.host_size * 8);
- if (info.host_size * 8 == elem_bit_size) {
- canon_info.host_size = 0;
- }
+ switch (info.vector_index) {
+ // Canonicalize host_size. If it matches the bit size of the pointee type,
+ // we change it to 0 here. If this causes an assertion trip, the pointee type
+ // needs to be resolved before calling this ptr() function.
+ .none => if (info.host_size != 0) {
+ const elem_bit_size = info.elem_type.toType().bitSize(mod);
+ assert(info.bit_offset + elem_bit_size <= info.host_size * 8);
+ if (info.host_size * 8 == elem_bit_size) {
+ canon_info.host_size = 0;
+ }
+ },
+ .runtime => {},
+ _ => assert(@enumToInt(info.vector_index) < info.host_size),
}
return (try intern(mod, .{ .ptr_type = canon_info })).toType();
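
The MemoizedCall rework above replaces per-entry argument slices (each backed by its own arena) with a single shared memoized_call_args list that keys reference by offset and count. A minimal sketch of that layout, using u32 in place of InternPool.Index:

    const std = @import("std");

    const Args = std.ArrayListUnmanaged(u32);

    const Key = struct {
        func: u32,
        args_index: u32,
        args_count: u32,

        // Mirrors MemoizedCall.Key.args: a view into the shared buffer.
        fn args(key: Key, store: *const Args) []const u32 {
            return store.items[key.args_index..][0..key.args_count];
        }
    };

    test "keys slice into one shared argument buffer" {
        const gpa = std.testing.allocator;
        var store: Args = .{};
        defer store.deinit(gpa);

        try store.appendSlice(gpa, &.{ 10, 20, 30 });
        const key = Key{ .func = 0, .args_index = 0, .args_count = 3 };
        try std.testing.expectEqualSlices(u32, &.{ 10, 20, 30 }, key.args(&store));
    }
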
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index a015c7b568..f808322fc7 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -1,18 +1,18 @@
const std = @import("std");
+const assert = std.debug.assert;
const Order = std.math.Order;
-const RangeSet = @This();
+const InternPool = @import("InternPool.zig");
const Module = @import("Module.zig");
+const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
-const Type = @import("type.zig").Type;
-const Value = @import("value.zig").Value;
ranges: std.ArrayList(Range),
module: *Module,
pub const Range = struct {
- first: Value,
- last: Value,
+ first: InternPool.Index,
+ last: InternPool.Index,
src: SwitchProngSrc,
};
@@ -29,18 +29,27 @@ pub fn deinit(self: *RangeSet) void {
pub fn add(
self: *RangeSet,
- first: Value,
- last: Value,
- ty: Type,
+ first: InternPool.Index,
+ last: InternPool.Index,
src: SwitchProngSrc,
) !?SwitchProngSrc {
+ const mod = self.module;
+ const ip = &mod.intern_pool;
+
+ const ty = ip.typeOf(first);
+ assert(ty == ip.typeOf(last));
+
for (self.ranges.items) |range| {
- if (last.compareScalar(.gte, range.first, ty, self.module) and
- first.compareScalar(.lte, range.last, ty, self.module))
+ assert(ty == ip.typeOf(range.first));
+ assert(ty == ip.typeOf(range.last));
+
+ if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and
+ first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod))
{
return range.src; // They overlap.
}
}
+
try self.ranges.append(.{
.first = first,
.last = last,
@@ -49,30 +58,29 @@ pub fn add(
return null;
}
-const LessThanContext = struct { ty: Type, module: *Module };
-
/// Assumes a and b do not overlap
-fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool {
- return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.module);
+fn lessThan(mod: *Module, a: Range, b: Range) bool {
+ const ty = mod.intern_pool.typeOf(a.first).toType();
+ return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod);
}
-pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
+pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
+ const mod = self.module;
+ const ip = &mod.intern_pool;
+ assert(ip.typeOf(first) == ip.typeOf(last));
+
if (self.ranges.items.len == 0)
return false;
- const mod = self.module;
- std.mem.sort(Range, self.ranges.items, LessThanContext{
- .ty = ty,
- .module = mod,
- }, lessThan);
+ std.mem.sort(Range, self.ranges.items, mod, lessThan);
- if (!self.ranges.items[0].first.eql(first, ty, mod) or
- !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod))
+ if (self.ranges.items[0].first != first or
+ self.ranges.items[self.ranges.items.len - 1].last != last)
{
return false;
}
- var space: Value.BigIntSpace = undefined;
+ var space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
defer counter.deinit();
@@ -83,10 +91,10 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
const prev = self.ranges.items[i];
// prev.last + 1 == cur.first
- try counter.copy(prev.last.toBigInt(&space, mod));
+ try counter.copy(prev.last.toValue().toBigInt(&space, mod));
try counter.addScalar(&counter, 1);
- const cur_start_int = cur.first.toBigInt(&space, mod);
+ const cur_start_int = cur.first.toValue().toBigInt(&space, mod);
if (!cur_start_int.eq(counter.toConst())) {
return false;
}
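
With ranges stored as interned indices, RangeSet.add no longer needs a type parameter: ip.typeOf recovers the type from either endpoint, and the asserts above pin both endpoints to the same type. A minimal sketch of the overlap test under that assumption, with a toy "interned" value that carries its own type:

    const std = @import("std");

    // Toy stand-in: an interned value knows its own type.
    const Interned = struct { ty: u32, value: i64 };

    const Range = struct { first: Interned, last: Interned };

    // Mirrors the overlap condition in add(): last >= other.first and first <= other.last.
    fn overlaps(a: Range, b: Range) bool {
        std.debug.assert(a.first.ty == a.last.ty);
        std.debug.assert(a.first.ty == b.first.ty and b.first.ty == b.last.ty);
        return a.last.value >= b.first.value and a.first.value <= b.last.value;
    }

    test "overlap check needs no separate type parameter" {
        const u8_ty: u32 = 1;
        const a = Range{ .first = .{ .ty = u8_ty, .value = 0 }, .last = .{ .ty = u8_ty, .value = 9 } };
        const b = Range{ .first = .{ .ty = u8_ty, .value = 5 }, .last = .{ .ty = u8_ty, .value = 7 } };
        try std.testing.expect(overlaps(a, b));
    }
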
diff --git a/src/Sema.zig b/src/Sema.zig
index c351298511..61061279e4 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1609,7 +1609,7 @@ fn analyzeBodyInner(
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
- const inline_body = if (cond.val.toBool(mod)) then_body else else_body;
+ const inline_body = if (cond.val.toBool()) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
@@ -1630,7 +1630,7 @@ fn analyzeBodyInner(
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
- const inline_body = if (cond.val.toBool(mod)) then_body else else_body;
+ const inline_body = if (cond.val.toBool()) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src);
const old_runtime_index = block.runtime_index;
@@ -1663,7 +1663,7 @@ fn analyzeBodyInner(
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
- if (is_non_err_val.toBool(mod)) {
+ if (is_non_err_val.toBool()) {
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
@@ -1689,7 +1689,7 @@ fn analyzeBodyInner(
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
- if (is_non_err_val.toBool(mod)) {
+ if (is_non_err_val.toBool()) {
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
@@ -1778,12 +1778,11 @@ fn resolveConstBool(
zir_ref: Zir.Inst.Ref,
reason: []const u8,
) !bool {
- const mod = sema.mod;
const air_inst = try sema.resolveInst(zir_ref);
const wanted_type = Type.bool;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
- return val.toBool(mod);
+ return val.toBool();
}
pub fn resolveConstString(
@@ -2488,7 +2487,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
defer anon_decl.deinit();
const decl_index = try anon_decl.finish(
pointee_ty,
- Value.undef,
+ (try mod.intern(.{ .undef = pointee_ty.toIntern() })).toValue(),
alignment.toByteUnits(0),
);
sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.decl_index = decl_index;
@@ -2611,7 +2610,7 @@ fn coerceResultPtr(
.@"addrspace" = addr_space,
});
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
- new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val);
+ new_ptr = try sema.addConstant(ptr_operand_ty, try mod.getCoerced(ptr_val, ptr_operand_ty));
} else {
new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
}
@@ -3613,7 +3612,7 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai
// Detect if a comptime value simply needs to have its type changed.
if (try sema.resolveMaybeUndefVal(alloc)) |val| {
- return sema.addConstant(const_ptr_ty, val);
+ return sema.addConstant(const_ptr_ty, try mod.getCoerced(val, const_ptr_ty));
}
return block.addBitCast(const_ptr_ty, alloc);
@@ -3735,6 +3734,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
const decl = mod.declPtr(decl_index);
+ if (iac.is_const) try decl.intern(mod);
const final_elem_ty = decl.ty;
const final_ptr_ty = try mod.ptrType(.{
.elem_type = final_elem_ty.toIntern(),
@@ -3774,7 +3774,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
// Detect if the value is comptime-known. In such case, the
// last 3 AIR instructions of the block will look like this:
//
- // %a = interned
+ // %a = inferred_alloc
// %b = bitcast(%a)
// %c = store(%b, %d)
//
@@ -3814,22 +3814,22 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
}
};
- const const_inst = while (true) {
+ while (true) {
if (search_index == 0) break :ct;
search_index -= 1;
const candidate = block.instructions.items[search_index];
+ if (candidate == ptr_inst) break;
switch (air_tags[candidate]) {
.dbg_stmt, .dbg_block_begin, .dbg_block_end => continue,
- .interned => break candidate,
else => break :ct,
}
- };
+ }
const store_op = air_datas[store_inst].bin_op;
const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct;
if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct;
- if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct;
+ if (air_datas[bitcast_inst].ty_op.operand != ptr) break :ct;
const new_decl_index = d: {
var anon_decl = try block.startAnonDecl();
@@ -3850,7 +3850,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
sema.air_instructions.set(ptr_inst, .{
.tag = .interned,
.data = .{ .interned = try mod.intern(.{ .ptr = .{
- .ty = final_elem_ty.toIntern(),
+ .ty = final_ptr_ty.toIntern(),
.addr = .{ .decl = new_decl_index },
} }) },
});
@@ -4707,15 +4707,23 @@ fn zirValidateArrayInit(
return;
}
+ // If the array has one possible value, the value is always comptime-known.
+ if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| {
+ const array_init = try sema.addConstant(array_ty, array_opv);
+ try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
+ return;
+ }
+
var array_is_comptime = true;
var first_block_index = block.instructions.items.len;
var make_runtime = false;
// Collect the comptime element values in case the array literal ends up
// being comptime-known.
- const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod));
- const element_vals = try sema.arena.alloc(InternPool.Index, array_len_s);
- const opt_opv = try sema.typeHasOnePossibleValue(array_ty);
+ const element_vals = try sema.arena.alloc(
+ InternPool.Index,
+ try sema.usizeCast(block, init_src, array_len),
+ );
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
@@ -4727,12 +4735,6 @@ fn zirValidateArrayInit(
element_vals[i] = opv.toIntern();
continue;
}
- } else {
- // Array has one possible value, so value is always comptime-known
- if (opt_opv) |opv| {
- element_vals[i] = opv.toIntern();
- continue;
- }
}
const elem_ptr_air_ref = sema.inst_map.get(elem_ptr).?;
@@ -4814,11 +4816,6 @@ fn zirValidateArrayInit(
// Our task is to delete all the `elem_ptr` and `store` instructions, and insert
// instead a single `store` to the array_ptr with a comptime struct value.
- // Also to populate the sentinel value, if any.
- if (array_ty.sentinel(mod)) |sentinel_val| {
- element_vals[instrs.len] = sentinel_val.toIntern();
- }
-
block.instructions.shrinkRetainingCapacity(first_block_index);
var array_val = try mod.intern(.{ .aggregate = .{
@@ -6259,7 +6256,7 @@ fn popErrorReturnTrace(
if (operand != .none) {
is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand);
if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val|
- is_non_error = cond_val.toBool(mod);
+ is_non_error = cond_val.toBool();
} else is_non_error = true; // no operand means pop unconditionally
if (is_non_error == true) {
@@ -6873,14 +6870,15 @@ fn analyzeCall(
// If it's a comptime function call, we need to memoize it as long as no external
// comptime memory is mutated.
- var memoized_call_key: Module.MemoizedCall.Key = undefined;
+ var memoized_call_key = Module.MemoizedCall.Key{
+ .func = module_fn_index,
+ .args_index = @intCast(u32, mod.memoized_call_args.items.len),
+ .args_count = @intCast(u32, func_ty_info.param_types.len),
+ };
var delete_memoized_call_key = false;
- defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args);
+ defer if (delete_memoized_call_key) mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index);
if (is_comptime_call) {
- memoized_call_key = .{
- .func = module_fn_index,
- .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len),
- };
+ try mod.memoized_call_args.ensureUnusedCapacity(gpa, memoized_call_key.args_count);
delete_memoized_call_key = true;
}
@@ -6916,8 +6914,7 @@ fn analyzeCall(
uncasted_args,
is_comptime_call,
&should_memoize,
- memoized_call_key,
- func_ty_info.param_types,
+ mod.typeToFunc(func_ty).?.param_types,
func,
&has_comptime_args,
) catch |err| switch (err) {
@@ -6934,8 +6931,7 @@ fn analyzeCall(
uncasted_args,
is_comptime_call,
&should_memoize,
- memoized_call_key,
- func_ty_info.param_types,
+ mod.typeToFunc(func_ty).?.param_types,
func,
&has_comptime_args,
);
@@ -6988,9 +6984,19 @@ fn analyzeCall(
// bug generating invalid LLVM IR.
const res2: Air.Inst.Ref = res2: {
if (should_memoize and is_comptime_call) {
- if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| {
- break :res2 try sema.addConstant(fn_ret_ty, result.val);
+ const gop = try mod.memoized_calls.getOrPutContext(
+ gpa,
+ memoized_call_key,
+ .{ .args = &mod.memoized_call_args },
+ );
+ if (gop.found_existing) {
+ // We need to use the original memoized error set instead of fn_ret_ty.
+ const result = gop.value_ptr.*;
+ assert(result != .none); // recursive memoization?
+ break :res2 try sema.addConstant(mod.intern_pool.typeOf(result).toType(), result.toValue());
}
+ gop.value_ptr.* = .none;
+ delete_memoized_call_key = false;
}
const new_func_resolved_ty = try mod.funcType(new_fn_info);
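
The memoization above now reserves its map slot before evaluating the call: getOrPutContext first, store .none as an in-progress marker, then overwrite it with the interned result; finding .none on lookup would mean recursive memoization, which the assert flags. A minimal sketch of that reserve-then-fill pattern, with u32 keys and a hypothetical sentinel in place of InternPool.Index.none:

    const std = @import("std");

    const none = std.math.maxInt(u32); // stand-in for .none

    fn memoizedSquare(map: *std.AutoHashMap(u32, u32), n: u32) !u32 {
        const gop = try map.getOrPut(n);
        if (gop.found_existing) {
            // Seeing the marker here would mean we re-entered mid-computation.
            std.debug.assert(gop.value_ptr.* != none);
            return gop.value_ptr.*;
        }
        gop.value_ptr.* = none; // reserve: computation in progress
        const result = n * n; // the real code evaluates the comptime call body here
        map.getPtr(n).?.* = result;
        return result;
    }

    test "the slot is reserved before the result exists" {
        var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
        defer map.deinit();
        try std.testing.expectEqual(@as(u32, 9), try memoizedSquare(&map, 3));
        try std.testing.expectEqual(@as(u32, 9), try memoizedSquare(&map, 3));
    }
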
@@ -7049,26 +7055,10 @@ fn analyzeCall(
if (should_memoize and is_comptime_call) {
const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
-
- // TODO: check whether any external comptime memory was mutated by the
- // comptime function call. If so, then do not memoize the call here.
- // TODO: re-evaluate whether memoized_calls needs its own arena. I think
- // it should be fine to use the Decl arena for the function.
- {
- var arena_allocator = std.heap.ArenaAllocator.init(gpa);
- errdefer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- for (memoized_call_key.args) |*arg| {
- arg.* = try arg.*.copy(arena);
- }
-
- try mod.memoized_calls.putContext(gpa, memoized_call_key, .{
- .val = try result_val.copy(arena),
- .arena = arena_allocator.state,
- }, .{ .module = mod });
- delete_memoized_call_key = false;
- }
+ mod.memoized_calls.getPtrContext(
+ memoized_call_key,
+ .{ .args = &mod.memoized_call_args },
+ ).?.* = try result_val.intern(fn_ret_ty, mod);
}
break :res2 result;
@@ -7214,11 +7204,11 @@ fn analyzeInlineCallArg(
uncasted_args: []const Air.Inst.Ref,
is_comptime_call: bool,
should_memoize: *bool,
- memoized_call_key: Module.MemoizedCall.Key,
raw_param_types: []const InternPool.Index,
func_inst: Air.Inst.Ref,
has_comptime_args: *bool,
) !void {
+ const mod = sema.mod;
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
.param_comptime, .param_anytype_comptime => has_comptime_args.* = true,
@@ -7276,11 +7266,8 @@ fn analyzeInlineCallArg(
try sema.resolveLazyValue(arg_val);
},
}
- should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod);
- memoized_call_key.args[arg_i.*] = .{
- .ty = param_ty.toType(),
- .val = arg_val,
- };
+ should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod);
+ mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(param_ty.toType(), mod));
} else {
sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
}
@@ -7315,11 +7302,8 @@ fn analyzeInlineCallArg(
try sema.resolveLazyValue(arg_val);
},
}
- should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod);
- memoized_call_key.args[arg_i.*] = .{
- .ty = sema.typeOf(uncasted_arg),
- .val = arg_val,
- };
+ should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod);
+ mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(sema.typeOf(uncasted_arg), mod));
} else {
if (zir_tags[inst] == .param_anytype_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
@@ -8279,7 +8263,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const int_tag_ty = try enum_tag_ty.intTagType(mod);
if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
- return sema.addConstant(int_tag_ty, opv);
+ return sema.addConstant(int_tag_ty, try mod.getCoerced(opv, int_tag_ty));
}
if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| {
@@ -8310,7 +8294,10 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (dest_ty.isNonexhaustiveEnum(mod)) {
const int_tag_ty = try dest_ty.intTagType(mod);
if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
- return sema.addConstant(dest_ty, int_val);
+ return sema.addConstant(dest_ty, (try mod.intern(.{ .enum_tag = .{
+ .ty = dest_ty.toIntern(),
+ .int = int_val.toIntern(),
+ } })).toValue());
}
const msg = msg: {
const msg = try sema.errMsg(
@@ -8657,8 +8644,10 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air
const result_ty = operand_ty.errorUnionSet(mod);
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
- assert(val.getError(mod) != null);
- return sema.addConstant(result_ty, val);
+ return sema.addConstant(result_ty, (try mod.intern(.{ .err = .{
+ .ty = result_ty.toIntern(),
+ .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -10737,7 +10726,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
block,
&range_set,
item_ref,
- operand_ty,
src_node_offset,
.{ .scalar = scalar_i },
);
@@ -10760,7 +10748,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
block,
&range_set,
item_ref,
- operand_ty,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
@@ -10778,7 +10765,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
&range_set,
item_first,
item_last,
- operand_ty,
src_node_offset,
.{ .range = .{ .prong = multi_i, .item = range_i } },
);
@@ -10792,7 +10778,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (operand_ty.zigTypeTag(mod) == .Int) {
const min_int = try operand_ty.minInt(mod, operand_ty);
const max_int = try operand_ty.maxInt(mod, operand_ty);
- if (try range_set.spans(min_int, max_int, operand_ty)) {
+ if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
if (special_prong == .@"else") {
return sema.fail(
block,
@@ -10894,11 +10880,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
);
}
- var seen_values = ValueSrcMap.initContext(gpa, .{
- .ty = operand_ty,
- .mod = mod,
- });
- defer seen_values.deinit();
+ var seen_values = ValueSrcMap{};
+ defer seen_values.deinit(gpa);
var extra_index: usize = special.end;
{
@@ -11664,10 +11647,10 @@ const RangeSetUnhandledIterator = struct {
it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty);
}
it.first = false;
- if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) {
+ if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first.toValue(), it.ty, it.sema.mod)) {
return it.cur;
}
- it.cur = it.ranges[it.range_i].last;
+ it.cur = it.ranges[it.range_i].last.toValue();
}
if (!it.first) {
it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty);
@@ -11687,16 +11670,15 @@ fn resolveSwitchItemVal(
switch_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
range_expand: Module.SwitchProngSrc.RangeExpand,
-) CompileError!TypedValue {
+) CompileError!InternPool.Index {
const mod = sema.mod;
const item = try sema.resolveInst(item_ref);
- const item_ty = sema.typeOf(item);
// Constructing a LazySrcLoc is costly because we only have the switch AST node.
// Only if we know for sure we need to report a compile error do we resolve the
// full source locations.
if (sema.resolveConstValue(block, .unneeded, item, "")) |val| {
try sema.resolveLazyValue(val);
- return TypedValue{ .ty = item_ty, .val = val };
+ return val.toIntern();
} else |err| switch (err) {
error.NeededSourceLocation => {
const src = switch_prong_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand);
@@ -11713,18 +11695,17 @@ fn validateSwitchRange(
range_set: *RangeSet,
first_ref: Zir.Inst.Ref,
last_ref: Zir.Inst.Ref,
- operand_ty: Type,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const mod = sema.mod;
- const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
- const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
- if (first_val.compareScalar(.gt, last_val, operand_ty, mod)) {
+ const first = try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first);
+ const last = try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last);
+ if (first.toValue().compareScalar(.gt, last.toValue(), mod.intern_pool.typeOf(first).toType(), mod)) {
const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
return sema.fail(block, src, "range start value is greater than the end value", .{});
}
- const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src);
+ const maybe_prev_src = try range_set.add(first, last, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
@@ -11733,12 +11714,11 @@ fn validateSwitchItem(
block: *Block,
range_set: *RangeSet,
item_ref: Zir.Inst.Ref,
- operand_ty: Type,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
- const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
- const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src);
+ const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+ const maybe_prev_src = try range_set.add(item, item, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
@@ -11751,9 +11731,11 @@ fn validateSwitchItemEnum(
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
- const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
- const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, sema.mod) orelse {
- const maybe_prev_src = try range_set.add(item_tv.val, item_tv.val, item_tv.ty, switch_prong_src);
+ const ip = &sema.mod.intern_pool;
+ const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+ const int = ip.indexToKey(item).enum_tag.int;
+ const field_index = ip.indexToKey(ip.typeOf(item)).enum_type.tagValueIndex(ip, int) orelse {
+ const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
};
const maybe_prev_src = seen_fields[field_index];
@@ -11770,9 +11752,9 @@ fn validateSwitchItemError(
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const ip = &sema.mod.intern_pool;
- const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+ const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
// TODO: Do i need to typecheck here?
- const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.toIntern()).err.name);
+ const error_name = ip.stringToSlice(ip.indexToKey(item).err.name);
const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
prev.value
else
@@ -11822,8 +11804,8 @@ fn validateSwitchItemBool(
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const mod = sema.mod;
- const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
- if (item_val.toBool(mod)) {
+ const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+ if (item.toValue().toBool()) {
true_count.* += 1;
} else {
false_count.* += 1;
@@ -11835,7 +11817,7 @@ fn validateSwitchItemBool(
}
}
-const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);
+const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, Module.SwitchProngSrc);
fn validateSwitchItemSparse(
sema: *Sema,
@@ -11845,8 +11827,8 @@ fn validateSwitchItemSparse(
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
- const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
- const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
+ const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+ const kv = (try seen_values.fetchPut(sema.gpa, item, switch_prong_src)) orelse return;
return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
}
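
The ValueSrcMap change above is the same interning dividend: equal values now share an InternPool.Index, so duplicate switch items can be detected with a plain auto hash map instead of a custom Value.HashContext. A minimal sketch of the fetchPut dedup pattern, with u32 standing in for InternPool.Index:

    const std = @import("std");

    test "interned indices dedup with a plain auto hash map" {
        const gpa = std.testing.allocator;
        var seen = std.AutoHashMapUnmanaged(u32, usize){};
        defer seen.deinit(gpa);

        // The first fetchPut inserts; the second reports the earlier entry,
        // mirroring how validateSwitchItemSparse detects a duplicate prong.
        const item: u32 = 42;
        try std.testing.expect((try seen.fetchPut(gpa, item, 0)) == null);
        const prev = (try seen.fetchPut(gpa, item, 1)).?;
        try std.testing.expectEqual(@as(usize, 0), prev.value);
    }
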
@@ -13047,8 +13029,6 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
- const final_len_including_sent = result_len + @boolToInt(lhs_info.sentinel != null);
-
const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
(try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
else
@@ -13065,7 +13045,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
}
- const element_vals = try sema.arena.alloc(InternPool.Index, final_len_including_sent);
+ const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
var elem_i: usize = 0;
while (elem_i < result_len) {
var lhs_i: usize = 0;
@@ -13075,9 +13055,6 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
elem_i += 1;
}
}
- if (lhs_info.sentinel) |sent_val| {
- element_vals[result_len] = sent_val.toIntern();
- }
break :v try mod.intern(.{ .aggregate = .{
.ty = result_ty.toIntern(),
.storage = .{ .elems = element_vals },
@@ -14896,13 +14873,18 @@ fn analyzeArithmetic(
.ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
else => unreachable,
};
+ const scalar_one = switch (scalar_tag) {
+ .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
+ .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
+ else => unreachable,
+ };
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
- if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
+ if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
return casted_rhs;
}
}
@@ -14916,7 +14898,7 @@ fn analyzeArithmetic(
const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
- if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
+ if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
@@ -15524,7 +15506,7 @@ fn cmpSelf(
} else {
if (resolved_type.zigTypeTag(mod) == .Bool) {
// We can lower bool eq/neq more efficiently.
- return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(mod), rhs_src);
+ return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
}
break :src rhs_src;
}
@@ -15534,7 +15516,7 @@ fn cmpSelf(
if (resolved_type.zigTypeTag(mod) == .Bool) {
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
- return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src);
+ return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
}
}
break :src lhs_src;
@@ -15840,6 +15822,7 @@ fn zirBuiltinSrc(
break :blk try mod.intern(.{ .ptr = .{
.ty = .slice_const_u8_sentinel_0_type,
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
};
@@ -15864,6 +15847,7 @@ fn zirBuiltinSrc(
break :blk try mod.intern(.{ .ptr = .{
.ty = .slice_const_u8_sentinel_0_type,
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
};
@@ -16314,6 +16298,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :v try mod.intern(.{ .ptr = .{
.ty = slice_errors_ty.toIntern(),
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, vals.len)).toIntern(),
} });
} else .none;
const errors_val = try mod.intern(.{ .opt = .{
@@ -16438,6 +16423,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.is_const = true,
})).toIntern(),
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(),
} });
};
@@ -17141,7 +17127,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
if (try sema.resolveMaybeUndefVal(operand)) |val| {
return if (val.isUndef(mod))
sema.addConstUndef(Type.bool)
- else if (val.toBool(mod))
+ else if (val.toBool())
Air.Inst.Ref.bool_false
else
Air.Inst.Ref.bool_true;
@@ -17169,9 +17155,9 @@ fn zirBoolBr(
const gpa = sema.gpa;
if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
- if (is_bool_or and lhs_val.toBool(mod)) {
+ if (is_bool_or and lhs_val.toBool()) {
return Air.Inst.Ref.bool_true;
- } else if (!is_bool_or and !lhs_val.toBool(mod)) {
+ } else if (!is_bool_or and !lhs_val.toBool()) {
return Air.Inst.Ref.bool_false;
}
// comptime-known left-hand side. No need for a block here; the result
@@ -17215,9 +17201,9 @@ fn zirBoolBr(
const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| {
- if (is_bool_or and rhs_val.toBool(mod)) {
+ if (is_bool_or and rhs_val.toBool()) {
return Air.Inst.Ref.bool_true;
- } else if (!is_bool_or and !rhs_val.toBool(mod)) {
+ } else if (!is_bool_or and !rhs_val.toBool()) {
return Air.Inst.Ref.bool_false;
}
}
@@ -17371,7 +17357,7 @@ fn zirCondbr(
const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src);
if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| {
- const body = if (cond_val.toBool(mod)) then_body else else_body;
+ const body = if (cond_val.toBool()) then_body else else_body;
try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src);
// We use `analyzeBodyInner` since we want to propagate any possible
@@ -17444,7 +17430,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
if (is_non_err != .none) {
const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
- if (is_non_err_val.toBool(mod)) {
+ if (is_non_err_val.toBool()) {
return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false);
}
// We can analyze the body directly in the parent block because we know there are
@@ -17491,7 +17477,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
if (is_non_err != .none) {
const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
- if (is_non_err_val.toBool(mod)) {
+ if (is_non_err_val.toBool()) {
return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false);
}
// We can analyze the body directly in the parent block because we know there are
@@ -18858,7 +18844,7 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand = try sema.resolveInst(inst_data.operand);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(Type.u1);
- if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1));
+ if (val.toBool()) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1));
return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
}
return block.addUnOp(.bool_to_int, operand);
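With the module-free `toBool`, the fold above turns a comptime-known bool straight into a `u1` constant. A behavior-style sketch of what this enables (not a test from this commit):

    const std = @import("std");

    test "@boolToInt folds comptime-known operands" {
        try std.testing.expect(@boolToInt(true) == 1);
        try std.testing.expect(@boolToInt(false) == 0);
    }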
@@ -19171,12 +19157,12 @@ fn zirReify(
const ty = try mod.ptrType(.{
.size = ptr_size,
- .is_const = is_const_val.toBool(mod),
- .is_volatile = is_volatile_val.toBool(mod),
+ .is_const = is_const_val.toBool(),
+ .is_volatile = is_volatile_val.toBool(),
.alignment = abi_align,
.address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
.elem_type = elem_ty.toIntern(),
- .is_allowzero = is_allowzero_val.toBool(mod),
+ .is_allowzero = is_allowzero_val.toBool(),
.sentinel = actual_sentinel,
});
return sema.addType(ty);
@@ -19267,7 +19253,7 @@ fn zirReify(
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}
- return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod));
+ return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
},
.Enum => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
@@ -19305,7 +19291,7 @@ fn zirReify(
.namespace = .none,
.fields_len = fields_len,
.has_values = true,
- .tag_mode = if (!is_exhaustive_val.toBool(mod))
+ .tag_mode = if (!is_exhaustive_val.toBool())
.nonexhaustive
else
.explicit,
@@ -19619,12 +19605,12 @@ fn zirReify(
const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("return_type").?);
const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("params").?);
- const is_generic = is_generic_val.toBool(mod);
+ const is_generic = is_generic_val.toBool();
if (is_generic) {
return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{});
}
- const is_var_args = is_var_args_val.toBool(mod);
+ const is_var_args = is_var_args_val.toBool();
const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
if (is_var_args and cc != .C) {
return sema.fail(block, src, "varargs functions must have C calling convention", .{});
@@ -19653,9 +19639,9 @@ fn zirReify(
const arg_val = arg.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// is_generic: bool,
- const arg_is_generic = arg_val[0].toBool(mod);
+ const arg_is_generic = arg_val[0].toBool();
// is_noalias: bool,
- const arg_is_noalias = arg_val[1].toBool(mod);
+ const arg_is_noalias = arg_val[1].toBool();
// type: ?type,
const param_type_opt_val = arg_val[2];
@@ -19783,9 +19769,9 @@ fn reifyStruct(
if (layout == .Packed) {
if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
- if (is_comptime_val.toBool(mod)) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
+ if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
}
- if (layout == .Extern and is_comptime_val.toBool(mod)) {
+ if (layout == .Extern and is_comptime_val.toBool()) {
return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
}
@@ -19827,7 +19813,7 @@ fn reifyStruct(
opt_val;
break :blk try payload_val.copy(new_decl_arena_allocator);
} else Value.@"unreachable";
- if (is_comptime_val.toBool(mod) and default_val.toIntern() == .unreachable_value) {
+ if (is_comptime_val.toBool() and default_val.toIntern() == .unreachable_value) {
return sema.fail(block, src, "comptime field without default initialization value", .{});
}
@@ -19836,7 +19822,7 @@ fn reifyStruct(
.ty = field_ty,
.abi_align = abi_align,
.default_val = default_val,
- .is_comptime = is_comptime_val.toBool(mod),
+ .is_comptime = is_comptime_val.toBool(),
.offset = undefined,
};
@@ -20400,13 +20386,17 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) {
return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
}
- if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) {
- return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{
- .ty = dest_ty.toIntern(),
- .val = operand_val.toIntern(),
- } })).toValue());
- }
- return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty));
+ return sema.addConstant(aligned_dest_ty, try mod.getCoerced(switch (mod.intern_pool.indexToKey(operand_val.toIntern())) {
+ .undef, .ptr => operand_val,
+ .opt => |opt| switch (opt.val) {
+ .none => if (dest_ty.ptrAllowsZero(mod))
+ Value.zero_usize
+ else
+ return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}),
+ else => opt.val.toValue(),
+ },
+ else => unreachable,
+ }, aligned_dest_ty));
}
try sema.requireRuntimeBlock(block, src, null);
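The rewritten comptime path unwraps an optional operand before coercing: a null optional maps to address zero when the destination allows zero, and is a compile error otherwise. A sketch of the allowed case, assuming the optional-pointer operand form is accepted by pointer-cast validation:

    const std = @import("std");

    test "@ptrCast null optional to an allowzero pointer" {
        const p: ?*const u32 = null;
        const q = @ptrCast(*allowzero const u32, p); // null becomes address 0
        try std.testing.expect(@ptrToInt(q) == 0);
    }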
@@ -20534,10 +20524,10 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(dest_ty);
if (!is_vector) {
- return sema.addConstant(
- dest_ty,
+ return sema.addConstant(dest_ty, try mod.getCoerced(
try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod),
- );
+ dest_ty,
+ ));
}
const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
for (elems, 0..) |*elem, i| {
@@ -21410,7 +21400,10 @@ fn zirCmpxchg(
// special case zero bit types
if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
- return sema.addConstant(result_ty, Value.null);
+ return sema.addConstant(result_ty, (try mod.intern(.{ .opt = .{
+ .ty = result_ty.toIntern(),
+ .val = .none,
+ } })).toValue());
}
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
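For an element type with only one possible value, the result above is now a properly typed optional null rather than the untyped `Value.null`. In behavior-test form (a sketch; `u0` has exactly one possible value):

    const std = @import("std");

    test "cmpxchg on a zero-bit integer type is comptime-known" {
        var x: u0 = 0;
        const res = @cmpxchgStrong(u0, &x, 0, 0, .SeqCst, .SeqCst);
        try std.testing.expect(res == null); // the swap can never fail
    }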
@@ -21633,8 +21626,7 @@ fn analyzeShuffle(
.{ b_len, b_src, b_ty },
};
- var i: usize = 0;
- while (i < mask_len) : (i += 1) {
+ for (0..@intCast(usize, mask_len)) |i| {
const elem = try mask.elemValue(sema.mod, i);
if (elem.isUndef(mod)) continue;
const int = elem.toSignedInt(mod);
@@ -21670,7 +21662,7 @@ fn analyzeShuffle(
if (try sema.resolveMaybeUndefVal(a)) |a_val| {
if (try sema.resolveMaybeUndefVal(b)) |b_val| {
const values = try sema.arena.alloc(InternPool.Index, mask_len);
- for (values) |*value| {
+ for (values, 0..) |*value, i| {
const mask_elem_val = try mask.elemValue(sema.mod, i);
if (mask_elem_val.isUndef(mod)) {
value.* = try mod.intern(.{ .undef = elem_ty.toIntern() });
@@ -21698,11 +21690,10 @@ fn analyzeShuffle(
const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
- i = 0;
- while (i < min_len) : (i += 1) {
+ for (0..@intCast(usize, min_len)) |i| {
expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
}
- while (i < max_len) : (i += 1) {
+ for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| {
expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
}
const expand_mask = try mod.intern(.{ .aggregate = .{
@@ -21783,7 +21774,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
const pred_elem_val = try pred_val.elemValue(mod, i);
- const should_choose_a = pred_elem_val.toBool(mod);
+ const should_choose_a = pred_elem_val.toBool();
elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod);
}
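`@select` with a comptime-known predicate now reads each lane with the module-free `toBool`. A sketch of the folded behavior:

    const std = @import("std");

    test "@select folds with a comptime-known predicate" {
        const pred = @Vector(4, bool){ true, false, true, false };
        const a = @Vector(4, u32){ 1, 2, 3, 4 };
        const b = @Vector(4, u32){ 5, 6, 7, 8 };
        const r = @select(u32, pred, a, b);
        try std.testing.expect(@reduce(.And, r == @Vector(4, u32){ 1, 6, 3, 8 }));
    }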
@@ -22853,15 +22844,15 @@ fn zirVarExtended(
else
uncasted_init;
- break :blk (try sema.resolveMaybeUndefVal(init)) orelse
- return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known");
- } else Value.@"unreachable";
+ break :blk ((try sema.resolveMaybeUndefVal(init)) orelse
+ return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known")).toIntern();
+ } else .none;
try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{
.ty = var_ty.toIntern(),
- .init = init_val.toIntern(),
+ .init = init_val,
.decl = sema.owner_decl_index,
.lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString(
sema.gpa,
@@ -23284,7 +23275,7 @@ fn resolveExternOptions(
.name = name,
.library_name = library_name,
.linkage = linkage,
- .is_thread_local = is_thread_local_val.toBool(mod),
+ .is_thread_local = is_thread_local_val.toBool(),
};
}
@@ -26190,7 +26181,7 @@ fn coerceExtra(
.addr = .{ .int = (if (dest_info.@"align" != 0)
try mod.intValue(Type.usize, dest_info.@"align")
else
- try dest_info.pointee_type.lazyAbiAlignment(mod)).toIntern() },
+ try mod.getCoerced(try dest_info.pointee_type.lazyAbiAlignment(mod), Type.usize)).toIntern() },
.len = (try mod.intValue(Type.usize, 0)).toIntern(),
} })).toValue());
}
@@ -27785,7 +27776,7 @@ fn beginComptimePtrMutation(
const payload = try arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .eu_payload },
- .data = Value.undef,
+ .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
};
val_ptr.* = Value.initPayload(&payload.base);
@@ -27824,7 +27815,7 @@ fn beginComptimePtrMutation(
const payload = try arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .opt_payload },
- .data = Value.undef,
+ .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
};
val_ptr.* = Value.initPayload(&payload.base);
@@ -27898,30 +27889,6 @@ fn beginComptimePtrMutation(
}
switch (val_ptr.ip_index) {
- .undef => {
- // An array has been initialized to undefined at comptime and now we
- // are for the first time setting an element. We must change the representation
- // of the array from `undef` to `array`.
- const arena = parent.beginArena(sema.mod);
- defer parent.finishArena(sema.mod);
-
- const array_len_including_sentinel =
- try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
- const elems = try arena.alloc(Value, array_len_including_sentinel);
- @memset(elems, Value.undef);
-
- val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
-
- return beginComptimePtrMutationInner(
- sema,
- block,
- src,
- elem_ty,
- &elems[elem_ptr.index],
- ptr_elem_ty,
- parent.mut_decl,
- );
- },
.none => switch (val_ptr.tag()) {
.bytes => {
// An array is memory-optimized to store a slice of bytes, but we are about
@@ -27999,7 +27966,33 @@ fn beginComptimePtrMutation(
else => unreachable,
},
- else => unreachable,
+ else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
+ .undef => {
+ // An array has been initialized to undefined at comptime and now we
+ // are for the first time setting an element. We must change the representation
+ // of the array from `undef` to `array`.
+ const arena = parent.beginArena(sema.mod);
+ defer parent.finishArena(sema.mod);
+
+ const array_len_including_sentinel =
+ try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
+ const elems = try arena.alloc(Value, array_len_including_sentinel);
+ @memset(elems, (try mod.intern(.{ .undef = elem_ty.toIntern() })).toValue());
+
+ val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+
+ return beginComptimePtrMutationInner(
+ sema,
+ block,
+ src,
+ elem_ty,
+ &elems[elem_ptr.index],
+ ptr_elem_ty,
+ parent.mut_decl,
+ );
+ },
+ else => unreachable,
+ },
}
},
else => {
@@ -28052,83 +28045,6 @@ fn beginComptimePtrMutation(
var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty);
switch (parent.pointee) {
.direct => |val_ptr| switch (val_ptr.ip_index) {
- .undef => {
- // A struct or union has been initialized to undefined at comptime and now we
- // are for the first time setting a field. We must change the representation
- // of the struct/union from `undef` to `struct`/`union`.
- const arena = parent.beginArena(sema.mod);
- defer parent.finishArena(sema.mod);
-
- switch (parent.ty.zigTypeTag(mod)) {
- .Struct => {
- const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
- @memset(fields, Value.undef);
-
- val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
-
- return beginComptimePtrMutationInner(
- sema,
- block,
- src,
- parent.ty.structFieldType(field_index, mod),
- &fields[field_index],
- ptr_elem_ty,
- parent.mut_decl,
- );
- },
- .Union => {
- const payload = try arena.create(Value.Payload.Union);
- const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
- payload.* = .{ .data = .{
- .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
- .val = Value.undef,
- } };
-
- val_ptr.* = Value.initPayload(&payload.base);
-
- return beginComptimePtrMutationInner(
- sema,
- block,
- src,
- parent.ty.structFieldType(field_index, mod),
- &payload.data.val,
- ptr_elem_ty,
- parent.mut_decl,
- );
- },
- .Pointer => {
- assert(parent.ty.isSlice(mod));
- val_ptr.* = try Value.Tag.slice.create(arena, .{
- .ptr = Value.undef,
- .len = Value.undef,
- });
-
- switch (field_index) {
- Value.slice_ptr_index => return beginComptimePtrMutationInner(
- sema,
- block,
- src,
- parent.ty.slicePtrFieldType(mod),
- &val_ptr.castTag(.slice).?.data.ptr,
- ptr_elem_ty,
- parent.mut_decl,
- ),
- Value.slice_len_index => return beginComptimePtrMutationInner(
- sema,
- block,
- src,
- Type.usize,
- &val_ptr.castTag(.slice).?.data.len,
- ptr_elem_ty,
- parent.mut_decl,
- ),
-
- else => unreachable,
- }
- },
- else => unreachable,
- }
- },
.empty_struct => {
const duped = try sema.arena.create(Value);
duped.* = val_ptr.*;
@@ -28210,10 +28126,92 @@ fn beginComptimePtrMutation(
else => unreachable,
},
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) {
+ .undef => {
+ // A struct or union has been initialized to undefined at comptime and now we
+ // are for the first time setting a field. We must change the representation
+ // of the struct/union from `undef` to `struct`/`union`.
+ const arena = parent.beginArena(sema.mod);
+ defer parent.finishArena(sema.mod);
+
+ switch (parent.ty.zigTypeTag(mod)) {
+ .Struct => {
+ const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
+ for (fields, 0..) |*field, i| field.* = (try mod.intern(.{
+ .undef = parent.ty.structFieldType(i, mod).toIntern(),
+ })).toValue();
+
+ val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
+
+ return beginComptimePtrMutationInner(
+ sema,
+ block,
+ src,
+ parent.ty.structFieldType(field_index, mod),
+ &fields[field_index],
+ ptr_elem_ty,
+ parent.mut_decl,
+ );
+ },
+ .Union => {
+ const payload = try arena.create(Value.Payload.Union);
+ const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
+ const payload_ty = parent.ty.structFieldType(field_index, mod);
+ payload.* = .{ .data = .{
+ .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
+ .val = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
+ } };
+ val_ptr.* = Value.initPayload(&payload.base);
+
+ return beginComptimePtrMutationInner(
+ sema,
+ block,
+ src,
+ payload_ty,
+ &payload.data.val,
+ ptr_elem_ty,
+ parent.mut_decl,
+ );
+ },
+ .Pointer => {
+ assert(parent.ty.isSlice(mod));
+ const ptr_ty = parent.ty.slicePtrFieldType(mod);
+ val_ptr.* = try Value.Tag.slice.create(arena, .{
+ .ptr = (try mod.intern(.{ .undef = ptr_ty.toIntern() })).toValue(),
+ .len = (try mod.intern(.{ .undef = .usize_type })).toValue(),
+ });
+
+ switch (field_index) {
+ Value.slice_ptr_index => return beginComptimePtrMutationInner(
+ sema,
+ block,
+ src,
+ ptr_ty,
+ &val_ptr.castTag(.slice).?.data.ptr,
+ ptr_elem_ty,
+ parent.mut_decl,
+ ),
+ Value.slice_len_index => return beginComptimePtrMutationInner(
+ sema,
+ block,
+ src,
+ Type.usize,
+ &val_ptr.castTag(.slice).?.data.len,
+ ptr_elem_ty,
+ parent.mut_decl,
+ ),
+
+ else => unreachable,
+ }
+ },
+ else => unreachable,
+ }
+ },
else => unreachable,
},
- else => unreachable,
},
.reinterpret => |reinterpret| {
const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod);
@@ -28370,18 +28368,22 @@ fn beginComptimePtrLoad(
(try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
if (coerce_in_mem_ok) {
- const payload_val = switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
- .error_union => |error_union| switch (error_union.val) {
- .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}),
- .payload => |payload| payload,
- },
- .opt => |opt| switch (opt.val) {
- .none => return sema.fail(block, src, "attempt to use null value", .{}),
- else => opt.val,
- },
- else => unreachable,
+ const payload_val = switch (tv.val.ip_index) {
+ .none => tv.val.cast(Value.Payload.SubValue).?.data,
+ .null_value => return sema.fail(block, src, "attempt to use null value", .{}),
+ else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}),
+ .payload => |payload| payload,
+ },
+ .opt => |opt| switch (opt.val) {
+ .none => return sema.fail(block, src, "attempt to use null value", .{}),
+ else => opt.val,
+ },
+ else => unreachable,
+ }.toValue(),
};
- tv.* = TypedValue{ .ty = payload_ty, .val = payload_val.toValue() };
+ tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
break :blk deref;
}
}
@@ -28960,7 +28962,7 @@ fn coerceArrayLike(
if (in_memory_result == .ok) {
if (try sema.resolveMaybeUndefVal(inst)) |inst_val| {
// These types share the same comptime value representation.
- return sema.addConstant(dest_ty, inst_val);
+ return sema.addConstant(dest_ty, try mod.getCoerced(inst_val, dest_ty));
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addBitCast(dest_ty, inst);
@@ -29024,7 +29026,7 @@ fn coerceTupleToArray(
return sema.failWithOwnedErrorMsg(msg);
}
- const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod));
+ const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len);
const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
const dest_elem_ty = dest_ty.childType(mod);
@@ -29430,7 +29432,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo
const ptr_ty = try mod.ptrType(.{
.elem_type = decl_tv.ty.toIntern(),
.alignment = InternPool.Alignment.fromByteUnits(decl.@"align"),
- .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else false,
+ .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true,
.address_space = decl.@"addrspace",
});
if (analyze_fn_body) {
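The `is_const` flip above means a pointer to a non-variable decl is now typed const by default. At the language level (a sketch):

    const std = @import("std");
    const answer: u32 = 42;

    test "address of a const decl is a const pointer" {
        try std.testing.expect(@TypeOf(&answer) == *const u32);
    }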
@@ -29513,7 +29515,7 @@ fn analyzeLoad(
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| {
- return sema.addConstant(elem_ty, elem_val);
+ return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty));
}
}
@@ -32610,8 +32612,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var int_tag_ty: Type = undefined;
var enum_field_names: []InternPool.NullTerminatedString = &.{};
- var enum_field_vals: []InternPool.Index = &.{};
- var enum_field_vals_map: std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false) = .{};
+ var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
var explicit_tags_seen: []bool = &.{};
var explicit_enum_info: ?InternPool.Key.EnumType = null;
if (tag_type_ref != .none) {
@@ -32638,9 +32639,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
};
return sema.failWithOwnedErrorMsg(msg);
}
+ enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
+ try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len);
}
- enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
- enum_field_vals = try sema.arena.alloc(InternPool.Index, fields_len);
} else {
// The provided type is the enum tag type.
union_obj.tag_ty = provided_ty;
@@ -32712,8 +32713,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
break :blk try sema.resolveInst(tag_ref);
} else .none;
- if (enum_field_vals.len != 0) {
- const copied_val = if (tag_ref != .none) blk: {
+ if (enum_field_vals.capacity() > 0) {
+ const enum_tag_val = if (tag_ref != .none) blk: {
const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
@@ -32737,16 +32738,12 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
break :blk val;
};
- enum_field_vals[field_i] = copied_val.toIntern();
- const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{
- .ty = int_tag_ty,
- .mod = mod,
- });
+ const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern());
if (gop.found_existing) {
const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy;
const msg = msg: {
- const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)});
+ const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)});
errdefer msg.destroy(gpa);
try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{});
break :msg msg;
@@ -32907,8 +32904,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
};
return sema.failWithOwnedErrorMsg(msg);
}
- } else if (enum_field_vals.len != 0) {
- union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals, union_obj);
+ } else if (enum_field_vals.count() > 0) {
+ union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_obj);
} else {
union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj);
}
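Because enum tag values are now interned, duplicate detection can key directly on `InternPool.Index` with a stock hash map instead of the removed `Value.ArrayHashContext`. The shape of the check (locals are hypothetical stand-ins for the ones in the hunks above):

    var seen: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
    try seen.ensureTotalCapacity(sema.arena, fields_len);
    const gop = seen.getOrPutAssumeCapacity(enum_tag_val.toIntern());
    if (gop.found_existing) {
        // gop.index identifies the field that first claimed this tag value.
    }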
@@ -33180,8 +33177,12 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.struct_type => |struct_type| {
const resolved_ty = try sema.resolveTypeFields(ty);
if (mod.structPtrUnwrap(struct_type.index)) |s| {
- for (s.fields.values(), 0..) |field, i| {
- if (field.is_comptime) continue;
+ const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count());
+ for (field_vals, s.fields.values(), 0..) |*field_val, field, i| {
+ if (field.is_comptime) {
+ field_val.* = try field.default_val.intern(field.ty, mod);
+ continue;
+ }
if (field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
@@ -33192,24 +33193,25 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{});
return sema.failWithOwnedErrorMsg(msg);
}
- if ((try sema.typeHasOnePossibleValue(field.ty)) == null) {
- return null;
- }
+ if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| {
+ field_val.* = try field_opv.intern(field.ty, mod);
+ } else return null;
}
+
+ // In this case the struct has no runtime-known fields and
+ // therefore has one possible value.
+ return (try mod.intern(.{ .aggregate = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .elems = field_vals },
+ } })).toValue();
}
- // In this case the struct has no runtime-known fields and
- // therefore has one possible value.
- // TODO: this is incorrect for structs with comptime fields, I think
- // we should use a temporary allocator to construct an aggregate that
- // is populated with the comptime values and then intern that value here.
- // This TODO is repeated in the redundant implementation of
- // one-possible-value in type.zig.
- const empty = try mod.intern(.{ .aggregate = .{
+ // In this case the struct has no fields at all and
+ // therefore has one possible value.
+ return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } });
- return empty.toValue();
+ } })).toValue();
},
.anon_struct_type => |tuple| {
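The struct case now materializes a real one-possible-value aggregate, with comptime fields contributing their default values instead of being skipped. Observable behavior (a sketch):

    const std = @import("std");

    test "struct of zero-bit fields has one possible value" {
        const S = struct { a: void, b: u0 };
        try std.testing.expect(@sizeOf(S) == 0);
    }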
@@ -33268,20 +33270,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
},
.auto, .explicit => switch (enum_type.names.len) {
0 => return Value.@"unreachable",
- 1 => {
- if (enum_type.values.len == 0) {
- const only = try mod.intern(.{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = try mod.intern(.{ .int = .{
- .ty = enum_type.tag_ty,
- .storage = .{ .u64 = 0 },
- } }),
- } });
- return only.toValue();
- } else {
- return enum_type.values[0].toValue();
- }
- },
+ 1 => return try mod.getCoerced((if (enum_type.values.len == 0)
+ try mod.intern(.{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = 0 },
+ } })
+ else
+ enum_type.values[0]).toValue(), ty),
else => return null,
},
},
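For enums, the single-name case now coerces the lone tag integer into the enum type via `getCoerced` rather than hand-assembling an `enum_tag` key. Behaviorally (a sketch):

    const std = @import("std");

    test "single-field enum is zero-bit" {
        const E = enum { only };
        try std.testing.expect(@sizeOf(E) == 0);
        try std.testing.expect(@typeInfo(E).Enum.fields.len == 1);
    }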
@@ -33427,7 +33422,7 @@ fn analyzeComptimeAlloc(
// There will be stores before the first load, but they may be to sub-elements or
// sub-fields. So we need to initialize with undef to allow the mechanism to expand
// into fields/elements and have those overridden with stored values.
- Value.undef,
+ (try sema.mod.intern(.{ .undef = var_type.toIntern() })).toValue(),
alignment,
);
const decl = sema.mod.declPtr(decl_index);
@@ -34028,16 +34023,16 @@ fn intSubWithOverflow(
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
- of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+ of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
}
return Value.OverflowArithmeticResult{
.overflow_bit = (try mod.intern(.{ .aggregate = .{
- .ty = ty.toIntern(),
+ .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
} })).toValue(),
.wrapped_result = (try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+ .ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} })).toValue(),
};
@@ -34066,7 +34061,7 @@ fn intSubWithOverflowScalar(
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
return Value.OverflowArithmeticResult{
- .overflow_bit = Value.boolToInt(overflowed),
+ .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
.wrapped_result = wrapped_result,
};
}
@@ -34273,16 +34268,16 @@ fn intAddWithOverflow(
const lhs_elem = try lhs.elemValue(sema.mod, i);
const rhs_elem = try rhs.elemValue(sema.mod, i);
const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
- of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+ of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
}
return Value.OverflowArithmeticResult{
.overflow_bit = (try mod.intern(.{ .aggregate = .{
- .ty = ty.toIntern(),
+ .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
} })).toValue(),
.wrapped_result = (try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+ .ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} })).toValue(),
};
@@ -34311,7 +34306,7 @@ fn intAddWithOverflowScalar(
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
const result = try mod.intValue_big(ty, result_bigint.toConst());
return Value.OverflowArithmeticResult{
- .overflow_bit = Value.boolToInt(overflowed),
+ .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
.wrapped_result = result,
};
}
@@ -34384,7 +34379,7 @@ fn compareVector(
scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
}
return (try mod.intern(.{ .aggregate = .{
- .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .u1_type })).toIntern(),
+ .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
.storage = .{ .elems = result_data },
} })).toValue();
}
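compareVector now types its result as a vector of bool, matching what lane-wise comparison yields in the language. For example (a sketch):

    const std = @import("std");

    test "vector comparison yields a bool vector" {
        const a = @Vector(3, i32){ 1, 2, 3 };
        const b = @Vector(3, i32){ 1, 0, 3 };
        const eq = a == b;
        try std.testing.expect(@TypeOf(eq) == @Vector(3, bool));
        try std.testing.expect(eq[0] and !eq[1] and eq[2]);
    }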
diff --git a/src/codegen.zig b/src/codegen.zig
index 1ae6d6ce06..30ad8ab6e8 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -957,7 +957,7 @@ pub fn genTypedValue(
}
},
.Bool => {
- return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool(mod)) });
+ return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
},
.Optional => {
if (typed_value.ty.isPtrLikeOptional(mod)) {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 1d3749f6a3..41db3e7a04 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2003,7 +2003,7 @@ pub const Object = struct {
mod.intern_pool.stringToSlice(tuple.names[i])
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
- defer gpa.free(field_name);
+ defer if (tuple.names.len == 0) gpa.free(field_name);
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
@@ -2461,13 +2461,13 @@ pub const DeclGen = struct {
if (decl.@"linksection") |section| global.setSection(section);
assert(decl.has_tv);
const init_val = if (decl.val.getVariable(mod)) |variable| init_val: {
- break :init_val variable.init.toValue();
+ break :init_val variable.init;
} else init_val: {
global.setGlobalConstant(.True);
- break :init_val decl.val;
+ break :init_val decl.val.toIntern();
};
- if (init_val.toIntern() != .unreachable_value) {
- const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
+ if (init_val != .none) {
+ const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() });
if (global.globalGetValueType() == llvm_init.typeOf()) {
global.setInitializer(llvm_init);
} else {
@@ -2748,7 +2748,7 @@ pub const DeclGen = struct {
if (std.debug.runtime_safety and false) check: {
if (t.zigTypeTag(mod) == .Opaque) break :check;
if (!t.hasRuntimeBits(mod)) break :check;
- if (!llvm_ty.isSized().toBool(mod)) break :check;
+ if (!llvm_ty.isSized().toBool()) break :check;
const zig_size = t.abiSize(mod);
const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty);
@@ -3239,7 +3239,7 @@ pub const DeclGen = struct {
=> unreachable, // non-runtime values
.false, .true => {
const llvm_type = try dg.lowerType(tv.ty);
- return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull();
+ return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
},
},
.variable,
@@ -3522,15 +3522,19 @@ pub const DeclGen = struct {
const elem_ty = vector_type.child.toType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len);
defer dg.gpa.free(llvm_elems);
+ const llvm_i8 = dg.context.intType(8);
for (llvm_elems, 0..) |*llvm_elem, i| {
- llvm_elem.* = try dg.lowerValue(.{
- .ty = elem_ty,
- .val = switch (aggregate.storage) {
- .bytes => unreachable,
- .elems => |elems| elems[i],
- .repeated_elem => |elem| elem,
- }.toValue(),
- });
+ llvm_elem.* = switch (aggregate.storage) {
+ .bytes => |bytes| llvm_i8.constInt(bytes[i], .False),
+ .elems => |elems| try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = elems[i].toValue(),
+ }),
+ .repeated_elem => |elem| try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = elem.toValue(),
+ }),
+ };
}
return llvm.constVector(
llvm_elems.ptr,
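The vector lowering above distinguishes the three storage forms an interned aggregate can take; `.bytes` now lowers each byte directly to an i8 constant instead of round-tripping through `lowerValue`. As a reminder of the shapes involved (a sketch of the shape, not the real declaration):

    // How InternPool can store aggregate elements:
    const Storage = union(enum) {
        bytes: []const u8, // packed byte payloads, e.g. u8 arrays
        elems: []const InternPool.Index, // one interned value per element
        repeated_elem: InternPool.Index, // a single splatted value
    };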
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 64a0a7ec57..94ea8b7f89 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -654,7 +654,7 @@ pub const DeclGen = struct {
.@"unreachable",
.generic_poison,
=> unreachable, // non-runtime values
- .false, .true => try self.addConstBool(val.toBool(mod)),
+ .false, .true => try self.addConstBool(val.toBool()),
},
.variable,
.extern_func,
@@ -974,7 +974,6 @@ pub const DeclGen = struct {
/// This function should only be called during function code generation.
fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
const mod = self.module;
- const target = self.getTarget();
const result_ty_ref = try self.resolveType(ty, repr);
log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) });
@@ -991,51 +990,9 @@
return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod));
}
},
- .Bool => switch (repr) {
- .direct => return try self.spv.constBool(result_ty_ref, val.toBool(mod)),
- .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))),
- },
- .Float => return switch (ty.floatBits(target)) {
- 16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
- 32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
- 64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
- 80, 128 => unreachable, // TODO
- else => unreachable,
- },
- .ErrorSet => {
- const value = switch (val.tag()) {
- .@"error" => blk: {
- const err_name = val.castTag(.@"error").?.data.name;
- const kv = try self.module.getErrorValue(err_name);
- break :blk @intCast(u16, kv.value);
- },
- .zero => 0,
- else => unreachable,
- };
-
- return try self.spv.constInt(result_ty_ref, value);
- },
- .ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
- const is_pl = val.errorUnionIsPayload();
- const error_val = if (!is_pl) val else Value.initTag(.zero);
-
- const eu_layout = self.errorUnionLayout(payload_ty);
- if (!eu_layout.payload_has_bits) {
- return try self.constant(Type.anyerror, error_val, repr);
- }
-
- const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
-
- var members: [2]IdRef = undefined;
- if (eu_layout.error_first) {
- members[0] = try self.constant(Type.anyerror, error_val, .indirect);
- members[1] = try self.constant(payload_ty, payload_val, .indirect);
- } else {
- members[0] = try self.constant(payload_ty, payload_val, .indirect);
- members[1] = try self.constant(Type.anyerror, error_val, .indirect);
- }
- return try self.spv.constComposite(result_ty_ref, &members);
- },
+ .Bool => switch (repr) {
+ .direct => return try self.spv.constBool(result_ty_ref, val.toBool()),
+ .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool())),
+ },
// TODO: We can handle most pointers here (decl refs etc), because now they emit an extra
// OpVariable that is not really required.
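In the bool case above, `.direct` lowers to a SPIR-V bool constant while `.indirect` stores the value as an integer for memory-backed representations; both now read the value through the module-free `toBool`, mirroring every other backend touched by this commit.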
diff --git a/src/type.zig b/src/type.zig
index a9ad8b94fd..ebf331ef88 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2481,25 +2481,32 @@ pub const Type = struct {
.struct_type => |struct_type| {
if (mod.structPtrUnwrap(struct_type.index)) |s| {
assert(s.haveFieldTypes());
- for (s.fields.values()) |field| {
- if (field.is_comptime) continue;
- if ((try field.ty.onePossibleValue(mod)) != null) continue;
- return null;
+ const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count());
+ defer mod.gpa.free(field_vals);
+ for (field_vals, s.fields.values()) |*field_val, field| {
+ if (field.is_comptime) {
+ field_val.* = try field.default_val.intern(field.ty, mod);
+ continue;
+ }
+ if (try field.ty.onePossibleValue(mod)) |field_opv| {
+ field_val.* = try field_opv.intern(field.ty, mod);
+ } else return null;
}
+
+ // In this case the struct has no runtime-known fields and
+ // therefore has one possible value.
+ return (try mod.intern(.{ .aggregate = .{
+ .ty = ty.toIntern(),
+ .storage = .{ .elems = field_vals },
+ } })).toValue();
}
- // In this case the struct has no runtime-known fields and
- // therefore has one possible value.
- // TODO: this is incorrect for structs with comptime fields, I think
- // we should use a temporary allocator to construct an aggregate that
- // is populated with the comptime values and then intern that value here.
- // This TODO is repeated in the redundant implementation of
- // one-possible-value logic in Sema.zig.
- const empty = try mod.intern(.{ .aggregate = .{
+ // In this case the struct has no fields at all and
+ // therefore has one possible value.
+ return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } });
- return empty.toValue();
+ } })).toValue();
},
.anon_struct_type => |tuple| {
diff --git a/src/value.zig b/src/value.zig
index 23b90f40df..dda95cbb44 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -385,7 +385,7 @@ pub const Value = struct {
} });
},
.aggregate => {
- const old_elems = val.castTag(.aggregate).?.data;
+ const old_elems = val.castTag(.aggregate).?.data[0..ty.arrayLen(mod)];
const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
defer mod.gpa.free(new_elems);
const ty_key = mod.intern_pool.indexToKey(ty.toIntern());
@@ -656,7 +656,7 @@ pub const Value = struct {
};
}
- pub fn toBool(val: Value, _: *const Module) bool {
+ pub fn toBool(val: Value) bool {
return switch (val.toIntern()) {
.bool_true => true,
.bool_false => false,
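Dropping the `*Module` parameter works because truth-ness is fully determined by the interned index. Call sites simply drop the argument (here `cond_val` is a hypothetical comptime-known bool value):

    // before this change: cond_val.toBool(mod)
    if (cond_val.toBool()) {
        // comptime-known true: take the then-branch
    }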
@@ -697,7 +697,7 @@ pub const Value = struct {
switch (ty.zigTypeTag(mod)) {
.Void => {},
.Bool => {
- buffer[0] = @boolToInt(val.toBool(mod));
+ buffer[0] = @boolToInt(val.toBool());
},
.Int, .Enum => {
const int_info = ty.intInfo(mod);
@@ -736,13 +736,20 @@ pub const Value = struct {
},
.Struct => switch (ty.containerLayout(mod)) {
.Auto => return error.IllDefinedMemoryLayout,
- .Extern => {
- const fields = ty.structFields(mod).values();
- const field_vals = val.castTag(.aggregate).?.data;
- for (fields, 0..) |field, i| {
- const off = @intCast(usize, ty.structFieldOffset(i, mod));
- try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
- }
+ .Extern => for (ty.structFields(mod).values(), 0..) |field, i| {
+ const off = @intCast(usize, ty.structFieldOffset(i, mod));
+ const field_val = switch (val.ip_index) {
+ .none => val.castTag(.aggregate).?.data[i],
+ else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
+ .bytes => |bytes| {
+ buffer[off] = bytes[i];
+ continue;
+ },
+ .elems => |elems| elems[i],
+ .repeated_elem => |elem| elem,
+ }.toValue(),
+ };
+ try writeToMemory(field_val, field.ty, mod, buffer[off..]);
},
.Packed => {
const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
@@ -812,7 +819,7 @@ pub const Value = struct {
.Little => bit_offset / 8,
.Big => buffer.len - bit_offset / 8 - 1,
};
- if (val.toBool(mod)) {
+ if (val.toBool()) {
buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8));
} else {
buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8));
@@ -1331,24 +1338,7 @@ pub const Value = struct {
.gt => {},
}
- const lhs_float = lhs.isFloat(mod);
- const rhs_float = rhs.isFloat(mod);
- if (lhs_float and rhs_float) {
- const lhs_tag = lhs.tag();
- const rhs_tag = rhs.tag();
- if (lhs_tag == rhs_tag) {
- const lhs_storage = mod.intern_pool.indexToKey(lhs.toIntern()).float.storage;
- const rhs_storage = mod.intern_pool.indexToKey(rhs.toIntern()).float.storage;
- const lhs128: f128 = switch (lhs_storage) {
- inline else => |x| x,
- };
- const rhs128: f128 = switch (rhs_storage) {
- inline else => |x| x,
- };
- return std.math.order(lhs128, rhs128);
- }
- }
- if (lhs_float or rhs_float) {
+ if (lhs.isFloat(mod) or rhs.isFloat(mod)) {
const lhs_f128 = lhs.toFloat(f128, mod);
const rhs_f128 = rhs.toFloat(f128, mod);
return std.math.order(lhs_f128, rhs_f128);
@@ -1669,86 +1659,6 @@ pub const Value = struct {
return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
}
- /// This function is used by hash maps and so treats floating-point NaNs as equal
- /// to each other, and not equal to other floating-point values.
- pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
- if (val.ip_index != .none) {
- // The InternPool data structure hashes based on Key to make interned objects
- // unique. An Index can be treated simply as u32 value for the
- // purpose of Type/Value hashing and equality.
- std.hash.autoHash(hasher, val.toIntern());
- return;
- }
- const zig_ty_tag = ty.zigTypeTag(mod);
- std.hash.autoHash(hasher, zig_ty_tag);
- if (val.isUndef(mod)) return;
- // The value is runtime-known and shouldn't affect the hash.
- if (val.isRuntimeValue(mod)) return;
-
- switch (zig_ty_tag) {
- .Opaque => unreachable, // Cannot hash opaque types
-
- .Void,
- .NoReturn,
- .Undefined,
- .Null,
- => {},
-
- .Type,
- .Float,
- .ComptimeFloat,
- .Bool,
- .Int,
- .ComptimeInt,
- .Pointer,
- .Optional,
- .ErrorUnion,
- .ErrorSet,
- .Enum,
- .EnumLiteral,
- .Fn,
- => unreachable, // handled via ip_index check above
- .Array, .Vector => {
- const len = ty.arrayLen(mod);
- const elem_ty = ty.childType(mod);
- var index: usize = 0;
- while (index < len) : (index += 1) {
- const elem_val = val.elemValue(mod, index) catch |err| switch (err) {
- // Will be solved when arrays and vectors get migrated to the intern pool.
- error.OutOfMemory => @panic("OOM"),
- };
- elem_val.hash(elem_ty, hasher, mod);
- }
- },
- .Struct => {
- switch (val.tag()) {
- .aggregate => {
- const field_values = val.castTag(.aggregate).?.data;
- for (field_values, 0..) |field_val, i| {
- const field_ty = ty.structFieldType(i, mod);
- field_val.hash(field_ty, hasher, mod);
- }
- },
- else => unreachable,
- }
- },
- .Union => {
- const union_obj = val.cast(Payload.Union).?.data;
- if (ty.unionTagType(mod)) |tag_ty| {
- union_obj.tag.hash(tag_ty, hasher, mod);
- }
- const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
- union_obj.val.hash(active_field_ty, hasher, mod);
- },
- .Frame => {
- @panic("TODO implement hashing frame values");
- },
- .AnyFrame => {
- @panic("TODO implement hashing anyframe values");
- },
- }
- }
-
/// This is a more conservative hash function that produces equal hashes for values
/// that can coerce into each other.
/// This function is used by hash maps and so treats floating-point NaNs as equal
@@ -1820,35 +1730,6 @@ pub const Value = struct {
}
}
- pub const ArrayHashContext = struct {
- ty: Type,
- mod: *Module,
-
- pub fn hash(self: @This(), val: Value) u32 {
- const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod };
- return @truncate(u32, other_context.hash(val));
- }
- pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool {
- _ = b_index;
- return a.eql(b, self.ty, self.mod);
- }
- };
-
- pub const HashContext = struct {
- ty: Type,
- mod: *Module,
-
- pub fn hash(self: @This(), val: Value) u64 {
- var hasher = std.hash.Wyhash.init(0);
- val.hash(self.ty, &hasher, self.mod);
- return hasher.final();
- }
-
- pub fn eql(self: @This(), a: Value, b: Value) bool {
- return a.eql(b, self.ty, self.mod);
- }
- };
-
pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
@@ -1919,14 +1800,25 @@ pub const Value = struct {
}
pub fn sliceLen(val: Value, mod: *Module) u64 {
- return mod.intern_pool.sliceLen(val.toIntern()).toValue().toUnsignedInt(mod);
+ const ptr = mod.intern_pool.indexToKey(val.toIntern()).ptr;
+ return switch (ptr.len) {
+ .none => switch (mod.intern_pool.indexToKey(switch (ptr.addr) {
+ .decl => |decl| mod.declPtr(decl).ty.toIntern(),
+ .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).ty.toIntern(),
+ .comptime_field => |comptime_field| mod.intern_pool.typeOf(comptime_field),
+ else => unreachable,
+ })) {
+ .array_type => |array_type| array_type.len,
+ else => 1,
+ },
+ else => ptr.len.toValue().toUnsignedInt(mod),
+ };
}
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
return switch (val.ip_index) {
- .undef => Value.undef,
.none => switch (val.tag()) {
.repeated => val.castTag(.repeated).?.data,
.aggregate => val.castTag(.aggregate).?.data[index],
@@ -1934,6 +1826,9 @@ pub const Value = struct {
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ .undef => |ty| (try mod.intern(.{
+ .undef = ty.toType().elemType2(mod).toIntern(),
+ })).toValue(),
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
.mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index),
@@ -2492,7 +2387,7 @@ pub const Value = struct {
}
return OverflowArithmeticResult{
- .overflow_bit = boolToInt(overflowed),
+ .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
.wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
};
}
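With the overflow bit now a typed `u1` value, comptime folding matches the runtime tuple produced by the overflow builtins. A behavior-style sketch:

    const std = @import("std");

    test "@addWithOverflow yields a u1 overflow bit" {
        const r = @addWithOverflow(@as(u8, 255), 1);
        try std.testing.expect(@TypeOf(r[1]) == u1);
        try std.testing.expect(r[0] == 0 and r[1] == 1); // wrapped result, overflow set
    }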
@@ -2645,7 +2540,8 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (val.isUndef(mod)) return Value.undef;
+ if (val.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());
const info = ty.intInfo(mod);
@@ -2687,7 +2583,8 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -2725,7 +2622,8 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));
const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);
@@ -2752,7 +2650,8 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -2789,7 +2688,8 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
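Each scalar bitwise helper above now short-circuits on `bool` operands, so comptime folding of bool-vector operations, for example through `@reduce`, never constructs a BigInt. A sketch:

    const std = @import("std");

    test "@reduce folds over a comptime-known bool vector" {
        const v = @Vector(3, bool){ true, true, false };
        try std.testing.expect(@reduce(.And, v) == false);
        try std.testing.expect(@reduce(.Or, v) == true);
        try std.testing.expect(@reduce(.Xor, v) == false);
    }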
@@ -3233,7 +3133,7 @@ pub const Value = struct {
result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits);
}
return OverflowArithmeticResult{
- .overflow_bit = boolToInt(overflowed),
+ .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
.wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
};
}
@@ -4267,12 +4167,6 @@ pub const Value = struct {
return if (x) Value.true else Value.false;
}
- pub fn boolToInt(x: bool) Value {
- const zero: Value = .{ .ip_index = .zero, .legacy = undefined };
- const one: Value = .{ .ip_index = .one, .legacy = undefined };
- return if (x) one else zero;
- }
-
pub const RuntimeIndex = InternPool.RuntimeIndex;
/// This function is used in the debugger pretty formatters in tools/ to fetch the