author    mlugg <mlugg@mlugg.co.uk>    2024-04-08 16:14:39 +0100
committer mlugg <mlugg@mlugg.co.uk>    2024-04-17 13:41:25 +0100
commit    d0e74ffe52d0ae0d876d4e3f7ef5d32b5f5460a5 (patch)
tree      001cb2b59a48e913e6036675b71f4736c55647c7 /src/mutable_value.zig
parent    77abd3a96aa8c8c1277cdbb33d88149d4674d389 (diff)
compiler: rework comptime pointer representation and access
We've got a big one here! This commit reworks how we represent pointers in the InternPool, and rewrites the logic for loading and storing from them at comptime.

Firstly, the pointer representation. Previously, pointers were represented in a highly structured manner: pointers to fields, array elements, etc. were explicitly represented. This works well for simple cases, but is quite difficult to handle in the cases of unusual reinterpretations, pointer casts, offsets, etc. Therefore, pointers are now represented in a more "flat" manner. For types without well-defined layouts -- such as comptime-only types, automatic-layout aggregates, and so on -- we still use this "hierarchical" structure. However, for types with well-defined layouts, we use a byte offset associated with the pointer. This allows the comptime pointer access logic to deal with reinterpreted pointers far more gracefully, because the "base address" of a pointer -- for instance a `field` -- is a single value which pointer accesses cannot exceed, since the parent has undefined layout.

This strategy is also more useful to most backends -- see the updated logic in `codegen.zig` and `codegen/llvm.zig`. For backends which do prefer a chain of field and element accesses for lowering pointer values, such as SPIR-V, there is a helpful function in `Value` which creates a strategy to derive a pointer value using ideally only field and element accesses. This is actually more correct than the previous logic, since it correctly handles pointer casts which, after the dust has settled, end up referring exactly to an aggregate field or array element.

In terms of the pointer access code, it has been rewritten from the ground up. The old logic had become rather a mess of special cases added whenever bugs were hit, and was still riddled with bugs. The new logic was written to handle the "difficult" cases correctly, the most notable of which is restructuring of a comptime-only array (for instance, converting a `[3][2]comptime_int` to a `[2][3]comptime_int`). Currently, the logic for loading and storing works somewhat differently, but a future change will likely improve the loading logic to bring it more in line with the store strategy. As far as I can tell, the rewrite has fixed all bugs exposed by #19414.

As a part of this, the comptime bitcast logic has also been rewritten. Previously, bitcasts simply worked by serializing the entire value into an in-memory buffer, then deserializing it. This strategy has two key weaknesses: pointers, and undefined values. Representations of these values at comptime cannot be easily serialized/deserialized whilst preserving data, which means many bitcasts would become runtime-known if pointers were involved, or would turn `undefined` values into `0xAA`. The new logic works by "flattening" the data structure to be cast into a sequence of bit-packed atomic values, and then "unflattening" it, using serialization when necessary, but with special handling for `undefined` values and for pointers which align in virtual memory. The resulting code is definitely slower -- more on this later -- but it is correct.

The pointer access and bitcast logic required some helper functions and types which are not generally useful elsewhere, so I opted to split them into separate files `Sema/comptime_ptr_access.zig` and `Sema/bitcast.zig`, with simple re-exports in `Sema.zig` for their small public APIs.
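To make the offset-based representation concrete, here is a sketch (not code from this commit) of the kind of comptime access it is designed to handle: a byte-offset reinterpretation of an extern struct, whose layout is well-defined, that ends up referring exactly to a field.

// Hypothetical example: `base + @offsetOf(S, "b")` is representable as the
// base address of `s` plus a byte offset, which the access logic can
// resolve back to the field `s.b`.
const S = extern struct { a: u32, b: u32 };

comptime {
    var s: S = .{ .a = 1, .b = 2 };
    const base: [*]u8 = @ptrCast(&s);
    const q: *u32 = @alignCast(@ptrCast(base + @offsetOf(S, "b")));
    q.* = 42; // store through the reinterpreted pointer
    if (s.b != 42) unreachable;
}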
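The array restructuring case mentioned above could be exercised like this (a sketch based on the description, not a test from the commit):

// The load through `b` must flatten the six `comptime_int` elements of `a`
// and regroup them with the new shape: 1, 2, 3 | 4, 5, 6.
comptime {
    const a: [3][2]comptime_int = .{ .{ 1, 2 }, .{ 3, 4 }, .{ 5, 6 } };
    const b: *const [2][3]comptime_int = @ptrCast(&a);
    if (b[0][2] != 3 or b[1][0] != 4) unreachable;
}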
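And for the bitcast rewrite, a hypothetical sketch of the pointer case: both layouts place the pointer over the same bits, so the flatten/unflatten logic can carry it across instead of serializing it, which previously would have forced the result to be runtime-known.

// Assumed example: `A` and `B` are layout-compatible extern structs, so the
// pointer field "aligns in virtual memory" and survives the comptime bitcast.
const A = extern struct { p: *const u32, tag: u32 };
const B = extern struct { q: *const u32, id: u32 };

comptime {
    const x: u32 = 123;
    const a: A = .{ .p = &x, .tag = 7 };
    const b: B = @bitCast(a);
    if (b.q != &x or b.id != 7) unreachable;
}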
Whilst working on this branch, I caught various unrelated bugs with transitive Sema errors, and with the handling of `undefined` values. These bugs have been fixed, and corresponding behavior tests added.

In terms of performance, I do anticipate that this commit will regress performance somewhat, because the new pointer access and bitcast logic is necessarily more complex. I have not yet taken performance measurements, but will do shortly, and post the results in this PR. If the performance regression is severe, I will do work to optimize the new logic before merge.

Resolves: #19452
Resolves: #19460
Diffstat (limited to 'src/mutable_value.zig')
-rw-r--r--    src/mutable_value.zig    149
1 files changed, 111 insertions, 38 deletions
diff --git a/src/mutable_value.zig b/src/mutable_value.zig
index f16a8fd3f9..e9f19e6596 100644
--- a/src/mutable_value.zig
+++ b/src/mutable_value.zig
@@ -54,22 +54,22 @@ pub const MutableValue = union(enum) {
payload: *MutableValue,
};
- pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!InternPool.Index {
+ pub fn intern(mv: MutableValue, zcu: *Zcu, arena: Allocator) Allocator.Error!Value {
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
- return switch (mv) {
+ return Value.fromInterned(switch (mv) {
.interned => |ip_index| ip_index,
.eu_payload => |sv| try ip.get(gpa, .{ .error_union = .{
.ty = sv.ty,
- .val = .{ .payload = try sv.child.intern(zcu, arena) },
+ .val = .{ .payload = (try sv.child.intern(zcu, arena)).toIntern() },
} }),
.opt_payload => |sv| try ip.get(gpa, .{ .opt = .{
.ty = sv.ty,
- .val = try sv.child.intern(zcu, arena),
+ .val = (try sv.child.intern(zcu, arena)).toIntern(),
} }),
.repeated => |sv| try ip.get(gpa, .{ .aggregate = .{
.ty = sv.ty,
- .storage = .{ .repeated_elem = try sv.child.intern(zcu, arena) },
+ .storage = .{ .repeated_elem = (try sv.child.intern(zcu, arena)).toIntern() },
} }),
.bytes => |b| try ip.get(gpa, .{ .aggregate = .{
.ty = b.ty,
@@ -78,24 +78,24 @@ pub const MutableValue = union(enum) {
.aggregate => |a| {
const elems = try arena.alloc(InternPool.Index, a.elems.len);
for (a.elems, elems) |mut_elem, *interned_elem| {
- interned_elem.* = try mut_elem.intern(zcu, arena);
+ interned_elem.* = (try mut_elem.intern(zcu, arena)).toIntern();
}
- return ip.get(gpa, .{ .aggregate = .{
+ return Value.fromInterned(try ip.get(gpa, .{ .aggregate = .{
.ty = a.ty,
.storage = .{ .elems = elems },
- } });
+ } }));
},
.slice => |s| try ip.get(gpa, .{ .slice = .{
.ty = s.ty,
- .ptr = try s.ptr.intern(zcu, arena),
- .len = try s.len.intern(zcu, arena),
+ .ptr = (try s.ptr.intern(zcu, arena)).toIntern(),
+ .len = (try s.len.intern(zcu, arena)).toIntern(),
} }),
.un => |u| try ip.get(gpa, .{ .un = .{
.ty = u.ty,
.tag = u.tag,
- .val = try u.payload.intern(zcu, arena),
+ .val = (try u.payload.intern(zcu, arena)).toIntern(),
} }),
- };
+ });
}
/// Un-interns the top level of this `MutableValue`, if applicable.
@@ -248,9 +248,11 @@ pub const MutableValue = union(enum) {
},
.Union => {
const payload = try arena.create(MutableValue);
- // HACKHACK: this logic is silly, but Sema detects it and reverts the change where needed.
- // See comment at the top of `Sema.beginComptimePtrMutationInner`.
- payload.* = .{ .interned = .undef };
+ const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(zcu);
+ payload.* = .{ .interned = try ip.get(
+ gpa,
+ .{ .undef = backing_ty.toIntern() },
+ ) };
mv.* = .{ .un = .{
.ty = ty_ip,
.tag = .none,
@@ -294,7 +296,6 @@ pub const MutableValue = union(enum) {
/// Get a pointer to the `MutableValue` associated with a field/element.
/// The returned pointer can be safety mutated through to modify the field value.
/// The returned pointer is valid until the representation of `mv` changes.
- /// This function does *not* support accessing the ptr/len field of slices.
pub fn elem(
mv: *MutableValue,
zcu: *Zcu,
@@ -304,18 +305,18 @@ pub const MutableValue = union(enum) {
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
// Convert to the `aggregate` representation.
- switch (mv) {
- .eu_payload, .opt_payload, .slice, .un => unreachable,
+ switch (mv.*) {
+ .eu_payload, .opt_payload, .un => unreachable,
.interned => {
try mv.unintern(zcu, arena, false, false);
},
.bytes => |bytes| {
const elems = try arena.alloc(MutableValue, bytes.data.len);
- for (bytes.data, elems) |byte, interned_byte| {
- interned_byte.* = try ip.get(gpa, .{ .int = .{
+ for (bytes.data, elems) |byte, *interned_byte| {
+ interned_byte.* = .{ .interned = try ip.get(gpa, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
- } });
+ } }) };
}
mv.* = .{ .aggregate = .{
.ty = bytes.ty,
@@ -331,9 +332,17 @@ pub const MutableValue = union(enum) {
.elems = elems,
} };
},
- .aggregate => {},
+ .slice, .aggregate => {},
+ }
+ switch (mv.*) {
+ .aggregate => |*agg| return &agg.elems[field_idx],
+ .slice => |*slice| return switch (field_idx) {
+ Value.slice_ptr_index => slice.ptr,
+ Value.slice_len_index => slice.len,
+ else => unreachable,
+ },
+ else => unreachable,
}
- return &mv.aggregate.elems[field_idx];
}
/// Modify a single field of a `MutableValue` which represents an aggregate or slice, leaving others
@@ -349,43 +358,44 @@ pub const MutableValue = union(enum) {
) Allocator.Error!void {
const ip = &zcu.intern_pool;
const is_trivial_int = field_val.isTrivialInt(zcu);
- try mv.unintern(arena, is_trivial_int, true);
- switch (mv) {
+ try mv.unintern(zcu, arena, is_trivial_int, true);
+ switch (mv.*) {
.interned,
.eu_payload,
.opt_payload,
.un,
=> unreachable,
.slice => |*s| switch (field_idx) {
- Value.slice_ptr_index => s.ptr = field_val,
- Value.slice_len_index => s.len = field_val,
+ Value.slice_ptr_index => s.ptr.* = field_val,
+ Value.slice_len_index => s.len.* = field_val,
+ else => unreachable,
},
.bytes => |b| {
assert(is_trivial_int);
- assert(field_val.typeOf() == Type.u8);
- b.data[field_idx] = Value.fromInterned(field_val.interned).toUnsignedInt(zcu);
+ assert(field_val.typeOf(zcu).toIntern() == .u8_type);
+ b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
},
.repeated => |r| {
if (field_val.eqlTrivial(r.child.*)) return;
// We must switch to either the `aggregate` or the `bytes` representation.
const len_inc_sent = ip.aggregateTypeLenIncludingSentinel(r.ty);
- if (ip.zigTypeTag(r.ty) != .Struct and
+ if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .Struct and
is_trivial_int and
- Type.fromInterned(r.ty).childType(zcu) == .u8_type and
+ Type.fromInterned(r.ty).childType(zcu).toIntern() == .u8_type and
r.child.isTrivialInt(zcu))
{
// We can use the `bytes` representation.
const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
- const repeated_byte = Value.fromInterned(r.child.interned).getUnsignedInt(zcu);
- @memset(bytes, repeated_byte);
- bytes[field_idx] = Value.fromInterned(field_val.interned).getUnsignedInt(zcu);
+ const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
+ @memset(bytes, @intCast(repeated_byte));
+ bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
mv.* = .{ .bytes = .{
.ty = r.ty,
.data = bytes,
} };
} else {
// We must use the `aggregate` representation.
- const mut_elems = try arena.alloc(u8, @intCast(len_inc_sent));
+ const mut_elems = try arena.alloc(MutableValue, @intCast(len_inc_sent));
@memset(mut_elems, r.child.*);
mut_elems[field_idx] = field_val;
mv.* = .{ .aggregate = .{
@@ -396,12 +406,12 @@ pub const MutableValue = union(enum) {
},
.aggregate => |a| {
a.elems[field_idx] = field_val;
- const is_struct = ip.zigTypeTag(a.ty) == .Struct;
+ const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .Struct;
// Attempt to switch to a more efficient representation.
const is_repeated = for (a.elems) |e| {
if (!e.eqlTrivial(field_val)) break false;
} else true;
- if (is_repeated) {
+ if (!is_struct and is_repeated) {
// Switch to `repeated` repr
const mut_repeated = try arena.create(MutableValue);
mut_repeated.* = field_val;
@@ -425,7 +435,7 @@ pub const MutableValue = union(enum) {
} else {
const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| {
- b.* = Value.fromInterned(elem_val.interned).toUnsignedInt(zcu);
+ b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
}
mv.* = .{ .bytes = .{
.ty = a.ty,
@@ -505,4 +515,67 @@ pub const MutableValue = union(enum) {
inline else => |x| Type.fromInterned(x.ty),
};
}
+
+ pub fn unpackOptional(mv: MutableValue, zcu: *Zcu) union(enum) {
+ undef,
+ null,
+ payload: MutableValue,
+ } {
+ return switch (mv) {
+ .opt_payload => |pl| return .{ .payload = pl.child.* },
+ .interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
+ .undef => return .undef,
+ .opt => |opt| if (opt.val == .none) .null else .{ .payload = .{ .interned = opt.val } },
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+ }
+
+ pub fn unpackErrorUnion(mv: MutableValue, zcu: *Zcu) union(enum) {
+ undef,
+ err: InternPool.NullTerminatedString,
+ payload: MutableValue,
+ } {
+ return switch (mv) {
+ .eu_payload => |pl| return .{ .payload = pl.child.* },
+ .interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
+ .undef => return .undef,
+ .error_union => |eu| switch (eu.val) {
+ .err_name => |name| .{ .err = name },
+ .payload => |pl| .{ .payload = .{ .interned = pl } },
+ },
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+ }
+
+ /// Fast equality checking which may return false negatives.
+ /// Used for deciding when to switch aggregate representations without fully
+ /// interning many values.
+ fn eqlTrivial(a: MutableValue, b: MutableValue) bool {
+ const Tag = @typeInfo(MutableValue).Union.tag_type.?;
+ if (@as(Tag, a) != @as(Tag, b)) return false;
+ return switch (a) {
+ .interned => |a_ip| a_ip == b.interned,
+ .eu_payload => |a_pl| a_pl.ty == b.eu_payload.ty and a_pl.child.eqlTrivial(b.eu_payload.child.*),
+ .opt_payload => |a_pl| a_pl.ty == b.opt_payload.ty and a_pl.child.eqlTrivial(b.opt_payload.child.*),
+ .repeated => |a_rep| a_rep.ty == b.repeated.ty and a_rep.child.eqlTrivial(b.repeated.child.*),
+ .bytes => |a_bytes| a_bytes.ty == b.bytes.ty and std.mem.eql(u8, a_bytes.data, b.bytes.data),
+ .aggregate => |a_agg| {
+ const b_agg = b.aggregate;
+ if (a_agg.ty != b_agg.ty) return false;
+ if (a_agg.elems.len != b_agg.elems.len) return false;
+ for (a_agg.elems, b_agg.elems) |a_elem, b_elem| {
+ if (!a_elem.eqlTrivial(b_elem)) return false;
+ }
+ return true;
+ },
+ .slice => |a_slice| a_slice.ty == b.slice.ty and
+ a_slice.ptr.interned == b.slice.ptr.interned and
+ a_slice.len.interned == b.slice.len.interned,
+ .un => |a_un| a_un.ty == b.un.ty and a_un.tag == b.un.tag and a_un.payload.eqlTrivial(b.un.payload.*),
+ };
+ }
};