author     mlugg <mlugg@mlugg.co.uk>            2024-12-25 21:23:46 +0000
committer  Matthew Lugg <mlugg@mlugg.co.uk>     2024-12-26 02:19:02 +0000
commit     42dac40b3feeabe39b5f191d1e72d247327133ba (patch)
tree       1839a029e92e2fe818334e54bf0946ec73b1717b /src/InternPool.zig
parent     497592c9b45a94fb7b6028bf45b80f183e395a9b (diff)
InternPool: fix segfault in `rehashTrackedInsts`
The `.empty` map in a shard is weird: it claims to have capacity 1, but you're not actually allowed to use that capacity. That's fine for the normal insertion algorithm, because it always resizes to a higher capacity when inserting the initial element. However, `rehashTrackedInsts` was not aware of this caveat, so it sometimes tried to store to the single element of the `empty` map.

This system exists to avoid an extra branch in the main resizing logic (since `new_cap = old_cap * 2` only works if the capacity is never zero). However, it's fine for `rehashTrackedInsts` to have an extra branch to handle this case, since it's literally called once per update.
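For illustration, here is a minimal, hypothetical sketch of the pattern described above; the names (`Entry`, `Map`, `empty_entries`, `grow`) are invented and do not match the real InternPool layout. The shared `.empty` value reports capacity 1 and points at a constant buffer, so the grow path can always double the capacity without a zero check, but the buffer must never be written through:

```zig
const std = @import("std");

const Entry = struct { value: u32, hash: u32 };

const Map = struct {
    entries: [*]Entry,
    capacity: u32,

    // One shared, constant backing entry. `@constCast` keeps the field type
    // mutable, but writing through `empty.entries` (e.g. via `@memset`) would
    // touch read-only memory and crash.
    const empty_entries: [1]Entry = .{.{ .value = 0, .hash = 0 }};
    const empty: Map = .{
        .entries = @constCast(&empty_entries),
        .capacity = 1,
    };

    fn grow(map: *Map, gpa: std.mem.Allocator) !void {
        // Because `capacity` is never 0, doubling always yields a larger,
        // nonzero capacity, so the hot path needs no special case for the
        // empty map.
        const new_cap = map.capacity * 2;
        const new_entries = try gpa.alloc(Entry, new_cap);
        @memset(new_entries, .{ .value = 0, .hash = 0 });
        // A real implementation would rehash the old entries into the new
        // buffer here, and free the old buffer unless it was `empty_entries`.
        map.entries = new_entries.ptr;
        map.capacity = new_cap;
    }
};

test "grow never writes through the constant empty buffer" {
    var map: Map = Map.empty;
    try map.grow(std.testing.allocator);
    defer std.testing.allocator.free(map.entries[0..map.capacity]);
    try std.testing.expectEqual(@as(u32, 2), map.capacity);
}
```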
Diffstat (limited to 'src/InternPool.zig')
-rw-r--r--   src/InternPool.zig   11
1 file changed, 9 insertions, 2 deletions
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 64cf95c7b2..681d2b4957 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -319,11 +319,18 @@ pub fn rehashTrackedInsts(
     // We know how big each shard must be, so ensure we have the capacity we need.
     for (ip.shards) |*shard| {
         const want_capacity = if (shard.mutate.tracked_inst_map.len == 0) 0 else cap: {
-            break :cap std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable;
+            // We need to return a capacity of at least 2 to make sure we don't have the `Map(...).empty` value.
+            // For this reason, note the `+ 1` in the below expression. This matches the behavior of `trackZir`.
+            break :cap std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3 + 1) catch unreachable;
         };
         const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
         if (have_capacity >= want_capacity) {
-            @memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
+            if (have_capacity == 1) {
+                // The map is `.empty` -- we can't memset the entries, or we'll segfault, because
+                // the buffer is secretly constant.
+            } else {
+                @memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
+            }
             continue;
         }
         var arena = arena_state.promote(gpa);
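As a quick sanity check on the `+ 1` above (a worked example, not part of the commit): with a single tracked instruction, `1 * 5 / 3` is `1` and `ceilPowerOfTwo(1)` is `1`, which is exactly the forbidden `.empty` capacity; adding 1 gives `ceilPowerOfTwo(2) == 2`. A tiny hypothetical test mirroring the patched expression:

```zig
const std = @import("std");

test "want_capacity is never the forbidden .empty capacity of 1" {
    // Mirrors `len * 5 / 3 + 1` from the patch for a few small shard sizes.
    var len: u32 = 1;
    while (len <= 16) : (len += 1) {
        const want = std.math.ceilPowerOfTwo(u32, len * 5 / 3 + 1) catch unreachable;
        try std.testing.expect(want >= 2);
    }
}
```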