author: Andrew Kelley <andrew@ziglang.org>  2024-03-16 15:45:10 -0700
committer: GitHub <noreply@github.com>  2024-03-16 18:45:10 -0400
commit: 242ab81112c05fa815523551a6f612c5a12c52b2 (patch)
tree: 32b581f036137436932883ffdfab24aec64576db /lib/std/hash_map.zig
parent: 1b8d1b18c7ec1c8002046d8a7e131bf21ccf92ca (diff)
std: introduce pointer stability locks to hash maps (#17719)
This adds std.debug.SafetyLock and uses it in std.HashMapUnmanaged by adding lockPointers() and unlockPointers(). While pointers are locked, any method call that would invalidate existing key or value pointers is detected and panics rather than invoking undefined behavior.
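As a minimal usage sketch (assuming the managed std.AutoHashMap wrapper, which forwards to the unmanaged implementation changed below): code that wants to hold key or value pointers across other operations brackets that region with the new calls, and in safe builds any rehashing method attempted inside the bracket panics instead of leaving the pointers dangling.

const std = @import("std");

test "pointer stability lock: usage sketch" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);

    // While locked, methods that may move the backing array
    // (put, ensureTotalCapacity, clearAndFree, ...) trip an assertion.
    map.lockPointers();
    defer map.unlockPointers();

    const value_ptr = map.getPtr(1).?;
    value_ptr.* += 1; // in-place access through held pointers stays valid

    // try map.put(2, 20); // would panic here instead of invalidating value_ptr
}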
Diffstat (limited to 'lib/std/hash_map.zig')
-rw-r--r--  lib/std/hash_map.zig  104
1 file changed, 78 insertions, 26 deletions
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index ea21356e99..f44bf1435d 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -416,6 +416,23 @@ pub fn HashMap(
};
}
+ /// Puts the hash map into a state where any method call that would
+ /// cause an existing key or value pointer to become invalidated will
+ /// instead trigger an assertion.
+ ///
+ /// An additional call to `lockPointers` in such state also triggers an
+ /// assertion.
+ ///
+ /// `unlockPointers` returns the hash map to the previous state.
+ pub fn lockPointers(self: *Self) void {
+ self.unmanaged.lockPointers();
+ }
+
+ /// Undoes a call to `lockPointers`.
+ pub fn unlockPointers(self: *Self) void {
+ self.unmanaged.unlockPointers();
+ }
+
/// Release the backing array and invalidate this map.
/// This does *not* deinit keys, values, or the context!
/// If your keys or values need to be released, ensure
@@ -672,6 +689,7 @@ pub fn HashMap(
/// Set the map to an empty state, making deinitialization a no-op, and
/// returning a copy of the original.
pub fn move(self: *Self) Self {
+ self.unmanaged.pointer_stability.assertUnlocked();
const result = self.*;
self.unmanaged = .{};
return result;
@@ -722,6 +740,9 @@ pub fn HashMapUnmanaged(
/// `max_load_percentage`.
available: Size = 0,
+ /// Used to detect memory safety violations.
+ pointer_stability: std.debug.SafetyLock = .{},
+
// This is purely empirical and not a /very smart magic constant™/.
/// Capacity of the first grow when bootstrapping the hashmap.
const minimal_capacity = 8;
@@ -884,11 +905,29 @@ pub fn HashMapUnmanaged(
};
}
+ /// Puts the hash map into a state where any method call that would
+ /// cause an existing key or value pointer to become invalidated will
+ /// instead trigger an assertion.
+ ///
+ /// An additional call to `lockPointers` in such state also triggers an
+ /// assertion.
+ ///
+ /// `unlockPointers` returns the hash map to the previous state.
+ pub fn lockPointers(self: *Self) void {
+ self.pointer_stability.lock();
+ }
+
+ /// Undoes a call to `lockPointers`.
+ pub fn unlockPointers(self: *Self) void {
+ self.pointer_stability.unlock();
+ }
+
fn isUnderMaxLoadPercentage(size: Size, cap: Size) bool {
return size * 100 < max_load_percentage * cap;
}
pub fn deinit(self: *Self, allocator: Allocator) void {
+ self.pointer_stability.assertUnlocked();
self.deallocate(allocator);
self.* = undefined;
}
@@ -905,6 +944,8 @@ pub fn HashMapUnmanaged(
return ensureTotalCapacityContext(self, allocator, new_size, undefined);
}
pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) Allocator.Error!void {
+ self.pointer_stability.lock();
+ defer self.pointer_stability.unlock();
if (new_size > self.size)
try self.growIfNeeded(allocator, new_size - self.size, ctx);
}
@@ -919,14 +960,18 @@ pub fn HashMapUnmanaged(
}
pub fn clearRetainingCapacity(self: *Self) void {
+ self.pointer_stability.lock();
+ defer self.pointer_stability.unlock();
if (self.metadata) |_| {
self.initMetadatas();
self.size = 0;
- self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100));
+ self.available = @truncate((self.capacity() * max_load_percentage) / 100);
}
}
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
+ self.pointer_stability.lock();
+ defer self.pointer_stability.unlock();
self.deallocate(allocator);
self.size = 0;
self.available = 0;
@@ -997,9 +1042,11 @@ pub fn HashMapUnmanaged(
return self.putNoClobberContext(allocator, key, value, undefined);
}
pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) Allocator.Error!void {
- assert(!self.containsContext(key, ctx));
- try self.growIfNeeded(allocator, 1, ctx);
-
+ {
+ self.pointer_stability.lock();
+ defer self.pointer_stability.unlock();
+ try self.growIfNeeded(allocator, 1, ctx);
+ }
self.putAssumeCapacityNoClobberContext(key, value, ctx);
}
@@ -1028,7 +1075,7 @@ pub fn HashMapUnmanaged(
const hash = ctx.hash(key);
const mask = self.capacity() - 1;
- var idx = @as(usize, @truncate(hash & mask));
+ var idx: usize = @truncate(hash & mask);
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed()) {
@@ -1280,17 +1327,21 @@ pub fn HashMapUnmanaged(
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) Allocator.Error!GetOrPutResult {
- self.growIfNeeded(allocator, 1, ctx) catch |err| {
- // If allocation fails, try to do the lookup anyway.
- // If we find an existing item, we can return it.
- // Otherwise return the error, we could not add another.
- const index = self.getIndex(key, key_ctx) orelse return err;
- return GetOrPutResult{
- .key_ptr = &self.keys()[index],
- .value_ptr = &self.values()[index],
- .found_existing = true,
+ {
+ self.pointer_stability.lock();
+ defer self.pointer_stability.unlock();
+ self.growIfNeeded(allocator, 1, ctx) catch |err| {
+ // If allocation fails, try to do the lookup anyway.
+ // If we find an existing item, we can return it.
+ // Otherwise return the error, we could not add another.
+ const index = self.getIndex(key, key_ctx) orelse return err;
+ return GetOrPutResult{
+ .key_ptr = &self.keys()[index],
+ .value_ptr = &self.values()[index],
+ .found_existing = true,
+ };
};
- };
+ }
return self.getOrPutAssumeCapacityAdapted(key, key_ctx);
}
@@ -1495,6 +1546,7 @@ pub fn HashMapUnmanaged(
/// Set the map to an empty state, making deinitialization a no-op, and
/// returning a copy of the original.
pub fn move(self: *Self) Self {
+ self.pointer_stability.assertUnlocked();
const result = self.*;
self.* = .{};
return result;
@@ -1506,28 +1558,28 @@ pub fn HashMapUnmanaged(
assert(new_cap > self.capacity());
assert(std.math.isPowerOfTwo(new_cap));
- var map = Self{};
+ var map: Self = .{};
defer map.deinit(allocator);
+ map.pointer_stability.lock();
try map.allocate(allocator, new_cap);
map.initMetadatas();
map.available = @truncate((new_cap * max_load_percentage) / 100);
if (self.size != 0) {
const old_capacity = self.capacity();
- var i: Size = 0;
- var metadata = self.metadata.?;
- const keys_ptr = self.keys();
- const values_ptr = self.values();
- while (i < old_capacity) : (i += 1) {
- if (metadata[i].isUsed()) {
- map.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], ctx);
- if (map.size == self.size)
- break;
- }
+ for (
+ self.metadata.?[0..old_capacity],
+ self.keys()[0..old_capacity],
+ self.values()[0..old_capacity],
+ ) |m, k, v| {
+ if (!m.isUsed()) continue;
+ map.putAssumeCapacityNoClobberContext(k, v, ctx);
+ if (map.size == self.size) break;
}
}
self.size = 0;
+ self.pointer_stability = .{ .state = .unlocked };
std.mem.swap(Self, self, &map);
}