path: root/src/InternPool.zig
author     Jacob Young <jacobly0@users.noreply.github.com>  2024-07-07 23:23:17 -0400
committer  Jacob Young <jacobly0@users.noreply.github.com>  2024-07-07 23:23:30 -0400
commit     1abc904075ee37b059777869ab144854e4db0711 (patch)
tree       0199fcf3a8289c6c84f4fd9af880a147372d6236 /src/InternPool.zig
parent     166402c16bddccc364b9108a9e69af3a0dd6f1ab (diff)
download   zig-1abc904075ee37b059777869ab144854e4db0711.tar.gz
           zig-1abc904075ee37b059777869ab144854e4db0711.zip
InternPool: start documenting new thread-safe fields
Diffstat (limited to 'src/InternPool.zig')
-rw-r--r--  src/InternPool.zig | 12
1 file changed, 12 insertions, 0 deletions
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 9f179b601e..8d72c20e2e 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -2,10 +2,16 @@
//! This data structure is self-contained, with the following exceptions:
//! * Module.Namespace has a pointer to Module.File
+/// One item per thread, indexed by `tid`, which is dense and unique per thread.
locals: []Local = &.{},
+/// Length must be a power of two and represents the number of simultaneous
+/// writers that can mutate any single sharded data structure.
shards: []Shard = &.{},
+/// Cached number of active bits in a `tid`.
tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0,
+/// Cached shift amount to put a `tid` in the top bits of a 31-bit value.
tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31,
+/// Cached shift amount to put a `tid` in the top bits of a 32-bit value.
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31,
/// Rather than allocating Decl objects with an Allocator, we instead allocate
@@ -341,7 +347,11 @@ pub const DepEntry = extern struct {
};
const Local = struct {
+ /// These fields can be accessed from any thread by calling `acquire`.
+ /// They are only modified by the owning thread.
shared: Shared align(std.atomic.cache_line),
+ /// This state is fully local to the owning thread and does not require any
+ /// atomic access.
mutate: struct {
arena: std.heap.ArenaAllocator.State,
items: Mutate,
@@ -579,6 +589,7 @@ const Local = struct {
const bytes_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Elem));
const View = std.MultiArrayList(Elem);
+ /// Must be called when accessing from another thread.
fn acquire(list: *const ListSelf) ListSelf {
return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) };
}
@@ -703,6 +714,7 @@ const Shard = struct {
const alignment = @max(@alignOf(Header), @alignOf(Entry));
const entries_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Entry));
+ /// Must be called unless the mutate mutex is locked.
fn acquire(map: *const @This()) @This() {
return .{ .entries = @atomicLoad([*]Entry, &map.entries, .acquire) };
}
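
The `tid_width` / `tid_shift_32` comments in the first hunk describe packing a dense thread id into the top bits of a 32-bit index, with the low bits left for the per-thread slot. A minimal sketch of that arithmetic, assuming a hypothetical `packIndex32` helper that recomputes the shift rather than caching it the way InternPool does:

const std = @import("std");

// Illustrative only: pack a dense thread id into the top bits of a 32-bit
// index, leaving the low bits for the per-thread slot. The real InternPool
// caches the shift in `tid_shift_32`; here it is recomputed from a
// hypothetical `tid_width` parameter.
fn packIndex32(tid: u32, local_index: u32, tid_width: u5) u32 {
    // With `tid_width` bits reserved for the tid, the shift that places it in
    // the top bits of a 32-bit value is `32 - tid_width`, clamped to 31 so the
    // shift amount stays representable when `tid_width` is 0.
    const tid_shift_32: u5 = if (tid_width == 0) 31 else @intCast(32 - @as(u6, tid_width));
    return (tid << tid_shift_32) | local_index;
}

test packIndex32 {
    // With 2 tid bits, tid 2 occupies bits 31..30 and the local index the rest.
    try std.testing.expectEqual(@as(u32, 0x8000_0005), packIndex32(2, 5, 2));
}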
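
The `acquire` helpers documented in the last two hunks are the reader side of a single-writer publication scheme: other threads load the buffer pointer with `.acquire`, presumably paired with a `.release` store made by the owning thread when it swaps in a reallocated buffer. A minimal sketch of that pattern, with hypothetical names (`SharedList`, `publish`) standing in for the real `Local`/`Shard` layout:

// Illustrative only: single-writer / many-reader publication, in the spirit
// of the `acquire` helpers documented in this commit. Names are hypothetical.
const SharedList = struct {
    bytes: [*]u8,

    // Readers on other threads must go through this acquire load before
    // dereferencing `bytes`.
    fn acquire(list: *const SharedList) [*]u8 {
        return @atomicLoad([*]u8, &list.bytes, .acquire);
    }

    // Only the owning thread calls this, after fully initializing the new
    // buffer; the release store publishes the contents along with the pointer.
    fn publish(list: *SharedList, new_bytes: [*]u8) void {
        @atomicStore([*]u8, &list.bytes, new_bytes, .release);
    }
};

Because only the owning thread ever stores to `bytes`, release/acquire ordering is enough: a reader observes either the old buffer or the fully initialized new one, never a partially written pointer target.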