author     Veikka Tuominen <git@vexu.eu>  2024-03-11 22:42:32 +0200
committer  GitHub <noreply@github.com>    2024-03-11 22:42:32 +0200
commit     4f782d1e853accbe1c4bfab2617c3813d4b1e59f (patch)
tree       0eb768171ecfb058fba72d199afc951af206f8fb /lib/std/Thread
parent     d0c06ca7127110a8afeb0ef524a197049892db21 (diff)
parent     6067d39522f939c08dd3f3ea4fb5889ff0024e72 (diff)
Merge pull request #18994 from ExpidusOS/feat/container-layout-rename-fields
std.builtin: make enum fields lowercase
Diffstat (limited to 'lib/std/Thread')
-rw-r--r--  lib/std/Thread/Condition.zig   24
-rw-r--r--  lib/std/Thread/Futex.zig       30
-rw-r--r--  lib/std/Thread/Mutex.zig       20
-rw-r--r--  lib/std/Thread/ResetEvent.zig  18
-rw-r--r--  lib/std/Thread/RwLock.zig      32
-rw-r--r--  lib/std/Thread/WaitGroup.zig   12
6 files changed, 68 insertions, 68 deletions
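The hunks below apply one mechanical change: the std.builtin.AtomicOrder fields used throughout lib/std/Thread are renamed from CamelCase to lowercase (.Acquire -> .acquire, .Release -> .release, .Monotonic -> .monotonic, .SeqCst -> .seq_cst, .Unordered -> .unordered, .AcqRel -> .acq_rel). A minimal sketch of what the rename looks like at a call site, assuming a post-rename toolchain; the test name and the counter variable are illustrative, not part of the diff:

const std = @import("std");

test "AtomicOrder fields are now lowercase" {
    var counter = std.atomic.Value(u32).init(0);
    // Before this commit: counter.fetchAdd(1, .Monotonic) and counter.load(.Acquire).
    // After this commit the same orderings are spelled in lowercase:
    _ = counter.fetchAdd(1, .monotonic);
    try std.testing.expectEqual(@as(u32, 1), counter.load(.acquire));
}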
diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig
index b1154f8bd0..39b8483950 100644
--- a/lib/std/Thread/Condition.zig
+++ b/lib/std/Thread/Condition.zig
@@ -163,7 +163,7 @@ const WindowsImpl = struct {
if (comptime builtin.mode == .Debug) {
// The internal state of the DebugMutex needs to be handled here as well.
- mutex.impl.locking_thread.store(0, .Unordered);
+ mutex.impl.locking_thread.store(0, .unordered);
}
const rc = os.windows.kernel32.SleepConditionVariableSRW(
&self.condition,
@@ -173,7 +173,7 @@ const WindowsImpl = struct {
);
if (comptime builtin.mode == .Debug) {
// The internal state of the DebugMutex needs to be handled here as well.
- mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .Unordered);
+ mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .unordered);
}
// Return error.Timeout if we know the timeout elapsed correctly.
@@ -212,8 +212,8 @@ const FutexImpl = struct {
// - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change)
//
// Acquire barrier to ensure the epoch load happens before the state load.
- var epoch = self.epoch.load(.Acquire);
- var state = self.state.fetchAdd(one_waiter, .Monotonic);
+ var epoch = self.epoch.load(.acquire);
+ var state = self.state.fetchAdd(one_waiter, .monotonic);
assert(state & waiter_mask != waiter_mask);
state += one_waiter;
@@ -231,30 +231,30 @@ const FutexImpl = struct {
// Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
while (state & signal_mask != 0) {
const new_state = state - one_waiter - one_signal;
- state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
+ state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
}
// Remove the waiter we added and officially return timed out.
const new_state = state - one_waiter;
- state = self.state.cmpxchgWeak(state, new_state, .Monotonic, .Monotonic) orelse return err;
+ state = self.state.cmpxchgWeak(state, new_state, .monotonic, .monotonic) orelse return err;
}
},
};
- epoch = self.epoch.load(.Acquire);
- state = self.state.load(.Monotonic);
+ epoch = self.epoch.load(.acquire);
+ state = self.state.load(.monotonic);
// Try to wake up by consuming a signal and decremented the waiter we added previously.
// Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
while (state & signal_mask != 0) {
const new_state = state - one_waiter - one_signal;
- state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
+ state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
}
}
}
fn wake(self: *Impl, comptime notify: Notify) void {
- var state = self.state.load(.Monotonic);
+ var state = self.state.load(.monotonic);
while (true) {
const waiters = (state & waiter_mask) / one_waiter;
const signals = (state & signal_mask) / one_signal;
@@ -275,7 +275,7 @@ const FutexImpl = struct {
// Reserve the amount of waiters to wake by incrementing the signals count.
// Release barrier ensures code before the wake() happens before the signal it posted and consumed by the wait() threads.
const new_state = state + (one_signal * to_wake);
- state = self.state.cmpxchgWeak(state, new_state, .Release, .Monotonic) orelse {
+ state = self.state.cmpxchgWeak(state, new_state, .release, .monotonic) orelse {
// Wake up the waiting threads we reserved above by changing the epoch value.
// NOTE: a waiting thread could miss a wake up if *exactly* ((1<<32)-1) wake()s happen between it observing the epoch and sleeping on it.
// This is very unlikely due to how many precise amount of Futex.wake() calls that would be between the waiting thread's potential preemption.
@@ -288,7 +288,7 @@ const FutexImpl = struct {
// - T1: s = LOAD(&state)
// - T2: UPDATE(&state, signal) + FUTEX_WAKE(&epoch)
// - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed both epoch change and state change)
- _ = self.epoch.fetchAdd(1, .Release);
+ _ = self.epoch.fetchAdd(1, .release);
Futex.wake(&self.epoch, to_wake);
return;
};
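Condition's futex implementation above pairs an epoch load (.acquire) with state updates (.monotonic); from the caller's side the API is unchanged by the rename. A minimal usage sketch of the Condition + Mutex predicate loop, assuming post-rename std; Waiter, ready, and the function names are illustrative:

const std = @import("std");

const Waiter = struct {
    mutex: std.Thread.Mutex = .{},
    cond: std.Thread.Condition = .{},
    ready: bool = false,

    fn waitUntilReady(self: *Waiter) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        // Condition.wait() releases the mutex while sleeping and re-acquires it
        // before returning, so the predicate is always re-checked under the lock.
        while (!self.ready) self.cond.wait(&self.mutex);
    }

    fn markReady(self: *Waiter) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.ready = true;
        self.cond.signal();
    }
};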
diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig
index b6f37feda4..160d983c71 100644
--- a/lib/std/Thread/Futex.zig
+++ b/lib/std/Thread/Futex.zig
@@ -40,7 +40,7 @@ pub fn timedWait(ptr: *const atomic.Value(u32), expect: u32, timeout_ns: u64) er
// Avoid calling into the OS for no-op timeouts.
if (timeout_ns == 0) {
- if (ptr.load(.SeqCst) != expect) return;
+ if (ptr.load(.seq_cst) != expect) return;
return error.Timeout;
}
@@ -783,16 +783,16 @@ const PosixImpl = struct {
// - T1: bumps pending waiters (was reordered after the ptr == expect check)
// - T1: goes to sleep and misses both the ptr change and T2's wake up
//
- // SeqCst as Acquire barrier to ensure the announcement happens before the ptr check below.
- // SeqCst as shared modification order to form a happens-before edge with the fence(.SeqCst)+load() in wake().
- var pending = bucket.pending.fetchAdd(1, .SeqCst);
+ // seq_cst as Acquire barrier to ensure the announcement happens before the ptr check below.
+ // seq_cst as shared modification order to form a happens-before edge with the fence(.seq_cst)+load() in wake().
+ var pending = bucket.pending.fetchAdd(1, .seq_cst);
assert(pending < std.math.maxInt(usize));
// If the wait gets cancelled, remove the pending count we previously added.
// This is done outside the mutex lock to keep the critical section short in case of contention.
var cancelled = false;
defer if (cancelled) {
- pending = bucket.pending.fetchSub(1, .Monotonic);
+ pending = bucket.pending.fetchSub(1, .monotonic);
assert(pending > 0);
};
@@ -850,11 +850,11 @@ const PosixImpl = struct {
// but the RMW operation unconditionally marks the cache-line as modified for others causing unnecessary fetching/contention.
//
// Instead we opt to do a full-fence + load instead which avoids taking ownership of the cache-line.
- // fence(SeqCst) effectively converts the ptr update to SeqCst and the pending load to SeqCst: creating a Store-Load barrier.
+ // fence(seq_cst) effectively converts the ptr update to seq_cst and the pending load to seq_cst: creating a Store-Load barrier.
//
- // The pending count increment in wait() must also now use SeqCst for the update + this pending load
- // to be in the same modification order as our load isn't using Release/Acquire to guarantee it.
- bucket.pending.fence(.SeqCst);
+ // The pending count increment in wait() must also now use seq_cst for the update + this pending load
+ // to be in the same modification order as our load isn't using release/acquire to guarantee it.
+ bucket.pending.fence(.seq_cst);
if (bucket.pending.load(.Monotonic) == 0) {
return;
}
@@ -912,7 +912,7 @@ test "signaling" {
current: u32 = 0,
fn hit(self: *@This()) void {
- _ = self.value.fetchAdd(1, .Release);
+ _ = self.value.fetchAdd(1, .release);
Futex.wake(&self.value, 1);
}
@@ -921,7 +921,7 @@ test "signaling" {
// Wait for the value to change from hit()
var new_value: u32 = undefined;
while (true) {
- new_value = self.value.load(.Acquire);
+ new_value = self.value.load(.acquire);
if (new_value != self.current) break;
Futex.wait(&self.value, self.current);
}
@@ -968,7 +968,7 @@ test "broadcasting" {
fn wait(self: *@This()) !void {
// Decrement the counter.
// Release ensures stuff before this barrier.wait() happens before the last one.
- const count = self.count.fetchSub(1, .Release);
+ const count = self.count.fetchSub(1, .release);
try testing.expect(count <= num_threads);
try testing.expect(count > 0);
@@ -976,15 +976,15 @@ test "broadcasting" {
// Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
// Release on futex update ensures stuff before all barrier.wait()'s happens before they all return.
if (count - 1 == 0) {
- _ = self.count.load(.Acquire); // TODO: could be fence(Acquire) if not for TSAN
- self.futex.store(1, .Release);
+ _ = self.count.load(.acquire); // TODO: could be fence(acquire) if not for TSAN
+ self.futex.store(1, .release);
Futex.wake(&self.futex, num_threads - 1);
return;
}
// Other threads wait until last counter wakes them up.
// Acquire on futex synchronizes with last barrier count to ensure stuff before all barrier.wait()'s happen before us.
- while (self.futex.load(.Acquire) == 0) {
+ while (self.futex.load(.acquire) == 0) {
Futex.wait(&self.futex, 0);
}
}
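The signaling/broadcasting tests above already show the canonical Futex pattern: publish with a .release store, then wake; waiters re-check with an .acquire load before sleeping. A condensed sketch of that handshake, assuming post-rename std; flag and the function names are illustrative:

const std = @import("std");
const Futex = std.Thread.Futex;

var flag = std.atomic.Value(u32).init(0);

fn waitForFlag() void {
    while (true) {
        const v = flag.load(.acquire); // pairs with the .release store in setFlag()
        if (v != 0) break;
        // Sleeps only while flag still holds 0, so a concurrent setFlag()
        // either wakes us or prevents us from sleeping at all.
        Futex.wait(&flag, 0);
    }
}

fn setFlag() void {
    flag.store(1, .release);
    Futex.wake(&flag, 1);
}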
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index 7beb7dd752..67472ffd9c 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -72,23 +72,23 @@ const DebugImpl = struct {
inline fn tryLock(self: *@This()) bool {
const locking = self.impl.tryLock();
if (locking) {
- self.locking_thread.store(Thread.getCurrentId(), .Unordered);
+ self.locking_thread.store(Thread.getCurrentId(), .unordered);
}
return locking;
}
inline fn lock(self: *@This()) void {
const current_id = Thread.getCurrentId();
- if (self.locking_thread.load(.Unordered) == current_id and current_id != 0) {
+ if (self.locking_thread.load(.unordered) == current_id and current_id != 0) {
@panic("Deadlock detected");
}
self.impl.lock();
- self.locking_thread.store(current_id, .Unordered);
+ self.locking_thread.store(current_id, .unordered);
}
inline fn unlock(self: *@This()) void {
- assert(self.locking_thread.load(.Unordered) == Thread.getCurrentId());
- self.locking_thread.store(0, .Unordered);
+ assert(self.locking_thread.load(.unordered) == Thread.getCurrentId());
+ self.locking_thread.store(0, .unordered);
self.impl.unlock();
}
};
@@ -167,12 +167,12 @@ const FutexImpl = struct {
// - `lock bts` is smaller instruction-wise which makes it better for inlining
if (comptime builtin.target.cpu.arch.isX86()) {
const locked_bit = @ctz(locked);
- return self.state.bitSet(locked_bit, .Acquire) == 0;
+ return self.state.bitSet(locked_bit, .acquire) == 0;
}
// Acquire barrier ensures grabbing the lock happens before the critical section
// and that the previous lock holder's critical section happens before we grab the lock.
- return self.state.cmpxchgWeak(unlocked, locked, .Acquire, .Monotonic) == null;
+ return self.state.cmpxchgWeak(unlocked, locked, .acquire, .monotonic) == null;
}
fn lockSlow(self: *@This()) void {
@@ -180,7 +180,7 @@ const FutexImpl = struct {
// Avoid doing an atomic swap below if we already know the state is contended.
// An atomic swap unconditionally stores which marks the cache-line as modified unnecessarily.
- if (self.state.load(.Monotonic) == contended) {
+ if (self.state.load(.monotonic) == contended) {
Futex.wait(&self.state, contended);
}
@@ -193,7 +193,7 @@ const FutexImpl = struct {
//
// Acquire barrier ensures grabbing the lock happens before the critical section
// and that the previous lock holder's critical section happens before we grab the lock.
- while (self.state.swap(contended, .Acquire) != unlocked) {
+ while (self.state.swap(contended, .acquire) != unlocked) {
Futex.wait(&self.state, contended);
}
}
@@ -206,7 +206,7 @@ const FutexImpl = struct {
//
// Release barrier ensures the critical section happens before we let go of the lock
// and that our critical section happens before the next lock holder grabs the lock.
- const state = self.state.swap(unlocked, .Release);
+ const state = self.state.swap(unlocked, .release);
assert(state != unlocked);
if (state == contended) {
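The Mutex hunks above keep the same protocol, only respelled: the fast path takes the lock with an .acquire cmpxchg/bitSet and unlock hands it back with a .release swap. A minimal caller-side sketch, assuming post-rename std; SharedCounter and its fields are illustrative:

const std = @import("std");

const SharedCounter = struct {
    mutex: std.Thread.Mutex = .{},
    value: u64 = 0,

    fn increment(self: *SharedCounter) void {
        self.mutex.lock(); // acquire: the previous holder's writes are visible here
        defer self.mutex.unlock(); // release: our write is visible to the next holder
        self.value += 1;
    }
};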
diff --git a/lib/std/Thread/ResetEvent.zig b/lib/std/Thread/ResetEvent.zig
index 431d54e852..b7e5758780 100644
--- a/lib/std/Thread/ResetEvent.zig
+++ b/lib/std/Thread/ResetEvent.zig
@@ -96,7 +96,7 @@ const FutexImpl = struct {
fn isSet(self: *const Impl) bool {
// Acquire barrier ensures memory accesses before set() happen before we return true.
- return self.state.load(.Acquire) == is_set;
+ return self.state.load(.acquire) == is_set;
}
fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {
@@ -112,9 +112,9 @@ const FutexImpl = struct {
// Try to set the state from `unset` to `waiting` to indicate
// to the set() thread that others are blocked on the ResetEvent.
// We avoid using any strict barriers until the end when we know the ResetEvent is set.
- var state = self.state.load(.Monotonic);
+ var state = self.state.load(.monotonic);
if (state == unset) {
- state = self.state.cmpxchgStrong(state, waiting, .Monotonic, .Monotonic) orelse waiting;
+ state = self.state.cmpxchgStrong(state, waiting, .monotonic, .monotonic) orelse waiting;
}
// Wait until the ResetEvent is set since the state is waiting.
@@ -124,7 +124,7 @@ const FutexImpl = struct {
const wait_result = futex_deadline.wait(&self.state, waiting);
// Check if the ResetEvent was set before possibly reporting error.Timeout below.
- state = self.state.load(.Monotonic);
+ state = self.state.load(.monotonic);
if (state != waiting) {
break;
}
@@ -135,25 +135,25 @@ const FutexImpl = struct {
// Acquire barrier ensures memory accesses before set() happen before we return.
assert(state == is_set);
- self.state.fence(.Acquire);
+ self.state.fence(.acquire);
}
fn set(self: *Impl) void {
// Quick check if the ResetEvent is already set before doing the atomic swap below.
// set() could be getting called quite often and multiple threads calling swap() increases contention unnecessarily.
- if (self.state.load(.Monotonic) == is_set) {
+ if (self.state.load(.monotonic) == is_set) {
return;
}
// Mark the ResetEvent as set and unblock all waiters waiting on it if any.
// Release barrier ensures memory accesses before set() happen before the ResetEvent is observed to be "set".
- if (self.state.swap(is_set, .Release) == waiting) {
+ if (self.state.swap(is_set, .release) == waiting) {
Futex.wake(&self.state, std.math.maxInt(u32));
}
}
fn reset(self: *Impl) void {
- self.state.store(unset, .Monotonic);
+ self.state.store(unset, .monotonic);
}
};
@@ -254,7 +254,7 @@ test "broadcast" {
counter: std.atomic.Value(usize) = std.atomic.Value(usize).init(num_threads),
fn wait(self: *@This()) void {
- if (self.counter.fetchSub(1, .AcqRel) == 1) {
+ if (self.counter.fetchSub(1, .acq_rel) == 1) {
self.event.set();
}
}
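ResetEvent's protocol above is a one-way publish: set() swaps the state with .release and wakes waiters, while isSet()/wait() observe it with .acquire. A minimal sketch of that handshake, assuming post-rename std; produce, consume, and shared are illustrative names:

const std = @import("std");

var event: std.Thread.ResetEvent = .{};
var shared: u32 = 0;

fn produce() void {
    shared = 42; // plain write, published by the release inside event.set()
    event.set();
}

fn consume() u32 {
    event.wait(); // the acquire inside wait() makes the write to shared visible
    return shared;
}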
diff --git a/lib/std/Thread/RwLock.zig b/lib/std/Thread/RwLock.zig
index 80207dee00..2152c0756c 100644
--- a/lib/std/Thread/RwLock.zig
+++ b/lib/std/Thread/RwLock.zig
@@ -179,9 +179,9 @@ pub const DefaultRwLock = struct {
pub fn tryLock(rwl: *DefaultRwLock) bool {
if (rwl.mutex.tryLock()) {
- const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ const state = @atomicLoad(usize, &rwl.state, .seq_cst);
if (state & READER_MASK == 0) {
- _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .seq_cst);
return true;
}
@@ -192,34 +192,34 @@ pub const DefaultRwLock = struct {
}
pub fn lock(rwl: *DefaultRwLock) void {
- _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .seq_cst);
rwl.mutex.lock();
- const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .SeqCst);
+ const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .seq_cst);
if (state & READER_MASK != 0)
rwl.semaphore.wait();
}
pub fn unlock(rwl: *DefaultRwLock) void {
- _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .seq_cst);
rwl.mutex.unlock();
}
pub fn tryLockShared(rwl: *DefaultRwLock) bool {
- const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ const state = @atomicLoad(usize, &rwl.state, .seq_cst);
if (state & (IS_WRITING | WRITER_MASK) == 0) {
_ = @cmpxchgStrong(
usize,
&rwl.state,
state,
state + READER,
- .SeqCst,
- .SeqCst,
+ .seq_cst,
+ .seq_cst,
) orelse return true;
}
if (rwl.mutex.tryLock()) {
- _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
rwl.mutex.unlock();
return true;
}
@@ -228,25 +228,25 @@ pub const DefaultRwLock = struct {
}
pub fn lockShared(rwl: *DefaultRwLock) void {
- var state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ var state = @atomicLoad(usize, &rwl.state, .seq_cst);
while (state & (IS_WRITING | WRITER_MASK) == 0) {
state = @cmpxchgWeak(
usize,
&rwl.state,
state,
state + READER,
- .SeqCst,
- .SeqCst,
+ .seq_cst,
+ .seq_cst,
) orelse return;
}
rwl.mutex.lock();
- _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
rwl.mutex.unlock();
}
pub fn unlockShared(rwl: *DefaultRwLock) void {
- const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .SeqCst);
+ const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .seq_cst);
if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
rwl.semaphore.post();
@@ -318,12 +318,12 @@ test "concurrent access" {
self.rwl.lockShared();
defer self.rwl.unlockShared();
- if (self.writes >= num_writes or self.reads.load(.Unordered) >= num_reads)
+ if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
break;
try self.check();
- _ = self.reads.fetchAdd(1, .Monotonic);
+ _ = self.reads.fetchAdd(1, .monotonic);
}
}
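DefaultRwLock above packs the reader count and writer bits into one state word, now manipulated with .seq_cst operations. A caller-side sketch of the shared/exclusive split exercised by the concurrent-access test, assuming post-rename std; Registry and its fields are illustrative:

const std = @import("std");

const Registry = struct {
    rwl: std.Thread.RwLock = .{},
    entries: u32 = 0,

    fn read(self: *Registry) u32 {
        self.rwl.lockShared(); // multiple readers may hold this concurrently
        defer self.rwl.unlockShared();
        return self.entries;
    }

    fn add(self: *Registry) void {
        self.rwl.lock(); // exclusive: no readers or other writers while held
        defer self.rwl.unlock();
        self.entries += 1;
    }
};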
diff --git a/lib/std/Thread/WaitGroup.zig b/lib/std/Thread/WaitGroup.zig
index d85188fa78..cbc3ff0c8f 100644
--- a/lib/std/Thread/WaitGroup.zig
+++ b/lib/std/Thread/WaitGroup.zig
@@ -10,22 +10,22 @@ state: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
event: std.Thread.ResetEvent = .{},
pub fn start(self: *WaitGroup) void {
- const state = self.state.fetchAdd(one_pending, .Monotonic);
+ const state = self.state.fetchAdd(one_pending, .monotonic);
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
}
pub fn finish(self: *WaitGroup) void {
- const state = self.state.fetchSub(one_pending, .Release);
+ const state = self.state.fetchSub(one_pending, .release);
assert((state / one_pending) > 0);
if (state == (one_pending | is_waiting)) {
- self.state.fence(.Acquire);
+ self.state.fence(.acquire);
self.event.set();
}
}
pub fn wait(self: *WaitGroup) void {
- const state = self.state.fetchAdd(is_waiting, .Acquire);
+ const state = self.state.fetchAdd(is_waiting, .acquire);
assert(state & is_waiting == 0);
if ((state / one_pending) > 0) {
@@ -34,12 +34,12 @@ pub fn wait(self: *WaitGroup) void {
}
pub fn reset(self: *WaitGroup) void {
- self.state.store(0, .Monotonic);
+ self.state.store(0, .monotonic);
self.event.reset();
}
pub fn isDone(wg: *WaitGroup) bool {
- const state = wg.state.load(.Acquire);
+ const state = wg.state.load(.acquire);
assert(state & is_waiting == 0);
return (state / one_pending) == 0;
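WaitGroup above encodes a pending count plus a waiting bit: start() bumps the count with .monotonic, finish() drops it with .release, and the finish() that brings the count to zero while wait() is parked sets the ResetEvent. A minimal usage sketch, assuming post-rename std; worker and runJobs are illustrative names:

const std = @import("std");

fn worker(wg: *std.Thread.WaitGroup) void {
    defer wg.finish();
    // ... do some work ...
}

fn runJobs(jobs: usize) !void {
    var wg: std.Thread.WaitGroup = .{};
    var i: usize = 0;
    while (i < jobs) : (i += 1) {
        wg.start();
        const t = try std.Thread.spawn(.{}, worker, .{&wg});
        t.detach();
    }
    wg.wait(); // blocks until every start() has a matching finish()
}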