author    Veikka Tuominen <git@vexu.eu>    2024-03-11 22:42:32 +0200
committer GitHub <noreply@github.com>      2024-03-11 22:42:32 +0200
commit    4f782d1e853accbe1c4bfab2617c3813d4b1e59f (patch)
tree      0eb768171ecfb058fba72d199afc951af206f8fb /lib/std/Thread/RwLock.zig
parent    d0c06ca7127110a8afeb0ef524a197049892db21 (diff)
parent    6067d39522f939c08dd3f3ea4fb5889ff0024e72 (diff)
Merge pull request #18994 from ExpidusOS/feat/container-layout-rename-fields
std.builtin: make enum fields lowercase
Diffstat (limited to 'lib/std/Thread/RwLock.zig')
-rw-r--r--  lib/std/Thread/RwLock.zig  |  32
1 file changed, 16 insertions(+), 16 deletions(-)
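
For context on the diff below: PR #18994 renames the fields of std.builtin.AtomicOrder (and related std.builtin enums) from CamelCase to lowercase, so every ordering argument at a call site is respelled, with no change in behavior. A minimal before/after sketch (the global `state` variable here is illustrative, not from the patch):

    const std = @import("std");

    var state: usize = 0;

    fn loadState() usize {
        // Before #18994: @atomicLoad(usize, &state, .SeqCst);
        // After: the same ordering, spelled with a lowercase field name.
        return @atomicLoad(usize, &state, .seq_cst);
    }
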
diff --git a/lib/std/Thread/RwLock.zig b/lib/std/Thread/RwLock.zig
index 80207dee00..2152c0756c 100644
--- a/lib/std/Thread/RwLock.zig
+++ b/lib/std/Thread/RwLock.zig
@@ -179,9 +179,9 @@ pub const DefaultRwLock = struct {
pub fn tryLock(rwl: *DefaultRwLock) bool {
if (rwl.mutex.tryLock()) {
- const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ const state = @atomicLoad(usize, &rwl.state, .seq_cst);
if (state & READER_MASK == 0) {
- _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .seq_cst);
return true;
}
@@ -192,34 +192,34 @@ pub const DefaultRwLock = struct {
}
pub fn lock(rwl: *DefaultRwLock) void {
- _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .seq_cst);
rwl.mutex.lock();
- const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .SeqCst);
+ const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .seq_cst);
if (state & READER_MASK != 0)
rwl.semaphore.wait();
}
pub fn unlock(rwl: *DefaultRwLock) void {
- _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .seq_cst);
rwl.mutex.unlock();
}
pub fn tryLockShared(rwl: *DefaultRwLock) bool {
- const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ const state = @atomicLoad(usize, &rwl.state, .seq_cst);
if (state & (IS_WRITING | WRITER_MASK) == 0) {
_ = @cmpxchgStrong(
usize,
&rwl.state,
state,
state + READER,
- .SeqCst,
- .SeqCst,
+ .seq_cst,
+ .seq_cst,
) orelse return true;
}
if (rwl.mutex.tryLock()) {
- _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
rwl.mutex.unlock();
return true;
}
@@ -228,25 +228,25 @@ pub const DefaultRwLock = struct {
}
pub fn lockShared(rwl: *DefaultRwLock) void {
- var state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ var state = @atomicLoad(usize, &rwl.state, .seq_cst);
while (state & (IS_WRITING | WRITER_MASK) == 0) {
state = @cmpxchgWeak(
usize,
&rwl.state,
state,
state + READER,
- .SeqCst,
- .SeqCst,
+ .seq_cst,
+ .seq_cst,
) orelse return;
}
rwl.mutex.lock();
- _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
rwl.mutex.unlock();
}
pub fn unlockShared(rwl: *DefaultRwLock) void {
- const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .SeqCst);
+ const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .seq_cst);
if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
rwl.semaphore.post();
@@ -318,12 +318,12 @@ test "concurrent access" {
self.rwl.lockShared();
defer self.rwl.unlockShared();
- if (self.writes >= num_writes or self.reads.load(.Unordered) >= num_reads)
+ if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
break;
try self.check();
- _ = self.reads.fetchAdd(1, .Monotonic);
+ _ = self.reads.fetchAdd(1, .monotonic);
}
}
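
As a usage sketch (assuming a Zig 0.12-era standard library; this test is illustrative and not part of the commit), the same lowercase fields appear anywhere an ordering is spelled out, e.g. on std.atomic.Value, matching the "concurrent access" test updated above:

    const std = @import("std");

    test "lowercase atomic orderings" {
        var reads = std.atomic.Value(usize).init(0);
        // fetchAdd/load take the renamed lowercase orderings.
        _ = reads.fetchAdd(1, .monotonic);
        try std.testing.expectEqual(@as(usize, 1), reads.load(.unordered));
    }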