author    Andrew Kelley <andrew@ziglang.org>  2023-11-23 04:55:28 -0500
committer GitHub <noreply@github.com>         2023-11-23 04:55:28 -0500
commit    2bffd810157a8e7c0b2500a13921dd8c45694b8a (patch)
tree      2f8cfbc0403c5bd492ca68f1aa7ba6b02886ef2c /lib/std/Thread/Mutex.zig
parent    115ec25f2e4eed5033f34eaee8bf3477ff417ecc (diff)
parent    70931dbdea96d92feb60406c827e39e566317863 (diff)
Merge pull request #18085 from ziglang/std-atomics
rework std.atomic
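
This merge renames the generic atomic wrapper std.atomic.Atomic(T) to std.atomic.Value(T); the diff below also maps the old tryCompareAndSwap onto the new cmpxchgWeak. A minimal sketch of the renamed API, not taken from the change itself (the counter and bump names are illustrative; orderings use the .Acquire/.Monotonic spellings current at this commit):

const std = @import("std");

// Illustrative names (counter, bump) are not part of the change.
var counter = std.atomic.Value(u32).init(0);

fn bump() u32 {
    // fetchAdd returns the previous value; .Monotonic orders nothing
    // beyond the atomicity of the operation itself.
    return counter.fetchAdd(1, .Monotonic);
}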
Diffstat (limited to 'lib/std/Thread/Mutex.zig')
-rw-r--r--  lib/std/Thread/Mutex.zig | 27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index 0f618516b5..a9024e6c5d 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -26,7 +26,6 @@ const Mutex = @This();
const os = std.os;
const assert = std.debug.assert;
const testing = std.testing;
-const Atomic = std.atomic.Atomic;
const Thread = std.Thread;
const Futex = Thread.Futex;
@@ -67,7 +66,7 @@ else
FutexImpl;
const DebugImpl = struct {
- locking_thread: Atomic(Thread.Id) = Atomic(Thread.Id).init(0), // 0 means it's not locked.
+ locking_thread: std.atomic.Value(Thread.Id) = std.atomic.Value(Thread.Id).init(0), // 0 means it's not locked.
impl: ReleaseImpl = .{},
inline fn tryLock(self: *@This()) bool {
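
DebugImpl only records which thread currently holds the mutex, with 0 doubling as "unlocked", so that recursive locking can be caught. A hedged sketch of that check, assuming std.Thread.getCurrentId(); the surrounding functions are not shown in this hunk, and assertNotHeldByCaller is a hypothetical helper:

const std = @import("std");

// Sketch of the DebugImpl idea: catch a thread re-locking a mutex it
// already holds. assertNotHeldByCaller is a hypothetical helper.
fn assertNotHeldByCaller(owner: *std.atomic.Value(std.Thread.Id)) void {
    // .Unordered is enough for a best-effort debug check; this load
    // performs no synchronization.
    std.debug.assert(owner.load(.Unordered) != std.Thread.getCurrentId());
}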
@@ -151,37 +150,29 @@ const DarwinImpl = struct {
};
const FutexImpl = struct {
- state: Atomic(u32) = Atomic(u32).init(unlocked),
+ state: std.atomic.Value(u32) = std.atomic.Value(u32).init(unlocked),
- const unlocked = 0b00;
- const locked = 0b01;
- const contended = 0b11; // must contain the `locked` bit for x86 optimization below
-
- fn tryLock(self: *@This()) bool {
- // Lock with compareAndSwap instead of tryCompareAndSwap to avoid reporting spurious CAS failure.
- return self.lockFast("compareAndSwap");
- }
+ const unlocked: u32 = 0b00;
+ const locked: u32 = 0b01;
+ const contended: u32 = 0b11; // must contain the `locked` bit for x86 optimization below
fn lock(self: *@This()) void {
- // Lock with tryCompareAndSwap instead of compareAndSwap due to being more inline-able on LL/SC archs like ARM.
- if (!self.lockFast("tryCompareAndSwap")) {
+ if (!self.tryLock())
self.lockSlow();
- }
}
- inline fn lockFast(self: *@This(), comptime cas_fn_name: []const u8) bool {
+ fn tryLock(self: *@This()) bool {
// On x86, use `lock bts` instead of `lock cmpxchg` as:
// - they both seem to mark the cache-line as modified regardless: https://stackoverflow.com/a/63350048
// - `lock bts` is smaller instruction-wise which makes it better for inlining
if (comptime builtin.target.cpu.arch.isX86()) {
- const locked_bit = @ctz(@as(u32, locked));
+ const locked_bit = @ctz(locked);
return self.state.bitSet(locked_bit, .Acquire) == 0;
}
// Acquire barrier ensures grabbing the lock happens before the critical section
// and that the previous lock holder's critical section happens before we grab the lock.
- const casFn = @field(@TypeOf(self.state), cas_fn_name);
- return casFn(&self.state, unlocked, locked, .Acquire, .Monotonic) == null;
+ return self.state.cmpxchgWeak(unlocked, locked, .Acquire, .Monotonic) == null;
}
fn lockSlow(self: *@This()) void {
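
Taken together, the FutexImpl fast path is one cmpxchgWeak (or lock bts on x86) to acquire the lock, with spurious CAS failures absorbed by lockSlow's retry. A minimal self-contained sketch of the same pattern, assuming a plain Release store for unlock (the real Mutex also wakes futex waiters, which this omits):

const std = @import("std");

const TinyLock = struct {
    state: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

    // Mirrors the diff's tryLock: null from cmpxchgWeak means the CAS
    // succeeded and we now own the lock.
    fn tryLock(self: *TinyLock) bool {
        return self.state.cmpxchgWeak(0, 1, .Acquire, .Monotonic) == null;
    }

    fn lock(self: *TinyLock) void {
        // A spurious cmpxchgWeak failure only costs another spin here;
        // the stdlib instead parks the thread via Futex in lockSlow.
        while (!self.tryLock()) std.atomic.spinLoopHint();
    }

    fn unlock(self: *TinyLock) void {
        // Release pairs with the Acquire in tryLock, publishing the
        // critical section to the next lock holder.
        self.state.store(0, .Release);
    }
};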