| field | value | date |
|---|---|---|
| author | Andrew Kelley <superjoe30@gmail.com> | 2018-10-03 13:19:10 -0400 |
| committer | Andrew Kelley <superjoe30@gmail.com> | 2018-10-03 13:19:10 -0400 |
| commit | 66cb75d1148fffdd161e7829b9e27aa52f0f1616 (patch) | |
| tree | f682927047f57940ebbd33ab81e254d69b731a39 /std/mutex.zig | |
| parent | acefcdbca58efd97bf5346eb7dae22c49efa1a3d (diff) | |
std.Mutex: implement blocking mutexes on linux
closes #1463
Thanks to Shawn Landden for the original pull request.
This commit is based on that code.
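
For context, a minimal usage sketch of the API this commit implements. The caller code is hypothetical and not part of the commit; it assumes only the `init`/`acquire`/`release` names visible in the diff below:

```zig
const std = @import("std");

// Hypothetical globals, for illustration only.
var counter_mutex = std.Mutex.init();
var counter: i32 = 0;

fn incrementCounter() void {
    // acquire() blocks until the mutex is free: on Linux it sleeps in
    // futex_wait instead of spinning; elsewhere it falls back to a spin lock.
    const held = counter_mutex.acquire();
    defer held.release(); // on Linux, wakes one sleeping waiter via futex_wake

    counter += 1; // protected region
}
```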
Diffstat (limited to 'std/mutex.zig')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | std/mutex.zig | 59 |

1 file changed, 54 insertions(+), 5 deletions(-)
```diff
diff --git a/std/mutex.zig b/std/mutex.zig
index 6aee87d1d7..9dc0c23d6d 100644
--- a/std/mutex.zig
+++ b/std/mutex.zig
@@ -3,25 +3,74 @@ const builtin = @import("builtin");
 const AtomicOrder = builtin.AtomicOrder;
 const AtomicRmwOp = builtin.AtomicRmwOp;
 const assert = std.debug.assert;
+const SpinLock = std.SpinLock;
+const linux = std.os.linux;
 
-/// TODO use syscalls instead of a spinlock
+/// Lock may be held only once. If the same thread
+/// tries to acquire the same mutex twice, it deadlocks.
 pub const Mutex = struct {
-    lock: u8, // TODO use a bool
+    /// 0: unlocked
+    /// 1: locked, no waiters
+    /// 2: locked, one or more waiters
+    linux_lock: @typeOf(linux_lock_init),
+
+    /// TODO better implementation than spin lock
+    spin_lock: @typeOf(spin_lock_init),
+
+    const linux_lock_init = if (builtin.os == builtin.Os.linux) i32(0) else {};
+    const spin_lock_init = if (builtin.os != builtin.Os.linux) SpinLock.init() else {};
 
     pub const Held = struct {
         mutex: *Mutex,
 
         pub fn release(self: Held) void {
-            assert(@atomicRmw(u8, &self.mutex.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+            if (builtin.os == builtin.Os.linux) {
+                // Always unlock. If the previous state was Locked-No-Waiters, then we're done.
+                // Otherwise, wake a waiter up.
+                const prev = @atomicRmw(i32, &self.mutex.linux_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.Release);
+                if (prev != 1) {
+                    assert(prev == 2);
+                    const rc = linux.futex_wake(&self.mutex.linux_lock, linux.FUTEX_WAKE, 1);
+                    switch (linux.getErrno(rc)) {
+                        0 => {},
+                        linux.EINVAL => unreachable,
+                        else => unreachable,
+                    }
+                }
+            } else {
+                SpinLock.Held.release(SpinLock.Held{ .spinlock = &self.mutex.spin_lock });
+            }
         }
     };
 
     pub fn init() Mutex {
-        return Mutex{ .lock = 0 };
+        return Mutex{
+            .linux_lock = linux_lock_init,
+            .spin_lock = spin_lock_init,
+        };
     }
 
     pub fn acquire(self: *Mutex) Held {
-        while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+        if (builtin.os == builtin.Os.linux) {
+            // First try to go from Unlocked to Locked-No-Waiters. If this succeeds, no syscalls are needed.
+            // Otherwise, we need to be in the Locked-With-Waiters state. If we are already in that state,
+            // proceed to futex_wait. Otherwise, try to go from Locked-No-Waiters to Locked-With-Waiters.
+            // If that succeeds, proceed to futex_wait. Otherwise start the whole loop over again.
+            while (@cmpxchgWeak(i32, &self.linux_lock, 0, 1, AtomicOrder.Acquire, AtomicOrder.Monotonic)) |l| {
+                if (l == 2 or
+                    @cmpxchgWeak(i32, &self.linux_lock, 1, 2, AtomicOrder.Acquire, AtomicOrder.Monotonic) == null)
+                {
+                    const rc = linux.futex_wait(&self.linux_lock, linux.FUTEX_WAIT, 2, null);
+                    switch (linux.getErrno(rc)) {
+                        0, linux.EINTR, linux.EAGAIN => continue,
+                        linux.EINVAL => unreachable,
+                        else => unreachable,
+                    }
+                }
+            }
+        } else {
+            _ = self.spin_lock.acquire();
+        }
         return Held{ .mutex = self };
     }
 };
```
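
The scheme above is the classic three-state futex mutex. Two details deserve emphasis. First, `futex_wait` is called with an expected value of 2: the kernel atomically re-checks that the lock word still equals 2 before sleeping, so a release that slips in between the failed `cmpxchg` and the syscall yields `EAGAIN` and a retry instead of a lost wakeup. Second, `release` swaps in 0 with `Release` ordering and only calls `futex_wake` when the previous value was 2, so an uncontended lock and unlock never enter the kernel. Below is a comment-only trace of one contended handoff (illustrative, not part of the commit):

```zig
// Lock word: 0 = unlocked, 1 = locked/no waiters, 2 = locked/with waiters.
//
// Thread A: cmpxchg 0 -> 1 succeeds                // word = 1, A owns the mutex
// Thread B: cmpxchg 0 -> 1 fails (sees 1)
//           cmpxchg 1 -> 2 succeeds                // word = 2, a waiter is recorded
//           futex_wait(&word, 2) sleeps            // kernel rechecks word == 2 first
// Thread A: xchg(word, 0) returns 2                // word = 0; prev != 1, so...
//           futex_wake(&word, 1)                   // ...wake exactly one waiter
// Thread B: loop restarts, cmpxchg 0 -> 1 succeeds // word = 1, B owns the mutex
```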
