path: root/lib/std/event/lock.zig
author    Vexu <git@vexu.eu>  2020-03-10 22:46:19 +0200
committer Vexu <git@vexu.eu>  2020-03-10 22:54:47 +0200
commit    ee5b00a8b90ef375d0cd4432d31e3a4ed0b6f632 (patch)
tree      c775663438485092fca336a68a5bcbbc047de818 /lib/std/event/lock.zig
parent    8dc188ebe06b5b78dcead521561858fc27e25204 (diff)
use atomic bools in std lib
Diffstat (limited to 'lib/std/event/lock.zig')
-rw-r--r--  lib/std/event/lock.zig | 36
1 file changed, 17 insertions(+), 19 deletions(-)
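
The change replaces hand-rolled 0/1 `u8` flags with `bool` operands to Zig's atomic builtins. A minimal sketch of the three builtins the diff relies on, assuming `bool` is an accepted operand type (which is what this commit puts to use); the `.SeqCst` ordering matches the code below:

const std = @import("std");
const assert = std.debug.assert;

test "atomic bool builtins" {
    var flag: bool = false;

    // Plain atomic store and load of a bool.
    @atomicStore(bool, &flag, true, .SeqCst);
    assert(@atomicLoad(bool, &flag, .SeqCst));

    // .Xchg swaps in the new value and returns the *previous* one,
    // so `true` here means the flag was already set before the swap.
    const was_set = @atomicRmw(bool, &flag, .Xchg, false, .SeqCst);
    assert(was_set);
    assert(!@atomicLoad(bool, &flag, .SeqCst));
}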
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index b9cbb5d95f..6b27bbd8c4 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -11,9 +11,9 @@ const Loop = std.event.Loop;
/// Allows only one actor to hold the lock.
/// TODO: make this API also work in blocking I/O mode.
pub const Lock = struct {
- shared_bit: u8, // TODO make this a bool
+ shared: bool,
queue: Queue,
- queue_empty_bit: u8, // TODO make this a bool
+ queue_empty: bool,
const Queue = std.atomic.Queue(anyframe);
@@ -31,20 +31,19 @@ pub const Lock = struct {
}
// We need to release the lock.
- @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
- @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
+ @atomicStore(bool, &self.lock.shared, false, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
// But if there's a queue item, we are the actor which must loop and attempt
// to grab the lock again.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
+ if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
while (true) {
- const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
- if (old_bit != 0) {
+ if (@atomicRmw(bool, &self.lock.shared, .Xchg, true, .SeqCst)) {
// We did not obtain the lock. Great, the queue is someone else's problem.
return;
}
@@ -56,11 +55,11 @@ pub const Lock = struct {
}
// Release the lock again.
- @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
- @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
+ @atomicStore(bool, &self.lock.shared, false, .SeqCst);
// Find out if we can be done.
- if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
+ if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
return;
}
}
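
Both hunks above implement the same release protocol: mark the queue as presumed empty, drop the lock, then re-check `queue_empty`; if a waiter enqueued itself in between, loop and try to re-take the lock on its behalf. A standalone sketch of that loop (a hypothetical free function, not the Lock API; resuming the queued frame is elided):

// Sketch of the release/re-check protocol; assumes the same field
// meanings as the diff (shared == lock held, queue_empty == no
// waiter has enqueued itself).
fn releaseSketch(shared: *bool, queue_empty: *bool) void {
    while (true) {
        @atomicStore(bool, queue_empty, true, .SeqCst);
        @atomicStore(bool, shared, false, .SeqCst);

        // No waiter raced with us: any later acquirer will see
        // shared == false and take the lock itself.
        if (@atomicLoad(bool, queue_empty, .SeqCst)) return;

        // A waiter is queued; try to re-take the lock so we can
        // hand it over. Xchg returning true means someone else
        // already holds it, so the queue is their problem now.
        if (@atomicRmw(bool, shared, .Xchg, true, .SeqCst)) return;

        // ... pop a queued frame and resume it here, then loop ...
    }
}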
@@ -69,24 +68,24 @@ pub const Lock = struct {
pub fn init() Lock {
return Lock{
- .shared_bit = 0,
+ .shared = false,
.queue = Queue.init(),
- .queue_empty_bit = 1,
+ .queue_empty = true,
};
}
pub fn initLocked() Lock {
return Lock{
- .shared_bit = 1,
+ .shared = true,
.queue = Queue.init(),
- .queue_empty_bit = 1,
+ .queue_empty = true,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *Lock) void {
- assert(self.shared_bit == 0);
+ assert(!self.shared);
while (self.queue.get()) |node| resume node.data;
}
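
For context, a hedged usage sketch of the constructors touched here; since deinit asserts the lock is not held, a lock created with initLocked must be released before it is destroyed:

var lock = Lock.init(); // starts unlocked: shared == false, queue_empty == true
defer lock.deinit(); // asserts !shared: every acquire must have been released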
@@ -99,12 +98,11 @@ pub const Lock = struct {
// At this point, we are in the queue, so we might have already been resumed.
- // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
+ // We set this flag so that later we can rely on the fact that if queue_empty == true, some actor
// will attempt to grab the lock.
- @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
+ @atomicStore(bool, &self.queue_empty, false, .SeqCst);
- const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
- if (old_bit == 0) {
+ if (!@atomicRmw(bool, &self.shared, .Xchg, true, .SeqCst)) {
if (self.queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
resume node.data;
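
The acquire side is the mirror image: the acquirer enqueues its frame, clears `queue_empty` so a concurrent releaser is guaranteed to notice the waiter, then attempts the same exchange. A sketch of just that decision, with a hypothetical `resumeWaiter` standing in for popping and tail-resuming a queue node:

// Sketch of the acquire-side handshake; `resumeWaiter` is a
// hypothetical stand-in for `if (self.queue.get()) |node| resume node.data`.
fn acquireSketch(shared: *bool, queue_empty: *bool, resumeWaiter: fn () void) void {
    // Publish the waiter *before* trying the lock, so a releaser
    // that sees queue_empty == false will loop and wake us.
    @atomicStore(bool, queue_empty, false, .SeqCst);

    // An old value of false means we just took the lock and are now
    // responsible for waking a waiter (possibly our own frame).
    if (!@atomicRmw(bool, shared, .Xchg, true, .SeqCst)) {
        resumeWaiter();
    }
}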