Diffstat (limited to 'lib/std')
 lib/std/atomic/int.zig    | 13
 lib/std/atomic/queue.zig  | 20
 lib/std/atomic/stack.zig  | 31
 lib/std/event/channel.zig | 23
 lib/std/event/lock.zig    | 36
 lib/std/event/rwlock.zig  | 32
 6 files changed, 72 insertions(+), 83 deletions(-)
diff --git a/lib/std/atomic/int.zig b/lib/std/atomic/int.zig
index 94985b914f..446059e7ef 100644
--- a/lib/std/atomic/int.zig
+++ b/lib/std/atomic/int.zig
@@ -1,6 +1,3 @@
-const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-
 /// Thread-safe, lock-free integer
 pub fn Int(comptime T: type) type {
     return struct {
@@ -14,16 +11,16 @@ pub fn Int(comptime T: type) type {
 
         /// Returns previous value
         pub fn incr(self: *Self) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Add, 1, .SeqCst);
         }
 
         /// Returns previous value
         pub fn decr(self: *Self) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Sub, 1, .SeqCst);
         }
 
         pub fn get(self: *Self) T {
-            return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
+            return @atomicLoad(T, &self.unprotected_value, .SeqCst);
         }
 
         pub fn set(self: *Self, new_value: T) void {
@@ -31,11 +28,11 @@ pub fn Int(comptime T: type) type {
         }
 
         pub fn xchg(self: *Self, new_value: T) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Xchg, new_value, .SeqCst);
         }
 
         pub fn fetchAdd(self: *Self, op: T) T {
-            return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, op, AtomicOrder.SeqCst);
+            return @atomicRmw(T, &self.unprotected_value, .Add, op, .SeqCst);
         }
     };
 }
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 1a0f39587e..52e200e7a2 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -1,7 +1,5 @@
 const std = @import("../std.zig");
 const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
 const assert = std.debug.assert;
 const expect = std.testing.expect;
 
@@ -145,7 +143,7 @@ const Context = struct {
     put_sum: isize,
     get_sum: isize,
     get_count: usize,
-    puts_done: u8, // TODO make this a bool
+    puts_done: bool,
 };
 
 // TODO add lazy evaluated build options and then put puts_per_thread behind
@@ -169,7 +167,7 @@ test "std.atomic.Queue" {
         .queue = &queue,
         .put_sum = 0,
         .get_sum = 0,
-        .puts_done = 0,
+        .puts_done = false,
         .get_count = 0,
     };
 
@@ -182,7 +180,7 @@ test "std.atomic.Queue" {
             }
         }
         expect(!context.queue.isEmpty());
-        context.puts_done = 1;
+        context.puts_done = true;
         {
             var i: usize = 0;
             while (i < put_thread_count) : (i += 1) {
@@ -204,7 +202,7 @@ test "std.atomic.Queue" {
 
         for (putters) |t|
             t.wait();
-        @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+        @atomicStore(bool, &context.puts_done, true, .SeqCst);
         for (getters) |t|
             t.wait();
 
@@ -231,25 +229,25 @@ fn startPuts(ctx: *Context) u8 {
         std.time.sleep(1); // let the os scheduler be our fuzz
         const x = @bitCast(i32, r.random.scalar(u32));
         const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
-        node.* = Queue(i32).Node{
+        node.* = .{
             .prev = undefined,
             .next = undefined,
             .data = x,
         };
         ctx.queue.put(node);
-        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+        _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
     }
     return 0;
 }
 
 fn startGets(ctx: *Context) u8 {
     while (true) {
-        const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+        const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
 
         while (ctx.queue.get()) |node| {
             std.time.sleep(1); // let the os scheduler be our fuzz
-            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
-            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+            _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+            _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
         }
 
         if (last) return 0;
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 0f67a257cc..092dce15b0 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -1,6 +1,5 @@
 const assert = std.debug.assert;
 const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
 const expect = std.testing.expect;
 
 /// Many reader, many writer, non-allocating, thread-safe
@@ -11,7 +10,7 @@ pub fn Stack(comptime T: type) type {
         root: ?*Node,
         lock: @TypeOf(lock_init),
 
-        const lock_init = if (builtin.single_threaded) {} else @as(u8, 0);
+        const lock_init = if (builtin.single_threaded) {} else false;
 
         pub const Self = @This();
 
@@ -31,7 +30,7 @@ pub fn Stack(comptime T: type) type {
         /// being the first item in the stack, returns the other item that was there.
         pub fn pushFirst(self: *Self, node: *Node) ?*Node {
             node.next = null;
-            return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+            return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst);
         }
 
         pub fn push(self: *Self, node: *Node) void {
@@ -39,8 +38,8 @@ pub fn Stack(comptime T: type) type {
                 node.next = self.root;
                 self.root = node;
             } else {
-                while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
-                defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+                while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+                defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
 
                 node.next = self.root;
                 self.root = node;
@@ -53,8 +52,8 @@ pub fn Stack(comptime T: type) type {
                 self.root = root.next;
                 return root;
             } else {
-                while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
-                defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+                while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
+                defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
 
                 const root = self.root orelse return null;
                 self.root = root.next;
@@ -63,7 +62,7 @@ pub fn Stack(comptime T: type) type {
         }
 
         pub fn isEmpty(self: *Self) bool {
-            return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
+            return @atomicLoad(?*Node, &self.root, .SeqCst) == null;
         }
     };
 }
@@ -75,7 +74,7 @@ const Context = struct {
     put_sum: isize,
     get_sum: isize,
     get_count: usize,
-    puts_done: u8, // TODO make this a bool
+    puts_done: bool,
 };
 // TODO add lazy evaluated build options and then put puts_per_thread behind
 // some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
@@ -98,7 +97,7 @@ test "std.atomic.stack" {
         .stack = &stack,
         .put_sum = 0,
         .get_sum = 0,
-        .puts_done = 0,
+        .puts_done = false,
         .get_count = 0,
     };
 
@@ -109,7 +108,7 @@ test "std.atomic.stack" {
             expect(startPuts(&context) == 0);
         }
     }
-    context.puts_done = 1;
+    context.puts_done = true;
     {
         var i: usize = 0;
         while (i < put_thread_count) : (i += 1) {
@@ -128,7 +127,7 @@ test "std.atomic.stack" {
 
        for (putters) |t|
            t.wait();
-        @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
+        @atomicStore(bool, &context.puts_done, true, .SeqCst);
        for (getters) |t|
            t.wait();
    }
@@ -158,19 +157,19 @@ fn startPuts(ctx: *Context) u8 {
             .data = x,
         };
         ctx.stack.push(node);
-        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+        _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
     }
     return 0;
 }
 
 fn startGets(ctx: *Context) u8 {
     while (true) {
-        const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+        const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
 
         while (ctx.stack.pop()) |node| {
             std.time.sleep(1); // let the os scheduler be our fuzz
-            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
-            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+            _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
+            _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
         }
 
         if (last) return 0;
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index 3c5b48d047..83c77bcac5 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -14,8 +14,8 @@ pub fn Channel(comptime T: type) type {
         putters: std.atomic.Queue(PutNode),
         get_count: usize,
         put_count: usize,
-        dispatch_lock: u8, // TODO make this a bool
-        need_dispatch: u8, // TODO make this a bool
+        dispatch_lock: bool,
+        need_dispatch: bool,
 
         // simple fixed size ring buffer
         buffer_nodes: []T,
@@ -62,8 +62,8 @@ pub fn Channel(comptime T: type) type {
             .buffer_len = 0,
             .buffer_nodes = buffer,
             .buffer_index = 0,
-            .dispatch_lock = 0,
-            .need_dispatch = 0,
+            .dispatch_lock = false,
+            .need_dispatch = false,
             .getters = std.atomic.Queue(GetNode).init(),
             .putters = std.atomic.Queue(PutNode).init(),
             .or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
@@ -165,15 +165,14 @@ pub fn Channel(comptime T: type) type {
 
         fn dispatch(self: *SelfChannel) void {
             // set the "need dispatch" flag
-            @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
+            @atomicStore(bool, &self.need_dispatch, true, .SeqCst);
 
             lock: while (true) {
                 // set the lock flag
-                const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst);
-                if (prev_lock != 0) return;
+                if (@atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst)) return;
 
                 // clear the need_dispatch flag since we're about to do it
-                @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
+                @atomicStore(bool, &self.need_dispatch, false, .SeqCst);
 
                 while (true) {
                     one_dispatch: {
@@ -250,14 +249,12 @@ pub fn Channel(comptime T: type) type {
                 }
 
                 // clear need-dispatch flag
-                const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
-                if (need_dispatch != 0) continue;
+                if (@atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst)) continue;
 
-                const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst);
-                assert(my_lock != 0);
+                assert(@atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst));
 
                 // we have to check again now that we unlocked
-                if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) continue :lock;
+                if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock;
 
                 return;
             }
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index 1bb51261d7..ff1f738c5e 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -11,9 +11,9 @@ const Loop = std.event.Loop;
 /// Allows only one actor to hold the lock.
 /// TODO: make this API also work in blocking I/O mode.
 pub const Lock = struct {
-    shared_bit: u8, // TODO make this a bool
+    shared: bool,
     queue: Queue,
-    queue_empty_bit: u8, // TODO make this a bool
+    queue_empty: bool,
 
     const Queue = std.atomic.Queue(anyframe);
 
@@ -31,20 +31,19 @@ pub const Lock = struct {
             }
 
             // We need to release the lock.
-            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
-            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
+            @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
+            @atomicStore(bool, &self.lock.shared, false, .SeqCst);
 
             // There might be a queue item. If we know the queue is empty, we can be done,
             // because the other actor will try to obtain the lock.
             // But if there's a queue item, we are the actor which must loop and attempt
             // to grab the lock again.
-            if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
+            if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
                 return;
             }
 
             while (true) {
-                const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst);
-                if (old_bit != 0) {
+                if (@atomicRmw(bool, &self.lock.shared, .Xchg, true, .SeqCst)) {
                     // We did not obtain the lock. Great, the queue is someone else's problem.
                     return;
                 }
@@ -56,11 +55,11 @@ pub const Lock = struct {
                 }
 
                 // Release the lock again.
-                @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
-                @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
+                @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst);
+                @atomicStore(bool, &self.lock.shared, false, .SeqCst);
 
                 // Find out if we can be done.
-                if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
+                if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) {
                     return;
                 }
             }
@@ -69,24 +68,24 @@ pub const Lock = struct {
 
     pub fn init() Lock {
         return Lock{
-            .shared_bit = 0,
+            .shared = false,
             .queue = Queue.init(),
-            .queue_empty_bit = 1,
+            .queue_empty = true,
         };
     }
 
     pub fn initLocked() Lock {
         return Lock{
-            .shared_bit = 1,
+            .shared = true,
             .queue = Queue.init(),
-            .queue_empty_bit = 1,
+            .queue_empty = true,
         };
     }
 
     /// Must be called when not locked. Not thread safe.
     /// All calls to acquire() and release() must complete before calling deinit().
     pub fn deinit(self: *Lock) void {
-        assert(self.shared_bit == 0);
+        assert(!self.shared);
         while (self.queue.get()) |node| resume node.data;
     }
 
@@ -99,12 +98,11 @@ pub const Lock = struct {
 
         // At this point, we are in the queue, so we might have already been resumed.
 
-        // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
+        // We set this bit so that later we can rely on the fact, that if queue_empty == true, some actor
         // will attempt to grab the lock.
-        @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
+        @atomicStore(bool, &self.queue_empty, false, .SeqCst);
 
-        const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
-        if (old_bit == 0) {
+        if (!@atomicRmw(bool, &self.shared, .Xchg, true, .SeqCst)) {
             if (self.queue.get()) |node| {
                 // Whether this node is us or someone else, we tail resume it.
                 resume node.data;
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index f4b13d008b..425088063f 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -16,8 +16,8 @@ pub const RwLock = struct {
     shared_state: State,
     writer_queue: Queue,
     reader_queue: Queue,
-    writer_queue_empty_bit: u8, // TODO make this a bool
-    reader_queue_empty_bit: u8, // TODO make this a bool
+    writer_queue_empty: bool,
+    reader_queue_empty: bool,
     reader_lock_count: usize,
 
     const State = enum(u8) {
@@ -40,7 +40,7 @@ pub const RwLock = struct {
                 return;
             }
 
-            @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(bool, &self.lock.reader_queue_empty, true, .SeqCst);
             if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                 // Didn't unlock. Someone else's problem.
                 return;
@@ -62,7 +62,7 @@ pub const RwLock = struct {
             }
 
             // We need to release the write lock. Check if any readers are waiting to grab the lock.
-            if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
+            if (!@atomicLoad(bool, &self.lock.reader_queue_empty, .SeqCst)) {
                 // Switch to a read lock.
                 @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
                 while (self.lock.reader_queue.get()) |node| {
@@ -71,7 +71,7 @@ pub const RwLock = struct {
                 return;
             }
 
-            @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(bool, &self.lock.writer_queue_empty, true, .SeqCst);
             @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
 
             self.lock.commonPostUnlock();
@@ -79,12 +79,12 @@
     };
 
     pub fn init() RwLock {
-        return RwLock{
+        return .{
             .shared_state = .Unlocked,
             .writer_queue = Queue.init(),
-            .writer_queue_empty_bit = 1,
+            .writer_queue_empty = true,
             .reader_queue = Queue.init(),
-            .reader_queue_empty_bit = 1,
+            .reader_queue_empty = true,
             .reader_lock_count = 0,
         };
     }
@@ -111,9 +111,9 @@ pub const RwLock = struct {
 
        // At this point, we are in the reader_queue, so we might have already been resumed.
 
-        // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
+        // We set this bit so that later we can rely on the fact, that if reader_queue_empty == true,
        // some actor will attempt to grab the lock.
-        @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
+        @atomicStore(bool, &self.reader_queue_empty, false, .SeqCst);
 
        // Here we don't care if we are the one to do the locking or if it was already locked for reading.
        const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -142,9 +142,9 @@ pub const RwLock = struct {
 
        // At this point, we are in the writer_queue, so we might have already been resumed.
 
-        // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
+        // We set this bit so that later we can rely on the fact, that if writer_queue_empty == true,
        // some actor will attempt to grab the lock.
-        @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
+        @atomicStore(bool, &self.writer_queue_empty, false, .SeqCst);
 
        // Here we must be the one to acquire the write lock. It cannot already be locked.
        if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -165,7 +165,7 @@
        // obtain the lock.
        // But if there's a writer_queue item or a reader_queue item,
        // we are the actor which must loop and attempt to grab the lock again.
-        if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) {
+        if (!@atomicLoad(bool, &self.writer_queue_empty, .SeqCst)) {
            if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) != null) {
                // We did not obtain the lock. Great, the queues are someone else's problem.
                return;
@@ -176,12 +176,12 @@
                return;
            }
            // Release the lock again.
-            @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(bool, &self.writer_queue_empty, true, .SeqCst);
            @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
            continue;
        }
 
-        if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) {
+        if (!@atomicLoad(bool, &self.reader_queue_empty, .SeqCst)) {
            if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst) != null) {
                // We did not obtain the lock. Great, the queues are someone else's problem.
                return;
@@ -195,7 +195,7 @@
                return;
            }
            // Release the lock again.
-            @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(bool, &self.reader_queue_empty, true, .SeqCst);
            if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                // Didn't unlock. Someone else's problem.
                return;
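
The whole patch repeats one idiom, so it is worth spelling out once: @atomicRmw returns the value the memory held before the operation, so an .Xchg of true on a bool flag is a test-and-set, and the old u8 fields holding 0/1 (plus their != 0 comparisons) become plain bools. Below is a minimal standalone sketch of the spin-lock pattern that push/pop in stack.zig now use; the names locked, acquire, and release are illustrative, not identifiers from the patch.

    const std = @import("std");
    const assert = std.debug.assert;

    var locked: bool = false;

    fn acquire() void {
        // Spin until the previous value was `false`, i.e. until this
        // thread is the one that flipped the flag from unlocked to locked.
        while (@atomicRmw(bool, &locked, .Xchg, true, .SeqCst)) {}
    }

    fn release() void {
        // The previous value must have been `true`: we held the lock.
        assert(@atomicRmw(bool, &locked, .Xchg, false, .SeqCst));
    }

The other mechanical change relies on enum literal coercion: .Add, .Xchg, and .SeqCst coerce to builtin.AtomicRmwOp and builtin.AtomicOrder from the builtins' parameter types, which is why the const AtomicOrder = builtin.AtomicOrder; style imports can be deleted outright.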
