| author | Andrew Kelley <andrew@ziglang.org> | 2019-11-13 03:06:55 +0000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2019-11-13 03:06:55 +0000 |
| commit | 8bae70454dabe77dfe7e5344e59ca2180d63af51 (patch) | |
| tree | bfb8f584993bf720414f5ab493b42aadd3c24e72 | /lib/std/event |
| parent | 32b37e695aa0581b863a395e0a28b7b4aa76c07d (diff) | |
| parent | 41914321b4593e3ed246cadda705e1076ab670d7 (diff) | |
| download | zig-8bae70454dabe77dfe7e5344e59ca2180d63af51.tar.gz | zig-8bae70454dabe77dfe7e5344e59ca2180d63af51.zip |
Merge pull request #3675 from Vexu/atomic-store
Add @atomicStore builtin
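
The new builtin performs an atomic store without returning the previous value. Throughout `lib/std/event` it replaces `@atomicRmw` with `.Xchg` in the places where the exchanged-out value was being discarded. A minimal sketch of the before/after pattern (the `need_dispatch` flag and the `setFlag*` function names are illustrative, mirroring the channel.zig usage below; Zig syntax as of this commit):

```zig
var need_dispatch: u8 = 0;

// Before: a read-modify-write whose exchanged-out value is discarded.
fn setFlagOld() void {
    _ = @atomicRmw(u8, &need_dispatch, .Xchg, 1, .SeqCst);
}

// After: a plain sequentially consistent store states the intent directly.
fn setFlagNew() void {
    @atomicStore(u8, &need_dispatch, 1, .SeqCst);
}
```

Besides reading better, a store that returns nothing leaves the backend free to emit a plain atomic store rather than an exchange instruction.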
Diffstat (limited to 'lib/std/event')
| -rw-r--r-- | lib/std/event/channel.zig | 4 |
| -rw-r--r-- | lib/std/event/future.zig | 4 |
| -rw-r--r-- | lib/std/event/lock.zig | 10 |
| -rw-r--r-- | lib/std/event/loop.zig | 4 |
| -rw-r--r-- | lib/std/event/rwlock.zig | 18 |
5 files changed, 20 insertions, 20 deletions
```diff
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index 2ea99d234d..ac5a65e1b0 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -161,7 +161,7 @@ pub fn Channel(comptime T: type) type {
 
         fn dispatch(self: *SelfChannel) void {
             // set the "need dispatch" flag
-            _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
 
             lock: while (true) {
                 // set the lock flag
@@ -169,7 +169,7 @@
                 if (prev_lock != 0) return;
 
                 // clear the need_dispatch flag since we're about to do it
-                _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
+                @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
 
                 while (true) {
                     one_dispatch: {
diff --git a/lib/std/event/future.zig b/lib/std/event/future.zig
index 43593b348a..5261db990c 100644
--- a/lib/std/event/future.zig
+++ b/lib/std/event/future.zig
@@ -62,12 +62,12 @@
         pub async fn start(self: *Self) ?*T {
             const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
             switch (state) {
-                1 => {
+                .Started => {
                     const held = self.lock.acquire();
                     held.release();
                     return &self.data;
                 },
-                2 => return &self.data,
+                .Finished => return &self.data,
                 else => unreachable,
             }
         }
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index 576a09064f..a95c5bf7e2 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -31,8 +31,8 @@ pub const Lock = struct {
             }
 
             // We need to release the lock.
-            _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
 
             // There might be a queue item. If we know the queue is empty, we can be done,
             // because the other actor will try to obtain the lock.
@@ -56,8 +56,8 @@
             }
 
             // Release the lock again.
-            _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
 
             // Find out if we can be done.
             if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
@@ -101,7 +101,7 @@
 
         // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
        // will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
 
        const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
        if (old_bit == 0) {
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 588cd3c8b5..8f01c19746 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -814,7 +814,7 @@ pub const Loop = struct {
                 _ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
             },
             .linux => {
-                _ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+                @atomicStore(i32, &self.os_data.fs_queue_item, 1, AtomicOrder.SeqCst);
                 const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
                 switch (os.linux.getErrno(rc)) {
                     0 => {},
@@ -837,7 +837,7 @@
     fn posixFsRun(self: *Loop) void {
         while (true) {
             if (builtin.os == .linux) {
-                _ = @atomicRmw(i32, &self.os_data.fs_queue_item, .Xchg, 0, .SeqCst);
+                @atomicStore(i32, &self.os_data.fs_queue_item, 0, .SeqCst);
             }
             while (self.os_data.fs_queue.get()) |node| {
                 switch (node.data.msg) {
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index 3a64b9df8c..ec4ab8f6d0 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -40,7 +40,7 @@ pub const RwLock = struct {
                 return;
             }
 
-            _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
             if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                 // Didn't unlock. Someone else's problem.
                 return;
@@ -64,15 +64,15 @@
             // We need to release the write lock. Check if any readers are waiting to grab the lock.
             if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
                 // Switch to a read lock.
-                _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, .ReadLock, .SeqCst);
+                @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
                 while (self.lock.reader_queue.get()) |node| {
                     global_event_loop.onNextTick(node);
                 }
                 return;
             }
 
-            _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
+            @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
 
             self.lock.commonPostUnlock();
         }
@@ -113,7 +113,7 @@
 
        // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
        // some actor will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
 
        // Here we don't care if we are the one to do the locking or if it was already locked for reading.
        const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -144,7 +144,7 @@
 
        // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
        // some actor will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
 
        // Here we must be the one to acquire the write lock. It cannot already be locked.
        if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -176,8 +176,8 @@
                     return;
                 }
                 // Release the lock again.
-                _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-                _ = @atomicRmw(State, &self.shared_state, .Xchg, .Unlocked, .SeqCst);
+                @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+                @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
                 continue;
             }
 
@@ -195,7 +195,7 @@
                     return;
                 }
                 // Release the lock again.
-                _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+                @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
                 if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                     // Didn't unlock. Someone else's problem.
                     return;
```

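One non-mechanical change rides along in the future.zig hunk above: the `switch` arms move from raw integers (`1`, `2`) to enum literals (`.Started`, `.Finished`), matching the `Available` type already passed to `@cmpxchgStrong`. A minimal sketch of that tri-state pattern, with the enum reconstructed from the hunk's context (the `tryStart` name and `bool` return are illustrative, not from the commit):

```zig
const Available = enum {
    NotStarted,
    Started,
    Finished,
};

var available: Available = .NotStarted;

/// Returns true if the caller won the race to start the work.
fn tryStart() bool {
    // On success, @cmpxchgStrong returns null; on failure it returns the
    // value it observed, which here can only be Started or Finished.
    const state = @cmpxchgStrong(Available, &available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return true;
    return switch (state) {
        .Started, .Finished => false,
        .NotStarted => unreachable, // cmpxchg cannot fail while observing the expected value
    };
}
```

Switching on the enum rather than on bare integers lets the compiler check exhaustiveness against `Available` instead of hiding the invariant behind an `else => unreachable` over arbitrary numbers.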