path: root/std/atomic
Diffstat (limited to 'std/atomic')
-rw-r--r--  std/atomic/index.zig                                           |   8
-rw-r--r--  std/atomic/int.zig                                             |  29
-rw-r--r--  std/atomic/queue.zig (renamed from std/atomic/queue_mpmc.zig)  |  88
-rw-r--r--  std/atomic/queue_mpsc.zig                                      | 185
-rw-r--r--  std/atomic/stack.zig                                           |  32
5 files changed, 103 insertions(+), 239 deletions(-)
diff --git a/std/atomic/index.zig b/std/atomic/index.zig
index c0ea5be183..a94cff1973 100644
--- a/std/atomic/index.zig
+++ b/std/atomic/index.zig
@@ -1,9 +1,9 @@
pub const Stack = @import("stack.zig").Stack;
-pub const QueueMpsc = @import("queue_mpsc.zig").QueueMpsc;
-pub const QueueMpmc = @import("queue_mpmc.zig").QueueMpmc;
+pub const Queue = @import("queue.zig").Queue;
+pub const Int = @import("int.zig").Int;
test "std.atomic" {
_ = @import("stack.zig");
- _ = @import("queue_mpsc.zig");
- _ = @import("queue_mpmc.zig");
+ _ = @import("queue.zig");
+ _ = @import("int.zig");
}
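With these re-exports, downstream code reaches the renamed types through the std.atomic namespace. A minimal sketch of the resulting surface (same-era Zig as this diff; the test name is illustrative):

    const std = @import("std");
    const assert = std.debug.assert;

    test "std.atomic surface" {
        // all three containers are non-allocating and start empty
        var queue = std.atomic.Queue(i32).init();
        var stack = std.atomic.Stack(i32).init();
        var count = std.atomic.Int(usize).init(0);
        assert(queue.get() == null);
        assert(stack.pop() == null);
        assert(count.get() == 0);
    }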
diff --git a/std/atomic/int.zig b/std/atomic/int.zig
new file mode 100644
index 0000000000..d51454c673
--- /dev/null
+++ b/std/atomic/int.zig
@@ -0,0 +1,29 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+
+/// Thread-safe, lock-free integer
+pub fn Int(comptime T: type) type {
+ return struct {
+ unprotected_value: T,
+
+ pub const Self = this;
+
+ pub fn init(init_val: T) Self {
+ return Self{ .unprotected_value = init_val };
+ }
+
+ /// Returns previous value
+ pub fn incr(self: *Self) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+
+ /// Returns previous value
+ pub fn decr(self: *Self) T {
+ return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ pub fn get(self: *Self) T {
+ return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst);
+ }
+ };
+}
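The wrapper guarantees atomicity only per operation; a read-modify-write built from get() plus a plain store would still race. A usage sketch (same-era Zig; the test name is illustrative):

    const std = @import("std");
    const assert = std.debug.assert;

    test "std.atomic.Int usage" {
        var counter = std.atomic.Int(u32).init(0);
        // incr() and decr() return the value held *before* the operation
        assert(counter.incr() == 0);
        assert(counter.incr() == 1);
        assert(counter.decr() == 2);
        assert(counter.get() == 1);
    }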
diff --git a/std/atomic/queue_mpmc.zig b/std/atomic/queue.zig
index 7ffc9f9ccb..1fd07714e8 100644
--- a/std/atomic/queue_mpmc.zig
+++ b/std/atomic/queue.zig
@@ -2,15 +2,13 @@ const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
-/// Many producer, many consumer, non-allocating, thread-safe, lock-free
-/// This implementation has a crippling limitation - it hangs onto node
-/// memory for 1 extra get() and 1 extra put() operation - when get() returns a node, that
-/// node must not be freed until both the next get() and the next put() completes.
-pub fn QueueMpmc(comptime T: type) type {
+/// Many producer, many consumer, non-allocating, thread-safe.
+/// Uses a spinlock to protect get() and put().
+pub fn Queue(comptime T: type) type {
return struct {
- head: *Node,
- tail: *Node,
- root: Node,
+ head: ?*Node,
+ tail: ?*Node,
+ lock: u8,
pub const Self = this;
@@ -19,31 +17,48 @@ pub fn QueueMpmc(comptime T: type) type {
data: T,
};
- /// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
- pub fn init(self: *Self) void {
- self.root.next = null;
- self.head = &self.root;
- self.tail = &self.root;
+ pub fn init() Self {
+ return Self{
+ .head = null,
+ .tail = null,
+ .lock = 0,
+ };
}
pub fn put(self: *Self, node: *Node) void {
node.next = null;
- const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
- _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const opt_tail = self.tail;
+ self.tail = node;
+ if (opt_tail) |tail| {
+ tail.next = node;
+ } else {
+ assert(self.head == null);
+ self.head = node;
+ }
}
- /// node must not be freed until both the next get() and the next put() complete
pub fn get(self: *Self) ?*Node {
- var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
- while (true) {
- const node = head.next orelse return null;
- head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
- }
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const head = self.head orelse return null;
+ self.head = head.next;
+ if (head.next == null) self.tail = null;
+ return head;
+ }
+
+ pub fn isEmpty(self: *Self) bool {
+ return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) == null;
}
- /// This is a debug function that is not thread-safe.
pub fn dump(self: *Self) void {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
std.debug.warn("head: ");
dumpRecursive(self.head, 0);
std.debug.warn("tail: ");
@@ -64,12 +79,12 @@ pub fn QueueMpmc(comptime T: type) type {
};
}
-const std = @import("std");
+const std = @import("../index.zig");
const assert = std.debug.assert;
const Context = struct {
allocator: *std.mem.Allocator,
- queue: *QueueMpmc(i32),
+ queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -84,7 +99,7 @@ const Context = struct {
const puts_per_thread = 500;
const put_thread_count = 3;
-test "std.atomic.queue_mpmc" {
+test "std.atomic.Queue" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
@@ -94,8 +109,7 @@ test "std.atomic.queue_mpmc" {
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
- var queue: QueueMpmc(i32) = undefined;
- queue.init();
+ var queue = Queue(i32).init();
var context = Context{
.allocator = a,
.queue = &queue,
@@ -140,7 +154,7 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
- const node = ctx.allocator.create(QueueMpmc(i32).Node{
+ const node = ctx.allocator.create(Queue(i32).Node{
.next = undefined,
.data = x,
}) catch unreachable;
@@ -164,17 +178,16 @@ fn startGets(ctx: *Context) u8 {
}
}
-test "std.atomic.queue_mpmc single-threaded" {
- var queue: QueueMpmc(i32) = undefined;
- queue.init();
+test "std.atomic.Queue single-threaded" {
+ var queue = Queue(i32).init();
- var node_0 = QueueMpmc(i32).Node{
+ var node_0 = Queue(i32).Node{
.data = 0,
.next = undefined,
};
queue.put(&node_0);
- var node_1 = QueueMpmc(i32).Node{
+ var node_1 = Queue(i32).Node{
.data = 1,
.next = undefined,
};
@@ -182,13 +195,13 @@ test "std.atomic.queue_mpmc single-threaded" {
assert(queue.get().?.data == 0);
- var node_2 = QueueMpmc(i32).Node{
+ var node_2 = Queue(i32).Node{
.data = 2,
.next = undefined,
};
queue.put(&node_2);
- var node_3 = QueueMpmc(i32).Node{
+ var node_3 = Queue(i32).Node{
.data = 3,
.next = undefined,
};
@@ -198,15 +211,14 @@ test "std.atomic.queue_mpmc single-threaded" {
assert(queue.get().?.data == 2);
- var node_4 = QueueMpmc(i32).Node{
+ var node_4 = Queue(i32).Node{
.data = 4,
.next = undefined,
};
queue.put(&node_4);
assert(queue.get().?.data == 3);
- // if we were to set node_3.next to null here, it would cause this test
- // to fail. this demonstrates the limitation of hanging on to extra memory.
+ node_3.next = null;
assert(queue.get().?.data == 4);
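Both the queue above and the stack later in this commit rely on the same spinlock idiom: acquisition spins on an atomic exchange until it observes the unlocked value, and release exchanges the unlocked value back in, asserting the lock was in fact held. Extracted into a standalone sketch (same-era Zig; the function and variable names are illustrative):

    const builtin = @import("builtin");
    const AtomicOrder = builtin.AtomicOrder;
    const assert = @import("std").debug.assert;

    var lock: u8 = 0; // 0 = unlocked, 1 = locked

    fn withLock() void {
        // acquire: swap in 1; if the previous value was already 1,
        // another thread holds the lock, so keep spinning
        while (@atomicRmw(u8, &lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
        // release on every exit path: swap 0 back in and check that the
        // previous value was 1, i.e. this thread actually held the lock
        defer assert(@atomicRmw(u8, &lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);

        // ... touch the protected fields here ...
    }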
diff --git a/std/atomic/queue_mpsc.zig b/std/atomic/queue_mpsc.zig
deleted file mode 100644
index 978e189453..0000000000
--- a/std/atomic/queue_mpsc.zig
+++ /dev/null
@@ -1,185 +0,0 @@
-const std = @import("../index.zig");
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
-
-/// Many producer, single consumer, non-allocating, thread-safe, lock-free
-pub fn QueueMpsc(comptime T: type) type {
- return struct {
- inboxes: [2]std.atomic.Stack(T),
- outbox: std.atomic.Stack(T),
- inbox_index: usize,
-
- pub const Self = this;
-
- pub const Node = std.atomic.Stack(T).Node;
-
- /// Not thread-safe. The call to init() must complete before any other functions are called.
- /// No deinitialization required.
- pub fn init() Self {
- return Self{
- .inboxes = []std.atomic.Stack(T){
- std.atomic.Stack(T).init(),
- std.atomic.Stack(T).init(),
- },
- .outbox = std.atomic.Stack(T).init(),
- .inbox_index = 0,
- };
- }
-
- /// Fully thread-safe. put() may be called from any thread at any time.
- pub fn put(self: *Self, node: *Node) void {
- const inbox_index = @atomicLoad(usize, &self.inbox_index, AtomicOrder.SeqCst);
- const inbox = &self.inboxes[inbox_index];
- inbox.push(node);
- }
-
- /// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before
- /// the next call to get().
- pub fn get(self: *Self) ?*Node {
- if (self.outbox.pop()) |node| {
- return node;
- }
- const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
- const prev_inbox = &self.inboxes[prev_inbox_index];
- while (prev_inbox.pop()) |node| {
- self.outbox.push(node);
- }
- return self.outbox.pop();
- }
-
- /// Must be called by only 1 consumer at a time. Every call to get() and isEmpty() must complete before
- /// the next call to isEmpty().
- pub fn isEmpty(self: *Self) bool {
- if (!self.outbox.isEmpty()) return false;
- const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
- const prev_inbox = &self.inboxes[prev_inbox_index];
- while (prev_inbox.pop()) |node| {
- self.outbox.push(node);
- }
- return self.outbox.isEmpty();
- }
-
- /// For debugging only. No API guarantees about what this does.
- pub fn dump(self: *Self) void {
- {
- var it = self.outbox.root;
- while (it) |node| {
- std.debug.warn("0x{x} -> ", @ptrToInt(node));
- it = node.next;
- }
- }
- const inbox_index = self.inbox_index;
- const inboxes = []*std.atomic.Stack(T){
- &self.inboxes[self.inbox_index],
- &self.inboxes[1 - self.inbox_index],
- };
- for (inboxes) |inbox| {
- var it = inbox.root;
- while (it) |node| {
- std.debug.warn("0x{x} -> ", @ptrToInt(node));
- it = node.next;
- }
- }
-
- std.debug.warn("null\n");
- }
- };
-}
-
-const Context = struct {
- allocator: *std.mem.Allocator,
- queue: *QueueMpsc(i32),
- put_sum: isize,
- get_sum: isize,
- get_count: usize,
- puts_done: u8, // TODO make this a bool
-};
-
-// TODO add lazy evaluated build options and then put puts_per_thread behind
-// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
-// CI we would use a less aggressive setting since at 1 core, while we still
-// want this test to pass, we need a smaller value since there is so much thrashing
-// we would also use a less aggressive setting when running in valgrind
-const puts_per_thread = 500;
-const put_thread_count = 3;
-
-test "std.atomic.queue_mpsc" {
- var direct_allocator = std.heap.DirectAllocator.init();
- defer direct_allocator.deinit();
-
- var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
- defer direct_allocator.allocator.free(plenty_of_memory);
-
- var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
-
- var queue = QueueMpsc(i32).init();
- var context = Context{
- .allocator = a,
- .queue = &queue,
- .put_sum = 0,
- .get_sum = 0,
- .puts_done = 0,
- .get_count = 0,
- };
-
- var putters: [put_thread_count]*std.os.Thread = undefined;
- for (putters) |*t| {
- t.* = try std.os.spawnThread(&context, startPuts);
- }
- var getters: [1]*std.os.Thread = undefined;
- for (getters) |*t| {
- t.* = try std.os.spawnThread(&context, startGets);
- }
-
- for (putters) |t|
- t.wait();
- _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
- for (getters) |t|
- t.wait();
-
- if (context.put_sum != context.get_sum) {
- std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
- }
-
- if (context.get_count != puts_per_thread * put_thread_count) {
- std.debug.panic(
- "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
- context.get_count,
- u32(puts_per_thread),
- u32(put_thread_count),
- );
- }
-}
-
-fn startPuts(ctx: *Context) u8 {
- var put_count: usize = puts_per_thread;
- var r = std.rand.DefaultPrng.init(0xdeadbeef);
- while (put_count != 0) : (put_count -= 1) {
- std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
- const x = @bitCast(i32, r.random.scalar(u32));
- const node = ctx.allocator.create(QueueMpsc(i32).Node{
- .next = undefined,
- .data = x,
- }) catch unreachable;
- ctx.queue.put(node);
- _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
- }
- return 0;
-}
-
-fn startGets(ctx: *Context) u8 {
- while (true) {
- const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
-
- while (ctx.queue.get()) |node| {
- std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
- _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
- _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
- }
-
- if (last) return 0;
- }
-}
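The file removed above built MPSC queueing out of two lock-free stacks: producers pushed onto whichever inbox was active, and the single consumer atomically flipped inbox_index, drained the previous inbox into the outbox, and popped from there. Draining one LIFO stack into another reverses the order, which is what recovered FIFO behavior. A standalone sketch of that reversal (same-era Zig; the test name is illustrative):

    const std = @import("std");
    const assert = std.debug.assert;

    test "two LIFO stacks give FIFO order" {
        var inbox = std.atomic.Stack(i32).init();
        var outbox = std.atomic.Stack(i32).init();

        var node_a = std.atomic.Stack(i32).Node{ .data = 1, .next = undefined };
        var node_b = std.atomic.Stack(i32).Node{ .data = 2, .next = undefined };
        inbox.push(&node_a);
        inbox.push(&node_b);

        // the inbox pops 2 then 1; pushing each into the outbox reverses
        // again, so the outbox pops in arrival order: 1 then 2
        while (inbox.pop()) |node| outbox.push(node);
        assert(outbox.pop().?.data == 1);
        assert(outbox.pop().?.data == 2);
    }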
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index d74bee8e8b..16d5c9503b 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -1,10 +1,13 @@
+const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
-/// Many reader, many writer, non-allocating, thread-safe, lock-free
+/// Many reader, many writer, non-allocating, thread-safe
+/// Uses a spinlock to protect push() and pop()
pub fn Stack(comptime T: type) type {
return struct {
root: ?*Node,
+ lock: u8,
pub const Self = this;
@@ -14,7 +17,10 @@ pub fn Stack(comptime T: type) type {
};
pub fn init() Self {
- return Self{ .root = null };
+ return Self{
+ .root = null,
+ .lock = 0,
+ };
}
/// push operation, but only if you are the first item in the stack. if you did not succeed in
@@ -25,18 +31,20 @@ pub fn Stack(comptime T: type) type {
}
pub fn push(self: *Self, node: *Node) void {
- var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
- while (true) {
- node.next = root;
- root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break;
- }
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ node.next = self.root;
+ self.root = node;
}
pub fn pop(self: *Self) ?*Node {
- var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
- while (true) {
- root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root;
- }
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+
+ const root = self.root orelse return null;
+ self.root = root.next;
+ return root;
}
pub fn isEmpty(self: *Self) bool {
@@ -45,7 +53,7 @@ pub fn Stack(comptime T: type) type {
};
}
-const std = @import("std");
+const std = @import("../index.zig");
const Context = struct {
allocator: *std.mem.Allocator,
stack: *Stack(i32),