author    David Rubin <87927264+Rexicon226@users.noreply.github.com>  2024-10-04 15:21:27 -0700
committer GitHub <noreply@github.com>  2024-10-04 22:21:27 +0000
commit    043b1adb8dff184deaf9e145e6045b36b9bf733b (patch)
tree      30c1e0e35c1e9b289ca1590431a59d9d44565520 /lib/std/Thread
parent    163d505e27bfbdb0aed30339fcc98c0f5f364e7a (diff)
remove `@fence` (#21585)
closes #11650
Diffstat (limited to 'lib/std/Thread')
-rw-r--r--  lib/std/Thread/Futex.zig      | 21
-rw-r--r--  lib/std/Thread/ResetEvent.zig |  8
-rw-r--r--  lib/std/Thread/WaitGroup.zig  |  3
3 files changed, 10 insertions(+), 22 deletions(-)
diff --git a/lib/std/Thread/Futex.zig b/lib/std/Thread/Futex.zig
index fe22fa2011..da3fb916c6 100644
--- a/lib/std/Thread/Futex.zig
+++ b/lib/std/Thread/Futex.zig
@@ -794,9 +794,8 @@ const PosixImpl = struct {
// - T1: bumps pending waiters (was reordered after the ptr == expect check)
// - T1: goes to sleep and misses both the ptr change and T2's wake up
//
- // seq_cst as Acquire barrier to ensure the announcement happens before the ptr check below.
- // seq_cst as shared modification order to form a happens-before edge with the fence(.seq_cst)+load() in wake().
- var pending = bucket.pending.fetchAdd(1, .seq_cst);
+ // acquire barrier to ensure the announcement happens before the ptr check below.
+ var pending = bucket.pending.fetchAdd(1, .acquire);
assert(pending < std.math.maxInt(usize));
// If the wait gets cancelled, remove the pending count we previously added.
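[editor's note: a minimal sketch of the waiter-side ordering this hunk relies on. The Bucket, shouldSleep, and the bare futex word are hypothetical simplifications, not the real PosixImpl; the point is that .acquire on the increment keeps the announcement ordered before the re-check of the futex word, which is exactly the reordering the comment above warns about.]

    const std = @import("std");

    // Hypothetical, stripped-down waiter bucket for illustration only.
    const Bucket = struct {
        pending: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
    };

    // Returns true if the caller should proceed to sleep on the futex word.
    fn shouldSleep(bucket: *Bucket, ptr: *const std.atomic.Value(u32), expect: u32) bool {
        // Announce the waiter first; .acquire forbids the ptr load below
        // from being reordered before this increment.
        _ = bucket.pending.fetchAdd(1, .acquire);
        // Re-check only after the announcement: a wake() that changes the
        // futex word after this point must also observe pending != 0.
        return ptr.load(.monotonic) == expect;
    }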
@@ -858,15 +857,8 @@ const PosixImpl = struct {
//
// What we really want here is a Release load, but that doesn't exist under the C11 memory model.
// We could instead do `bucket.pending.fetchAdd(0, Release) == 0` which achieves effectively the same thing,
- // but the RMW operation unconditionally marks the cache-line as modified for others causing unnecessary fetching/contention.
- //
- // Instead we opt to do a full-fence + load instead which avoids taking ownership of the cache-line.
- // fence(seq_cst) effectively converts the ptr update to seq_cst and the pending load to seq_cst: creating a Store-Load barrier.
- //
- // The pending count increment in wait() must also now use seq_cst for the update + this pending load
- // to be in the same modification order as our load isn't using release/acquire to guarantee it.
- bucket.pending.fence(.seq_cst);
- if (bucket.pending.load(.monotonic) == 0) {
+ // LLVM lowers the fetchAdd(0, .release) into an mfence+load which avoids gaining ownership of the cache-line.
+ if (bucket.pending.fetchAdd(0, .release) == 0) {
return;
}
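[editor's note: the wake()-side counterpart, sketched against the hypothetical Bucket above; wakeWaiters is a placeholder for the real FUTEX_WAKE path. The zero-operand RMW stands in for the "release load" the C11 model lacks, pairing with the .acquire increment in wait().]

    // Sketch of the wake()-side pending check.
    fn maybeWake(bucket: *Bucket) void {
        // fetchAdd(0, .release) orders the caller's earlier futex-word store
        // before this read of pending, without needing a separate fence.
        if (bucket.pending.fetchAdd(0, .release) == 0) {
            return; // no announced waiters: skip the syscall entirely
        }
        wakeWaiters(bucket);
    }

    fn wakeWaiters(bucket: *Bucket) void {
        _ = bucket; // placeholder: issue FUTEX_WAKE / the platform wake here
    }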
@@ -979,15 +971,14 @@ test "broadcasting" {
fn wait(self: *@This()) !void {
// Decrement the counter.
// Release ensures stuff before this barrier.wait() happens before the last one.
- const count = self.count.fetchSub(1, .release);
+ // Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
+ const count = self.count.fetchSub(1, .acq_rel);
try testing.expect(count <= num_threads);
try testing.expect(count > 0);
// First counter to reach zero wakes all other threads.
- // Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
// Release on futex update ensures stuff before all barrier.wait()'s happens before they all return.
if (count - 1 == 0) {
- _ = self.count.load(.acquire); // TODO: could be fence(acquire) if not for TSAN
self.futex.store(1, .release);
Futex.wake(&self.futex, num_threads - 1);
return;
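[editor's note: the barrier from this test, pulled out as a self-contained sketch with the new ordering; num_threads is an assumed constant mirroring the test, and the struct shape is illustrative rather than copied from the commit.]

    const num_threads = 4; // assumed constant, mirroring the test

    const Barrier = struct {
        count: std.atomic.Value(usize) = std.atomic.Value(usize).init(num_threads),
        futex: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

        fn wait(self: *Barrier) void {
            // One .acq_rel RMW replaces the old .release RMW plus the
            // separate .acquire load: release publishes this thread's work,
            // acquire (meaningful for the last decrement) observes the rest.
            const count = self.count.fetchSub(1, .acq_rel);
            if (count - 1 == 0) {
                // Last thread in: the release store publishes everything
                // before all the wait() calls to every woken waiter.
                self.futex.store(1, .release);
                std.Thread.Futex.wake(&self.futex, num_threads - 1);
                return;
            }
            while (self.futex.load(.acquire) == 0) {
                std.Thread.Futex.wait(&self.futex, 0);
            }
        }
    };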
diff --git a/lib/std/Thread/ResetEvent.zig b/lib/std/Thread/ResetEvent.zig
index cbc5a2a31c..47a9b0c038 100644
--- a/lib/std/Thread/ResetEvent.zig
+++ b/lib/std/Thread/ResetEvent.zig
@@ -112,9 +112,9 @@ const FutexImpl = struct {
// Try to set the state from `unset` to `waiting` to indicate
// to the set() thread that others are blocked on the ResetEvent.
// We avoid using any strict barriers until the end when we know the ResetEvent is set.
- var state = self.state.load(.monotonic);
+ var state = self.state.load(.acquire);
if (state == unset) {
- state = self.state.cmpxchgStrong(state, waiting, .monotonic, .monotonic) orelse waiting;
+ state = self.state.cmpxchgStrong(state, waiting, .acquire, .acquire) orelse waiting;
}
// Wait until the ResetEvent is set since the state is waiting.
@@ -124,7 +124,7 @@ const FutexImpl = struct {
const wait_result = futex_deadline.wait(&self.state, waiting);
// Check if the ResetEvent was set before possibly reporting error.Timeout below.
- state = self.state.load(.monotonic);
+ state = self.state.load(.acquire);
if (state != waiting) {
break;
}
@@ -133,9 +133,7 @@ const FutexImpl = struct {
}
}
- // Acquire barrier ensures memory accesses before set() happen before we return.
assert(state == is_set);
- self.state.fence(.acquire);
}
fn set(self: *Impl) void {
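[editor's note: a usage-level sketch of what the stronger loads buy; producer/consumer and shared are hypothetical, not from the commit. With the trailing fence(.acquire) gone, the .acquire loads themselves form the happens-before edge with set()'s release store.]

    var event: std.Thread.ResetEvent = .{};
    var shared: u32 = 0;

    fn producer() void {
        shared = 42; // ordered before the release store inside set()
        event.set();
    }

    fn consumer() void {
        event.wait(); // the .acquire load pairs with set()'s release store
        std.debug.assert(shared == 42); // guaranteed visible here
    }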
diff --git a/lib/std/Thread/WaitGroup.zig b/lib/std/Thread/WaitGroup.zig
index cff474c863..bdc49587bf 100644
--- a/lib/std/Thread/WaitGroup.zig
+++ b/lib/std/Thread/WaitGroup.zig
@@ -15,11 +15,10 @@ pub fn start(self: *WaitGroup) void {
}
pub fn finish(self: *WaitGroup) void {
- const state = self.state.fetchSub(one_pending, .release);
+ const state = self.state.fetchSub(one_pending, .acq_rel);
assert((state / one_pending) > 0);
if (state == (one_pending | is_waiting)) {
- self.state.fence(.acquire);
self.event.set();
}
}
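[editor's note: a hedged usage sketch, not from the commit; worker and compute are hypothetical. The .acq_rel fetchSub is what lets the final finish() both publish its own writes (release) and observe every other worker's (acquire) before firing the event, so wait()ers see all results.]

    fn worker(wg: *std.Thread.WaitGroup, results: []u32, i: usize) void {
        defer wg.finish(); // the .acq_rel decrement publishes the write below
        results[i] = compute(i);
    }

    fn compute(i: usize) u32 {
        return @intCast(i * 2); // placeholder work
    }

    // Caller side: pair each spawned worker with wg.start(), then
    // wg.wait(); every results[] write is visible once wait() returns.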