| author | mlugg <mlugg@mlugg.co.uk> | 2025-09-13 19:57:03 +0100 |
|---|---|---|
| committer | Matthew Lugg <mlugg@mlugg.co.uk> | 2025-10-18 09:28:43 +0100 |
| commit | b866c14328f0779b38f073b2ee3a8a3e437d129c | |
| tree | 4174d7f1b185acda2a1d04f387715e77af7a920c /lib/std/Thread | |
| parent | 7a5d2a196f2d78eb55f8ef294b4c899e0378f71e | |
std: make RwLock test less intensive
This test called `yield` 80,000 times, which is nothing on a system with
little load, but murder on a CI system. macOS' scheduler in particular
doesn't seem to deal with this very well. The `yield` calls also weren't
even necessarily doing what they were meant to: if the optimizer could
figure out that a call doesn't clobber some memory, it could happily
reorder accesses to that memory around the `yield`s anyway!
The test has been simplified and made to work better, and the number of
yields has been reduced. The number of overall iterations has also been
reduced, because with the `yield` calls making races very likely, we
don't really need to run too many iterations to be confident that the
implementation is race-free.
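For reference, here is a minimal, self-contained sketch of the volatile-access pattern the reworked test relies on; it is not the committed test, and the `Pair` type, `checkPair` function and test name are invented for illustration. Reading through `*const volatile` pointers forces the compiler to emit a real load on each side of the optional yield, so the two reads cannot be merged or hoisted across the scheduling point.

```zig
const std = @import("std");
const testing = std.testing;

// Hypothetical stand-in for the two fields the real test guards with the RwLock.
const Pair = struct {
    val_a: usize = 0,
    val_b: usize = 0,
};

// Check the invariant "val_a == val_b" the way the new reader does: the
// `*const volatile` pointers force an actual memory access on each side of the
// optional yield, so the optimizer cannot collapse the two loads or move them
// across the scheduling point.
fn checkPair(pair: *const Pair, yield_between: bool) !void {
    const a_ptr: *const volatile usize = &pair.val_a;
    const b_ptr: *const volatile usize = &pair.val_b;

    const old_a = a_ptr.*;
    if (yield_between) try std.Thread.yield();
    const old_b = b_ptr.*;
    try testing.expect(old_a == old_b);
}

test "volatile reads straddle the yield" {
    const pair: Pair = .{};
    try checkPair(&pair, true);
}
```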
Diffstat (limited to 'lib/std/Thread')
| -rw-r--r-- | lib/std/Thread/RwLock.zig | 107 |
1 file changed, 56 insertions(+), 51 deletions(-)
diff --git a/lib/std/Thread/RwLock.zig b/lib/std/Thread/RwLock.zig
index 6f39221e36..3e032c7c00 100644
--- a/lib/std/Thread/RwLock.zig
+++ b/lib/std/Thread/RwLock.zig
@@ -301,81 +301,86 @@ test "concurrent access" {
     const num_writers: usize = 2;
     const num_readers: usize = 4;
-    const num_writes: usize = 4096;
-    const num_reads: usize = 8192;
+    const num_writes: usize = 1000;
+    const num_reads: usize = 2000;

     const Runner = struct {
-        const Self = @This();
+        const Runner = @This();

-        rwl: RwLock = .{},
-        writes: usize = 0,
-        reads: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
+        rwl: RwLock,
+        writes: usize,
+        reads: std.atomic.Value(usize),

-        term1: usize = 0,
-        term2: usize = 0,
-        term_sum: usize = 0,
+        val_a: usize,
+        val_b: usize,

-        fn reader(self: *Self) !void {
+        fn reader(run: *Runner, thread_idx: usize) !void {
+            var prng = std.Random.DefaultPrng.init(thread_idx);
+            const rnd = prng.random();
             while (true) {
-                self.rwl.lockShared();
-                defer self.rwl.unlockShared();
+                run.rwl.lockShared();
+                defer run.rwl.unlockShared();

-                if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
-                    break;
+                try testing.expect(run.writes <= num_writes);
+                if (run.reads.fetchAdd(1, .monotonic) >= num_reads) break;

-                try self.check();
+                // We use `volatile` accesses so that we can make sure the memory is accessed either
+                // side of a yield, maximising chances of a race.
+                const a_ptr: *const volatile usize = &run.val_a;
+                const b_ptr: *const volatile usize = &run.val_b;

-                _ = self.reads.fetchAdd(1, .monotonic);
+                const old_a = a_ptr.*;
+                if (rnd.boolean()) try std.Thread.yield();
+                const old_b = b_ptr.*;
+                try testing.expect(old_a == old_b);
             }
         }

-        fn writer(self: *Self, thread_idx: usize) !void {
+        fn writer(run: *Runner, thread_idx: usize) !void {
             var prng = std.Random.DefaultPrng.init(thread_idx);
-            var rnd = prng.random();
-
+            const rnd = prng.random();
             while (true) {
-                self.rwl.lock();
-                defer self.rwl.unlock();
+                run.rwl.lock();
+                defer run.rwl.unlock();

-                if (self.writes >= num_writes)
-                    break;
+                try testing.expect(run.writes <= num_writes);
+                if (run.writes == num_writes) break;

-                try self.check();
+                // We use `volatile` accesses so that we can make sure the memory is accessed either
+                // side of a yield, maximising chances of a race.
+                const a_ptr: *volatile usize = &run.val_a;
+                const b_ptr: *volatile usize = &run.val_b;

-                const term1 = rnd.int(usize);
-                self.term1 = term1;
-                try std.Thread.yield();
+                const new_val = rnd.int(usize);

-                const term2 = rnd.int(usize);
-                self.term2 = term2;
-                try std.Thread.yield();
+                const old_a = a_ptr.*;
+                a_ptr.* = new_val;
+                if (rnd.boolean()) try std.Thread.yield();
+                const old_b = b_ptr.*;
+                b_ptr.* = new_val;
+                try testing.expect(old_a == old_b);

-                self.term_sum = term1 +% term2;
-                self.writes += 1;
+                run.writes += 1;
             }
         }
-
-        fn check(self: *const Self) !void {
-            const term_sum = self.term_sum;
-            try std.Thread.yield();
-
-            const term2 = self.term2;
-            try std.Thread.yield();
-
-            const term1 = self.term1;
-            try testing.expectEqual(term_sum, term1 +% term2);
-        }
     };

-    var runner = Runner{};
-    var threads: [num_writers + num_readers]std.Thread = undefined;
-
-    for (threads[0..num_writers], 0..) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
-    for (threads[num_writers..]) |*t| t.* = try std.Thread.spawn(.{}, Runner.reader, .{&runner});
+    var run: Runner = .{
+        .rwl = .{},
+        .writes = 0,
+        .reads = .init(0),
+        .val_a = 0,
+        .val_b = 0,
+    };
+    var write_threads: [num_writers]std.Thread = undefined;
+    var read_threads: [num_readers]std.Thread = undefined;

-    for (threads) |t| t.join();
+    for (&write_threads, 0..) |*t, i| t.* = try .spawn(.{}, Runner.writer, .{ &run, i });
+    for (&read_threads, num_writers..) |*t, i| t.* = try .spawn(.{}, Runner.reader, .{ &run, i });

-    try testing.expectEqual(num_writes, runner.writes);
+    for (write_threads) |t| t.join();
+    for (read_threads) |t| t.join();

-    //std.debug.print("reads={}\n", .{ runner.reads.load(.unordered)});
+    try testing.expect(run.writes == num_writes);
+    try testing.expect(run.reads.raw >= num_reads);
 }
