path: root/lib/std/atomic
author     Loris Cro <kappaloris@gmail.com>    2023-06-18 09:06:40 +0200
committer  GitHub <noreply@github.com>         2023-06-18 09:06:40 +0200
commit     216ef10dc471e4db60a30208be178d6c59efeaaf (patch)
tree       8c239dab283ae9cb3b7fe099bae240bcc53f894e /lib/std/atomic
parent     0fc1d396495c1ab482197021dedac8bea3f9401c (diff)
parent     729a051e9e38674233190aea23c0ac8c134f2d67 (diff)
Merge branch 'master' into autodoc-searchkey
Diffstat (limited to 'lib/std/atomic')
-rw-r--r--  lib/std/atomic/Atomic.zig | 21
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig
index 51e61ca628..6c0c477725 100644
--- a/lib/std/atomic/Atomic.zig
+++ b/lib/std/atomic/Atomic.zig
@@ -31,7 +31,7 @@ pub fn Atomic(comptime T: type) type {
/// // Release ensures code before unref() happens-before the count is decremented as dropFn could be called by then.
/// if (self.count.fetchSub(1, .Release)) {
/// // Acquire ensures count decrement and code before previous unrefs()s happens-before we call dropFn below.
- /// // NOTE: another alterative is to use .AcqRel on the fetchSub count decrement but it's extra barrier in possibly hot path.
+ /// // NOTE: another alternative is to use .AcqRel on the fetchSub count decrement but it's extra barrier in possibly hot path.
/// self.count.fence(.Acquire);
/// (self.dropFn)(self);
/// }
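For reference, a minimal standalone sketch of the ref-counting pattern the doc comment above describes: Release ordering on the decrement, then an Acquire fence before running the destructor. The RefCounted/dropFn names and the test are illustrative assumptions, not code from this commit:

const std = @import("std");
const Atomic = std.atomic.Atomic;

const RefCounted = struct {
    count: Atomic(usize) = Atomic(usize).init(1),
    dropFn: *const fn (*RefCounted) void,

    fn ref(self: *RefCounted) void {
        // Monotonic is enough: taking a new reference needs no ordering.
        _ = self.count.fetchAdd(1, .Monotonic);
    }

    fn unref(self: *RefCounted) void {
        // Release: all use of the object happens-before the decrement.
        if (self.count.fetchSub(1, .Release) == 1) {
            // Acquire: the destructor observes writes made before earlier unref()s.
            self.count.fence(.Acquire);
            (self.dropFn)(self);
        }
    }
};

test "dropFn runs only when the count reaches zero (illustrative)" {
    const S = struct {
        var dropped: bool = false;
        fn drop(_: *RefCounted) void {
            dropped = true;
        }
    };
    var rc = RefCounted{ .dropFn = &S.drop };
    rc.ref();
    rc.unref();
    try std.testing.expect(!S.dropped);
    rc.unref();
    try std.testing.expect(S.dropped);
}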
@@ -374,10 +374,6 @@ const atomic_rmw_orderings = [_]Ordering{
};
test "Atomic.swap" {
- // TODO: Re-enable when LLVM is released with a bugfix for isel of
- // atomic load (currently fixed on trunk, broken on 15.0.2)
- if (builtin.cpu.arch == .powerpc64le) return error.SkipZigTest;
-
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(usize).init(5);
try testing.expectEqual(x.swap(10, ordering), 5);
@@ -546,9 +542,8 @@ test "Atomic.bitSet" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0);
- const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array, 0..) |_, bit_index| {
+ for (0..@bitSizeOf(Int)) |bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
@@ -562,7 +557,7 @@ test "Atomic.bitSet" {
try testing.expect(x.load(.SeqCst) & mask != 0);
// all the previous bits should have not changed (still be set)
- for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
+ for (0..bit_index) |prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask != 0);
@@ -576,9 +571,8 @@ test "Atomic.bitReset" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0);
- const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array, 0..) |_, bit_index| {
+ for (0..@bitSizeOf(Int)) |bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
x.storeUnchecked(x.loadUnchecked() | mask);
@@ -593,7 +587,7 @@ test "Atomic.bitReset" {
try testing.expect(x.load(.SeqCst) & mask == 0);
// all the previous bits should have not changed (still be reset)
- for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
+ for (0..bit_index) |prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
@@ -607,9 +601,8 @@ test "Atomic.bitToggle" {
inline for (atomicIntTypes()) |Int| {
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(Int).init(0);
- const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array, 0..) |_, bit_index| {
+ for (0..@bitSizeOf(Int)) |bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
@@ -623,7 +616,7 @@ test "Atomic.bitToggle" {
try testing.expect(x.load(.SeqCst) & mask == 0);
// all the previous bits should have not changed (still be toggled back)
- for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
+ for (0..bit_index) |prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
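For reference, a minimal standalone sketch of the 0..N range-for idiom that the Atomic.bitSet, Atomic.bitReset, and Atomic.bitToggle hunks above switch to, in place of the old [@bitSizeOf(Int)]void placeholder array. The test name and the choice of u8 are illustrative assumptions:

const std = @import("std");

test "iterate bit indices with a range-for (illustrative)" {
    const Int = u8; // assumption: any unsigned integer type behaves the same here
    var x: Int = 0;
    for (0..@bitSizeOf(Int)) |bit_index| {
        const bit = @intCast(std.math.Log2Int(Int), bit_index);
        x |= @as(Int, 1) << bit;
    }
    try std.testing.expectEqual(@as(Int, std.math.maxInt(Int)), x);
}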