author     LemonBoy <thatlemon@gmail.com>  2020-04-06 12:54:35 +0200
committer  Andrew Kelley <andrew@ziglang.org>  2020-04-06 12:52:53 -0400
commit     a0b73c9f02b3bb9e550d0283053cc9fc6c1dade6 (patch)
tree       be500d4059b340ba39e48f648f76bc2c29ca4c1a /lib/std
parent     c5ced0d74a40c8a8a46bf1b65ece17479384ce7c (diff)
compiler-rt: Separate max size allowed for load/store and CAS
The ARM v6m ISA has no way to express a CAS loop natively without turning off interrupts or going through the kernel-provided cmpxchg helper. On such a platform the user has to provide a few __sync_* builtins to satisfy the linker.
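For reference, a minimal sketch of what providing one of those builtins could look like on a bare-metal Cortex-M0: ARMv6-M parts are single-core, so the usual trick is to mask interrupts around a plain read-modify-write. Only the symbol name __sync_val_compare_and_swap_4 comes from the standard libcall naming; the PRIMASK save/restore body below is an illustrative assumption, not part of this patch.

// Hypothetical user-supplied 4-byte CAS for bare-metal ARMv6-M.
// Atomicity is obtained by masking interrupts for the duration of the
// read-modify-write and then restoring the previous PRIMASK state.
export fn __sync_val_compare_and_swap_4(ptr: *u32, expected: u32, desired: u32) u32 {
    // Save the current interrupt mask, then disable interrupts.
    const primask = asm volatile ("mrs %[ret], primask"
        : [ret] "=r" (-> u32)
    );
    asm volatile ("cpsid i" ::: "memory");

    const old = ptr.*;
    if (old == expected) ptr.* = desired;

    // Restore the caller's interrupt state rather than unconditionally
    // re-enabling interrupts.
    asm volatile ("msr primask, %[val]"
        :
        : [val] "r" (primask)
        : "memory"
    );
    return old;
}

A real firmware would typically also provide the _1/_2/_8 widths and the __sync_bool_compare_and_swap_N variants, depending on which symbols the linker reports as missing.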
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/special/compiler_rt/atomics.zig | 27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/lib/std/special/compiler_rt/atomics.zig b/lib/std/special/compiler_rt/atomics.zig
index 5cc7c42293..e2cb45bef3 100644
--- a/lib/std/special/compiler_rt/atomics.zig
+++ b/lib/std/special/compiler_rt/atomics.zig
@@ -99,13 +99,30 @@ comptime {
// LLVM emits those iff the object size is known and the pointers are correctly
// aligned.
-// The size (in bytes) of the biggest object that the architecture can access
-// atomically. Objects bigger than this threshold require the use of a lock.
+// The size (in bytes) of the biggest object that the architecture can
+// load/store atomically.
+// Objects bigger than this threshold require the use of a lock.
const largest_atomic_size = switch (builtin.arch) {
.x86_64 => 16,
else => @sizeOf(usize),
};
+// The size (in bytes) of the biggest object that the architecture can perform
+// an atomic CAS operation with.
+// Objects bigger than this threshold require the use of a lock.
+const largest_atomic_cas_size = switch (builtin.arch) {
+ .arm, .armeb, .thumb, .thumbeb =>
+ // The ARM v6m ISA has no ldrex/strex and so it's impossible to do CAS
+ // operations unless we're targeting Linux or the user provides the missing
+ // builtin functions.
+ if (std.Target.arm.featureSetHas(std.Target.current.cpu.features, .has_v6m) and
+ std.Target.current.os.tag != .linux)
+ 0
+ else
+ @sizeOf(usize),
+ else => @sizeOf(usize),
+};
+
fn atomicLoadFn(comptime T: type) fn (*T, i32) callconv(.C) T {
return struct {
fn atomic_load_N(src: *T, model: i32) callconv(.C) T {
@@ -151,7 +168,7 @@ comptime {
fn atomicExchangeFn(comptime T: type) fn (*T, T, i32) callconv(.C) T {
return struct {
fn atomic_exchange_N(ptr: *T, val: T, model: i32) callconv(.C) T {
- if (@sizeOf(T) > largest_atomic_size) {
+ if (@sizeOf(T) > largest_atomic_cas_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
@@ -174,7 +191,7 @@ comptime {
fn atomicCompareExchangeFn(comptime T: type) fn (*T, *T, T, i32, i32) callconv(.C) i32 {
return struct {
fn atomic_compare_exchange_N(ptr: *T, expected: *T, desired: T, success: i32, failure: i32) callconv(.C) i32 {
- if (@sizeOf(T) > largest_atomic_size) {
+ if (@sizeOf(T) > largest_atomic_cas_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
@@ -205,7 +222,7 @@ comptime {
fn fetchFn(comptime T: type, comptime op: builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
return struct {
pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
- if (@sizeOf(T) > largest_atomic_size) {
+ if (@sizeOf(T) > largest_atomic_cas_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
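As an aside on the comment in the first hunk ("LLVM emits those iff the object size is known and the pointers are correctly aligned"): the sized entry points follow the C __atomic_* libcall ABI, so they can also be called by hand. The declaration below mirrors atomic_compare_exchange_N from this patch; the memory-order constant 5 (__ATOMIC_SEQ_CST) comes from the C encoding, and whether the symbol resolves at link time depends on how compiler_rt is linked for the target, so treat this as a sketch rather than a supported interface.

const std = @import("std");

// Sized CAS entry point from compiler_rt, declared with the same shape as
// atomic_compare_exchange_N above so it can be invoked directly.
extern fn __atomic_compare_exchange_4(
    ptr: *u32,
    expected: *u32,
    desired: u32,
    success: i32,
    failure: i32,
) i32;

test "calling the 4-byte CAS libcall by hand" {
    const seq_cst: i32 = 5; // __ATOMIC_SEQ_CST in the C memory-order encoding
    var value: u32 = 5;
    var expected: u32 = 5;
    const swapped = __atomic_compare_exchange_4(&value, &expected, 7, seq_cst, seq_cst);
    std.debug.assert(swapped != 0);
    std.debug.assert(value == 7);
}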