diff options
| author | Andrew Kelley <andrew@ziglang.org> | 2022-05-04 14:06:54 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2022-05-04 17:34:16 -0700 |
| commit | 5b1c0d922c1061706ae1673333fcfb1d8fdd4602 (patch) | |
| tree | 4df989a3854adeaad833ad1c9970c972a77918b4 /src/target.zig | |
| parent | 259f784241fb44e0a1b570daaf31ba2b9f164106 (diff) | |
| download | zig-5b1c0d922c1061706ae1673333fcfb1d8fdd4602.tar.gz zig-5b1c0d922c1061706ae1673333fcfb1d8fdd4602.zip | |
stage2: improve semantics of atomic operations
ZIR instructions updated: atomic_load, atomic_rmw, atomic_store, cmpxchg
These no longer construct a pointer type as the result location. This
solves a TODO that was preventing the pointer from possibly being
volatile, as well as properly handling allowzero and addrspace.
It also allows the pointer to be over-aligned, which may be needed
depending on the target. As a consequence, the element type needs to be
communicated in the ZIR. This is done by strategically making one of the
operands be ResultLoc.ty instead of ResultLoc.coerced_ty if possible, or
otherwise explicitly adding elem_type into the ZIR encoding, such as in
the case of atomic_load.
The pointer type of atomic operations is now checked in Sema by coercing
it to an expected pointer type that may be over-aligned according to
target requirements.
Together with the previous commit, Zig now has smaller alignment for
large integers, depending on the target, and yet still has type safety
for atomic operations that specially require higher alignment.
Diffstat (limited to 'src/target.zig')
| -rw-r--r-- | src/target.zig | 68 |
1 files changed, 64 insertions, 4 deletions
diff --git a/src/target.zig b/src/target.zig index 27ed1118db..7818d496eb 100644 --- a/src/target.zig +++ b/src/target.zig @@ -1,5 +1,6 @@ const std = @import("std"); const llvm = @import("codegen/llvm/bindings.zig"); +const Type = @import("type.zig").Type; pub const ArchOsAbi = struct { arch: std.Target.Cpu.Arch, @@ -543,10 +544,28 @@ pub fn needUnwindTables(target: std.Target) bool { return target.os.tag == .windows; } -/// TODO this was ported from stage1 but it does not take into account CPU features, -/// which can affect this value. Audit this! -pub fn largestAtomicBits(target: std.Target) u32 { - return switch (target.cpu.arch) { +pub const AtomicPtrAlignmentError = error{ + FloatTooBig, + IntTooBig, + BadType, +}; + +pub const AtomicPtrAlignmentDiagnostics = struct { + bits: u16 = undefined, + max_bits: u16 = undefined, +}; + +/// If ABI alignment of `ty` is OK for atomic operations, returns 0. +/// Otherwise returns the alignment required on a pointer for the target +/// to perform atomic operations. +pub fn atomicPtrAlignment( + target: std.Target, + ty: Type, + diags: *AtomicPtrAlignmentDiagnostics, +) AtomicPtrAlignmentError!u32 { + // TODO this was ported from stage1 but it does not take into account CPU features, + // which can affect this value. Audit this! 
+ const max_atomic_bits: u16 = switch (target.cpu.arch) { .avr, .msp430, .spu_2, @@ -611,6 +630,47 @@ pub fn largestAtomicBits(target: std.Target) u32 { .x86_64 => 128, }; + + var buffer: Type.Payload.Bits = undefined; + + const int_ty = switch (ty.zigTypeTag()) { + .Int => ty, + .Enum => ty.intTagType(&buffer), + .Float => { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + if (target.cpu.arch == .x86_64 and bit_count > 64) { + return 16; + } + return 0; + }, + .Bool => return 0, + else => { + if (ty.isPtrAtRuntime()) return 0; + return error.BadType; + }, + }; + + const bit_count = int_ty.intInfo(target).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + + if (target.cpu.arch == .x86_64 and bit_count > 64) { + return 16; + } + + return 0; } pub fn defaultAddressSpace( |
