| -rw-r--r-- | CMakeLists.txt | 3 |
| -rw-r--r-- | LICENSE | 2 |
| -rw-r--r-- | build.zig | 2 |
| -rw-r--r-- | lib/std/Thread.zig | 37 |
| -rw-r--r-- | lib/std/Thread/Condition.zig | 2 |
| -rw-r--r-- | lib/std/Thread/Mutex.zig | 4 |
| -rw-r--r-- | lib/std/Thread/StaticResetEvent.zig | 4 |
| -rw-r--r-- | lib/std/atomic.zig | 76 |
| -rw-r--r-- | lib/std/atomic/Atomic.zig | 522 |
| -rw-r--r-- | lib/std/atomic/bool.zig | 55 |
| -rw-r--r-- | lib/std/atomic/int.zig | 92 |
| -rw-r--r-- | lib/std/crypto/pcurves/p256/scalar.zig | 6 |
| -rw-r--r-- | lib/std/json.zig | 160 |
| -rw-r--r-- | lib/std/os.zig | 2 |
| -rw-r--r-- | lib/std/packed_int_array.zig | 6 |
| -rw-r--r-- | lib/std/target.zig | 7 |
| -rw-r--r-- | src/BuiltinFn.zig | 2 |
17 files changed, 775 insertions, 207 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d7bfe5a7a6..099b7a40f3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -337,8 +337,7 @@ set(ZIG_STAGE2_SOURCES
     "${CMAKE_SOURCE_DIR}/lib/std/array_list.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/ascii.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/atomic.zig"
-    "${CMAKE_SOURCE_DIR}/lib/std/atomic/bool.zig"
-    "${CMAKE_SOURCE_DIR}/lib/std/atomic/int.zig"
+    "${CMAKE_SOURCE_DIR}/lib/std/atomic/Atomic.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/atomic/queue.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/atomic/stack.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/base64.zig"
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 The MIT License (Expat)
 
-Copyright (c) 2015 Andrew Kelley
+Copyright (c) 2015-2021, Zig contributors
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/build.zig b/build.zig
--- a/build.zig
+++ b/build.zig
@@ -741,7 +741,7 @@ const softfloat_sources = [_][]const u8{
 
 const stage1_sources = [_][]const u8{
     "src/stage1/analyze.cpp",
-    "src/stage1/ast_render.cpp",
+    "src/stage1/astgen.cpp",
     "src/stage1/bigfloat.cpp",
     "src/stage1/bigint.cpp",
     "src/stage1/buffer.cpp",
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index fbb3d927e2..fb74f09677 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -67,33 +67,7 @@ else switch (std.Target.current.os.tag) {
     else => struct {},
 };
 
-/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
-pub inline fn spinLoopHint() void {
-    switch (std.Target.current.cpu.arch) {
-        .i386, .x86_64 => {
-            asm volatile ("pause" ::: "memory");
-        },
-        .arm, .armeb, .thumb, .thumbeb => {
-            // `yield` was introduced in v6k but are also available on v6m.
-            const can_yield = comptime std.Target.arm.featureSetHasAny(std.Target.current.cpu.features, .{ .has_v6k, .has_v6m });
-            if (can_yield) asm volatile ("yield" ::: "memory")
-            // Fallback.
-            else asm volatile ("" ::: "memory");
-        },
-        .aarch64, .aarch64_be, .aarch64_32 => {
-            asm volatile ("isb" ::: "memory");
-        },
-        .powerpc64, .powerpc64le => {
-            // No-op that serves as `yield` hint.
-            asm volatile ("or 27, 27, 27" ::: "memory");
-        },
-        else => {
-            // Do nothing but prevent the compiler from optimizing away the
-            // spinning loop.
-            asm volatile ("" ::: "memory");
-        },
-    }
-}
+pub const spinLoopHint = @compileError("deprecated: use std.atomic.spinLoopHint");
 
 /// Returns the ID of the calling thread.
 /// Makes a syscall every time the function is called.
@@ -597,8 +571,13 @@ pub fn getCurrentThreadId() u64 {
     }
 }
 
-test {
+test "std.Thread" {
     if (!builtin.single_threaded) {
-        std.testing.refAllDecls(@This());
+        _ = AutoResetEvent;
+        _ = ResetEvent;
+        _ = StaticResetEvent;
+        _ = Mutex;
+        _ = Semaphore;
+        _ = Condition;
     }
 }
diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig
index a14b57f6b4..d88a6de31e 100644
--- a/lib/std/Thread/Condition.zig
+++ b/lib/std/Thread/Condition.zig
@@ -115,7 +115,7 @@ pub const AtomicCondition = struct {
                         else => unreachable,
                     }
                 },
-                else => spinLoopHint(),
+                else => std.atomic.spinLoopHint(),
             }
         }
     }
diff --git a/lib/std/Thread/Mutex.zig b/lib/std/Thread/Mutex.zig
index cae4e282de..49f138079d 100644
--- a/lib/std/Thread/Mutex.zig
+++ b/lib/std/Thread/Mutex.zig
@@ -126,7 +126,7 @@ pub const AtomicMutex = struct {
 
                 var iter = std.math.min(32, spin + 1);
                 while (iter > 0) : (iter -= 1)
-                    std.Thread.spinLoopHint();
+                    std.atomic.spinLoopHint();
             }
 
             new_state = .waiting;
@@ -149,7 +149,7 @@ pub const AtomicMutex = struct {
                         else => unreachable,
                     }
                 },
-                else => std.Thread.spinLoopHint(),
+                else => std.atomic.spinLoopHint(),
             }
         }
     }
diff --git a/lib/std/Thread/StaticResetEvent.zig b/lib/std/Thread/StaticResetEvent.zig
index 75bea463aa..0a6a1d568e 100644
--- a/lib/std/Thread/StaticResetEvent.zig
+++ b/lib/std/Thread/StaticResetEvent.zig
@@ -182,7 +182,7 @@ pub const AtomicEvent = struct {
             timer = time.Timer.start() catch return error.TimedOut;
 
         while (@atomicLoad(u32, waiters, .Acquire) != WAKE) {
-            std.os.sched_yield() catch std.Thread.spinLoopHint();
+            std.os.sched_yield() catch std.atomic.spinLoopHint();
             if (timeout) |timeout_ns| {
                 if (timer.read() >= timeout_ns)
                     return error.TimedOut;
@@ -293,7 +293,7 @@ pub const AtomicEvent = struct {
                     return @intToPtr(?windows.HANDLE, handle);
                 },
                 LOADING => {
-                    std.os.sched_yield() catch std.Thread.spinLoopHint();
+                    std.os.sched_yield() catch std.atomic.spinLoopHint();
                     handle = @atomicLoad(usize, &event_handle, .Monotonic);
                 },
                 else => {
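The three synchronization primitives above all migrate to the same adaptive-wait shape: spin briefly with a processor hint, then fall back to blocking in the OS. A minimal sketch of that pattern, where tryAcquire and blockWait are hypothetical stand-ins for the real futex/event plumbing:

const std = @import("std");

fn acquire(lock: anytype) void {
    var spin: usize = 0;
    while (spin < 32) : (spin += 1) {
        if (lock.tryAcquire()) return;
        std.atomic.spinLoopHint(); // cheap CPU hint instead of hammering the cache line
    }
    lock.blockWait(); // park in the OS once spinning stops paying off
}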
diff --git a/lib/std/atomic.zig b/lib/std/atomic.zig
index ab80fce872..224b57d1d2 100644
--- a/lib/std/atomic.zig
+++ b/lib/std/atomic.zig
@@ -3,14 +3,82 @@
 // This file is part of [zig](https://ziglang.org/), which is MIT licensed.
 // The MIT license requires this copyright notice to be included in all copies
 // and substantial portions of the software.
+
+const std = @import("std.zig");
+const target = std.Target.current;
+
+pub const Ordering = std.builtin.AtomicOrder;
+
 pub const Stack = @import("atomic/stack.zig").Stack;
 pub const Queue = @import("atomic/queue.zig").Queue;
-pub const Bool = @import("atomic/bool.zig").Bool;
-pub const Int = @import("atomic/int.zig").Int;
+pub const Atomic = @import("atomic/Atomic.zig").Atomic;
 
 test "std.atomic" {
     _ = @import("atomic/stack.zig");
     _ = @import("atomic/queue.zig");
-    _ = @import("atomic/bool.zig");
-    _ = @import("atomic/int.zig");
+    _ = @import("atomic/Atomic.zig");
+}
+
+pub fn fence(comptime ordering: Ordering) callconv(.Inline) void {
+    switch (ordering) {
+        .Acquire, .Release, .AcqRel, .SeqCst => {
+            @fence(ordering);
+        },
+        else => {
+            @compileLog(ordering, " only applies to a given memory location");
+        },
+    }
+}
+
+pub fn compilerFence(comptime ordering: Ordering) callconv(.Inline) void {
+    switch (ordering) {
+        .Acquire, .Release, .AcqRel, .SeqCst => asm volatile ("" ::: "memory"),
+        else => @compileLog(ordering, " only applies to a given memory location"),
+    }
+}
+
+test "fence/compilerFence" {
+    inline for (.{ .Acquire, .Release, .AcqRel, .SeqCst }) |ordering| {
+        compilerFence(ordering);
+        fence(ordering);
+    }
+}
+
+/// Signals to the processor that the caller is inside a busy-wait spin-loop.
+pub fn spinLoopHint() callconv(.Inline) void {
+    const hint_instruction = switch (target.cpu.arch) {
+        // No-op instruction that can hint to save (or share with a hardware-thread) pipelining/power resources
+        // https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html
+        .i386, .x86_64 => "pause",
+
+        // No-op instruction that serves as a hardware-thread resource yield hint.
+        // https://stackoverflow.com/a/7588941
+        .powerpc64, .powerpc64le => "or 27, 27, 27",
+
+        // `isb` appears more reliable for releasing execution resources than `yield` on common aarch64 CPUs.
+        // https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8258604
+        // https://bugs.mysql.com/bug.php?id=100664
+        .aarch64, .aarch64_be, .aarch64_32 => "isb",
+
+        // `yield` was introduced in v6k but is also available on v6m.
+        // https://www.keil.com/support/man/docs/armasm/armasm_dom1361289926796.htm
+        .arm, .armeb, .thumb, .thumbeb => blk: {
+            const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ .has_v6k, .has_v6m });
+            const instruction = if (can_yield) "yield" else "";
+            break :blk instruction;
+        },
+
+        else => "",
+    };
+
+    // Memory barrier to prevent the compiler from optimizing away the spin-loop
+    // even if no hint_instruction was provided.
+    asm volatile (hint_instruction ::: "memory");
+}
+
+test "spinLoopHint" {
+    var i: usize = 10;
+    while (i > 0) : (i -= 1) {
+        spinLoopHint();
+    }
+}
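The new fence helper only forwards orderings that make sense for a fence to @fence. A sketch of the classic pairing it enables, with an illustrative ready flag and payload that are not part of this patch: a Release store publishes a plain value, and an Acquire fence after a Monotonic load that observed the flag makes reading the value safe.

const std = @import("std");

var data: u32 = 0; // plain, non-atomic payload
var ready = std.atomic.Atomic(bool).init(false);

fn producer() void {
    data = 42;
    ready.store(true, .Release); // publish
}

fn consumer() u32 {
    while (!ready.load(.Monotonic)) std.atomic.spinLoopHint();
    std.atomic.fence(.Acquire); // pairs with the Release store above
    return data;
}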
diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig
new file mode 100644
index 0000000000..5c3b865a6a
--- /dev/null
+++ b/lib/std/atomic/Atomic.zig
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2021 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
+const std = @import("../std.zig");
+
+const testing = std.testing;
+const target = std.Target.current;
+const Ordering = std.atomic.Ordering;
+
+pub fn Atomic(comptime T: type) type {
+    return extern struct {
+        value: T,
+
+        const Self = @This();
+
+        pub fn init(value: T) Self {
+            return .{ .value = value };
+        }
+
+        /// Non-atomically load from the atomic value without synchronization.
+        /// Care must be taken to avoid data-races when interacting with other atomic operations.
+        pub fn loadUnchecked(self: Self) T {
+            return self.value;
+        }
+
+        /// Non-atomically store to the atomic value without synchronization.
+        /// Care must be taken to avoid data-races when interacting with other atomic operations.
+        pub fn storeUnchecked(self: *Self, value: T) void {
+            self.value = value;
+        }
+
+        pub fn load(self: *const Self, comptime ordering: Ordering) T {
+            return switch (ordering) {
+                .AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on atomic stores"),
+                .Release => @compileError(@tagName(ordering) ++ " is only allowed on atomic stores"),
+                else => @atomicLoad(T, &self.value, ordering),
+            };
+        }
+
+        pub fn store(self: *Self, value: T, comptime ordering: Ordering) void {
+            return switch (ordering) {
+                .AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Acquire) ++ " which is only allowed on atomic loads"),
+                .Acquire => @compileError(@tagName(ordering) ++ " is only allowed on atomic loads"),
+                else => @atomicStore(T, &self.value, value, ordering),
+            };
+        }
+
+        pub fn swap(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            return self.rmw(.Xchg, value, ordering);
+        }
+
+        pub fn compareAndSwap(
+            self: *Self,
+            compare: T,
+            exchange: T,
+            comptime success: Ordering,
+            comptime failure: Ordering,
+        ) callconv(.Inline) ?T {
+            return self.cmpxchg(true, compare, exchange, success, failure);
+        }
+
+        pub fn tryCompareAndSwap(
+            self: *Self,
+            compare: T,
+            exchange: T,
+            comptime success: Ordering,
+            comptime failure: Ordering,
+        ) callconv(.Inline) ?T {
+            return self.cmpxchg(false, compare, exchange, success, failure);
+        }
+
+        fn cmpxchg(
+            self: *Self,
+            comptime is_strong: bool,
+            compare: T,
+            exchange: T,
+            comptime success: Ordering,
+            comptime failure: Ordering,
+        ) callconv(.Inline) ?T {
+            if (success == .Unordered or failure == .Unordered) {
+                @compileError(@tagName(Ordering.Unordered) ++ " is only allowed on atomic loads and stores");
+            }
+
+            comptime var success_is_stronger = switch (failure) {
+                .SeqCst => success == .SeqCst,
+                .AcqRel => @compileError(@tagName(failure) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on success"),
+                .Acquire => success == .SeqCst or success == .AcqRel or success == .Acquire,
+                .Release => @compileError(@tagName(failure) ++ " is only allowed on success"),
+                .Monotonic => true,
+                .Unordered => unreachable,
+            };
+
+            if (!success_is_stronger) {
+                @compileError(@tagName(success) ++ " must be stronger than " ++ @tagName(failure));
+            }
+
+            return switch (is_strong) {
+                true => @cmpxchgStrong(T, &self.value, compare, exchange, success, failure),
+                false => @cmpxchgWeak(T, &self.value, compare, exchange, success, failure),
+            };
+        }
+
+        fn rmw(
+            self: *Self,
+            comptime op: std.builtin.AtomicRmwOp,
+            value: T,
+            comptime ordering: Ordering,
+        ) callconv(.Inline) T {
+            return @atomicRmw(T, &self.value, op, value, ordering);
+        }
+
+        fn exportWhen(comptime condition: bool, comptime functions: type) type {
+            return if (condition) functions else struct {};
+        }
+
+        pub usingnamespace exportWhen(std.meta.trait.isNumber(T), struct {
+            pub fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Add, value, ordering);
+            }
+
+            pub fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Sub, value, ordering);
+            }
+
+            pub fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Min, value, ordering);
+            }
+
+            pub fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Max, value, ordering);
+            }
+        });
+
+        pub usingnamespace exportWhen(std.meta.trait.isIntegral(T), struct {
+            pub fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.And, value, ordering);
+            }
+
+            pub fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Nand, value, ordering);
+            }
+
+            pub fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Or, value, ordering);
+            }
+
+            pub fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Xor, value, ordering);
+            }
+
+            const Bit = std.math.Log2Int(T);
+            const BitRmwOp = enum {
+                Set,
+                Reset,
+                Toggle,
+            };
+
+            pub fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+                return bitRmw(self, .Set, bit, ordering);
+            }
+
+            pub fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+                return bitRmw(self, .Reset, bit, ordering);
+            }
+
+            pub fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+                return bitRmw(self, .Toggle, bit, ordering);
+            }
+
+            fn bitRmw(
+                self: *Self,
+                comptime op: BitRmwOp,
+                bit: Bit,
+                comptime ordering: Ordering,
+            ) callconv(.Inline) u1 {
+                // x86 supports dedicated bitwise instructions
+                if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) {
+                    const instruction = switch (op) {
+                        .Set => "lock bts",
+                        .Reset => "lock btr",
+                        .Toggle => "lock btc",
+                    };
+
+                    const suffix = switch (@sizeOf(T)) {
+                        2 => "w",
+                        4 => "l",
+                        8 => "q",
+                        else => @compileError("Invalid atomic type " ++ @typeName(T)),
+                    };
+
+                    const old_bit = asm volatile (instruction ++ suffix ++ " %[bit], %[ptr]"
+                        : [result] "={@ccc}" (-> u8) // LLVM doesn't support u1 flag register return values
+                        : [ptr] "*p" (&self.value),
+                          [bit] "X" (@as(T, bit))
+                        : "cc", "memory"
+                    );
+
+                    return @intCast(u1, old_bit);
+                }
+
+                const mask = @as(T, 1) << bit;
+                const value = switch (op) {
+                    .Set => self.fetchOr(mask, ordering),
+                    .Reset => self.fetchAnd(~mask, ordering),
+                    .Toggle => self.fetchXor(mask, ordering),
+                };
+
+                return @boolToInt(value & mask != 0);
+            }
+        });
+    };
+}
+
+fn atomicIntTypes() []const type {
+    comptime var bytes = 1;
+    comptime var types: []const type = &[_]type{};
+    inline while (bytes <= @sizeOf(usize)) : (bytes *= 2) {
+        types = types ++ &[_]type{std.meta.Int(.unsigned, bytes * 8)};
+    }
+    return types;
+}
+
+test "Atomic.loadUnchecked" {
+    inline for (atomicIntTypes()) |Int| {
+        var x = Atomic(Int).init(5);
+        try testing.expectEqual(x.loadUnchecked(), 5);
+    }
+}
+
+test "Atomic.storeUnchecked" {
+    inline for (atomicIntTypes()) |Int| {
+        var x = Atomic(usize).init(5);
+        x.storeUnchecked(10);
+        try testing.expectEqual(x.loadUnchecked(), 10);
+    }
+}
+
+test "Atomic.load" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (.{ .Unordered, .Monotonic, .Acquire, .SeqCst }) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.load(ordering), 5);
+        }
+    }
+}
+
+test "Atomic.store" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| {
+            var x = Atomic(usize).init(5);
+            x.store(10, ordering);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+        }
+    }
+}
+
+const atomic_rmw_orderings = [_]Ordering{
+    .Monotonic,
+    .Acquire,
+    .Release,
+    .AcqRel,
+    .SeqCst,
+};
+
+test "Atomic.swap" {
+    inline for (atomic_rmw_orderings) |ordering| {
+        var x = Atomic(usize).init(5);
+        try testing.expectEqual(x.swap(10, ordering), 5);
+        try testing.expectEqual(x.load(.SeqCst), 10);
+
+        var y = Atomic(enum(usize) { a, b, c }).init(.c);
+        try testing.expectEqual(y.swap(.a, ordering), .c);
+        try testing.expectEqual(y.load(.SeqCst), .a);
+
+        var z = Atomic(f32).init(5.0);
+        try testing.expectEqual(z.swap(10.0, ordering), 5.0);
+        try testing.expectEqual(z.load(.SeqCst), 10.0);
+
+        var a = Atomic(bool).init(false);
+        try testing.expectEqual(a.swap(true, ordering), false);
+        try testing.expectEqual(a.load(.SeqCst), true);
+
+        var b = Atomic(?*u8).init(null);
+        try testing.expectEqual(b.swap(@intToPtr(?*u8, @alignOf(u8)), ordering), null);
+        try testing.expectEqual(b.load(.SeqCst), @intToPtr(?*u8, @alignOf(u8)));
+    }
+}
+
+const atomic_cmpxchg_orderings = [_][2]Ordering{
+    .{ .Monotonic, .Monotonic },
+    .{ .Acquire, .Monotonic },
+    .{ .Acquire, .Acquire },
+    .{ .Release, .Monotonic },
+    // Although accepted by LLVM, acquire failure implies AcqRel success
+    // .{ .Release, .Acquire },
+    .{ .AcqRel, .Monotonic },
+    .{ .AcqRel, .Acquire },
+    .{ .SeqCst, .Monotonic },
+    .{ .SeqCst, .Acquire },
+    .{ .SeqCst, .SeqCst },
+};
+
+test "Atomic.compareAndSwap" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_cmpxchg_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), 0);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+            try testing.expectEqual(x.compareAndSwap(0, 1, ordering[0], ordering[1]), null);
+            try testing.expectEqual(x.load(.SeqCst), 1);
+            try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), null);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+        }
+    }
+}
+
+test "Atomic.tryCompareAndSwap" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_cmpxchg_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+
+            try testing.expectEqual(x.tryCompareAndSwap(1, 0, ordering[0], ordering[1]), 0);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+
+            while (x.tryCompareAndSwap(0, 1, ordering[0], ordering[1])) |_| {}
+            try testing.expectEqual(x.load(.SeqCst), 1);
+
+            while (x.tryCompareAndSwap(1, 0, ordering[0], ordering[1])) |_| {}
+            try testing.expectEqual(x.load(.SeqCst), 0);
+        }
+    }
+}
+
+test "Atomic.fetchAdd" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchAdd(5, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+            try testing.expectEqual(x.fetchAdd(std.math.maxInt(Int), ordering), 10);
+            try testing.expectEqual(x.load(.SeqCst), 9);
+        }
+    }
+}
+
+test "Atomic.fetchSub" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchSub(5, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+            try testing.expectEqual(x.fetchSub(1, ordering), 0);
+            try testing.expectEqual(x.load(.SeqCst), std.math.maxInt(Int));
+        }
+    }
+}
+
+test "Atomic.fetchMin" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchMin(0, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+            try testing.expectEqual(x.fetchMin(10, ordering), 0);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+        }
+    }
+}
+
+test "Atomic.fetchMax" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchMax(10, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+            try testing.expectEqual(x.fetchMax(5, ordering), 10);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+        }
+    }
+}
+
+test "Atomic.fetchAnd" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchAnd(0b10, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), 0b10);
+            try testing.expectEqual(x.fetchAnd(0b00, ordering), 0b10);
+            try testing.expectEqual(x.load(.SeqCst), 0b00);
+        }
+    }
+}
+
+test "Atomic.fetchNand" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchNand(0b10, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b10));
+            try testing.expectEqual(x.fetchNand(0b00, ordering), ~@as(Int, 0b10));
+            try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b00));
+        }
+    }
+}
+
+test "Atomic.fetchOr" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchOr(0b100, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), 0b111);
+            try testing.expectEqual(x.fetchOr(0b010, ordering), 0b111);
+            try testing.expectEqual(x.load(.SeqCst), 0b111);
+        }
+    }
+}
+
+test "Atomic.fetchXor" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchXor(0b10, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), 0b01);
+            try testing.expectEqual(x.fetchXor(0b01, ordering), 0b01);
+            try testing.expectEqual(x.load(.SeqCst), 0b00);
+        }
+    }
+}
+
+test "Atomic.bitSet" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+
+            for (bit_array) |_, bit_index| {
+                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const mask = @as(Int, 1) << bit;
+
+                // setting the bit should change the bit
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+                try testing.expectEqual(x.bitSet(bit, ordering), 0);
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+
+                // setting it again shouldn't change the bit
+                try testing.expectEqual(x.bitSet(bit, ordering), 1);
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+
+                // all the previous bits should have not changed (still be set)
+                for (bit_array[0..bit_index]) |_, prev_bit_index| {
+                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_mask = @as(Int, 1) << prev_bit;
+                    try testing.expect(x.load(.SeqCst) & prev_mask != 0);
+                }
+            }
+        }
+    }
+}
+
+test "Atomic.bitReset" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+
+            for (bit_array) |_, bit_index| {
+                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const mask = @as(Int, 1) << bit;
+                x.storeUnchecked(x.loadUnchecked() | mask);
+
+                // unsetting the bit should change the bit
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+                try testing.expectEqual(x.bitReset(bit, ordering), 1);
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+
+                // unsetting it again shouldn't change the bit
+                try testing.expectEqual(x.bitReset(bit, ordering), 0);
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+
+                // all the previous bits should have not changed (still be reset)
+                for (bit_array[0..bit_index]) |_, prev_bit_index| {
+                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_mask = @as(Int, 1) << prev_bit;
+                    try testing.expect(x.load(.SeqCst) & prev_mask == 0);
+                }
+            }
+        }
+    }
+}
+
+test "Atomic.bitToggle" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+
+            for (bit_array) |_, bit_index| {
+                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const mask = @as(Int, 1) << bit;
+
+                // toggling the bit should change the bit
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+                try testing.expectEqual(x.bitToggle(bit, ordering), 0);
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+
+                // toggling it again *should* change the bit
+                try testing.expectEqual(x.bitToggle(bit, ordering), 1);
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+
+                // all the previous bits should have not changed (still be toggled back)
+                for (bit_array[0..bit_index]) |_, prev_bit_index| {
+                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_mask = @as(Int, 1) << prev_bit;
+                    try testing.expect(x.load(.SeqCst) & prev_mask == 0);
+                }
+            }
+        }
+    }
+}
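The cmpxchg helper above enforces at compile time that the failure ordering is no stronger than the success ordering, mirroring LLVM's rules. A usage sketch with an illustrative bounded counter (not part of the patch): tryCompareAndSwap lowers to @cmpxchgWeak and may fail spuriously, so it always runs in a retry loop.

const std = @import("std");
const Atomic = std.atomic.Atomic;

fn incrementUpTo(counter: *Atomic(u32), limit: u32) u32 {
    var current = counter.load(.Monotonic);
    while (current < limit) {
        // null means the swap succeeded; otherwise we get the value actually observed.
        current = counter.tryCompareAndSwap(
            current,
            current + 1,
            .Monotonic, // success ordering
            .Monotonic, // failure ordering: must not be stronger than success
        ) orelse return current + 1;
    }
    return current;
}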
diff --git a/lib/std/atomic/bool.zig b/lib/std/atomic/bool.zig
deleted file mode 100644
index 1c6ac8d046..0000000000
--- a/lib/std/atomic/bool.zig
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: MIT
-// Copyright (c) 2015-2021 Zig Contributors
-// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
-// The MIT license requires this copyright notice to be included in all copies
-// and substantial portions of the software.
-
-const std = @import("std");
-const builtin = std.builtin;
-const testing = std.testing;
-
-/// Thread-safe, lock-free boolean
-pub const Bool = extern struct {
-    unprotected_value: bool,
-
-    pub const Self = @This();
-
-    pub fn init(init_val: bool) Self {
-        return Self{ .unprotected_value = init_val };
-    }
-
-    // xchg is only valid rmw operation for a bool
-    /// Atomically modifies memory and then returns the previous value.
-    pub fn xchg(self: *Self, operand: bool, comptime ordering: std.builtin.AtomicOrder) bool {
-        switch (ordering) {
-            .Monotonic, .Acquire, .Release, .AcqRel, .SeqCst => {},
-            else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a RMW operation"),
-        }
-        return @atomicRmw(bool, &self.unprotected_value, .Xchg, operand, ordering);
-    }
-
-    pub fn load(self: *const Self, comptime ordering: std.builtin.AtomicOrder) bool {
-        switch (ordering) {
-            .Unordered, .Monotonic, .Acquire, .SeqCst => {},
-            else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a load operation"),
-        }
-        return @atomicLoad(bool, &self.unprotected_value, ordering);
-    }
-
-    pub fn store(self: *Self, value: bool, comptime ordering: std.builtin.AtomicOrder) void {
-        switch (ordering) {
-            .Unordered, .Monotonic, .Release, .SeqCst => {},
-            else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a store operation"),
-        }
-        @atomicStore(bool, &self.unprotected_value, value, ordering);
-    }
-};
-
-test "std.atomic.Bool" {
-    var a = Bool.init(false);
-    try testing.expectEqual(false, a.xchg(false, .SeqCst));
-    try testing.expectEqual(false, a.load(.SeqCst));
-    a.store(true, .SeqCst);
-    try testing.expectEqual(true, a.xchg(false, .SeqCst));
-    try testing.expectEqual(false, a.load(.SeqCst));
-}
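Callers of the deleted std.atomic.Bool map directly onto Atomic(bool); the only renamed operation is xchg, which becomes swap. A sketch of the migration:

const std = @import("std");

var flag = std.atomic.Atomic(bool).init(false); // was: std.atomic.Bool.init(false)

fn example() bool {
    flag.store(true, .SeqCst);        // unchanged
    _ = flag.load(.SeqCst);           // unchanged
    return flag.swap(false, .SeqCst); // was: flag.xchg(false, .SeqCst)
}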
diff --git a/lib/std/atomic/int.zig b/lib/std/atomic/int.zig
deleted file mode 100644
index 3809541fe3..0000000000
--- a/lib/std/atomic/int.zig
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: MIT
-// Copyright (c) 2015-2021 Zig Contributors
-// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
-// The MIT license requires this copyright notice to be included in all copies
-// and substantial portions of the software.
-
-const std = @import("std");
-const builtin = std.builtin;
-const testing = std.testing;
-
-/// Thread-safe, lock-free integer
-pub fn Int(comptime T: type) type {
-    if (!std.meta.trait.isIntegral(T))
-        @compileError("Expected integral type, got '" ++ @typeName(T) ++ "'");
-
-    return extern struct {
-        unprotected_value: T,
-
-        pub const Self = @This();
-
-        pub fn init(init_val: T) Self {
-            return Self{ .unprotected_value = init_val };
-        }
-
-        /// Read, Modify, Write
-        pub fn rmw(self: *Self, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T {
-            switch (ordering) {
-                .Monotonic, .Acquire, .Release, .AcqRel, .SeqCst => {},
-                else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a RMW operation"),
-            }
-            return @atomicRmw(T, &self.unprotected_value, op, operand, ordering);
-        }
-
-        pub fn load(self: *const Self, comptime ordering: builtin.AtomicOrder) T {
-            switch (ordering) {
-                .Unordered, .Monotonic, .Acquire, .SeqCst => {},
-                else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a load operation"),
-            }
-            return @atomicLoad(T, &self.unprotected_value, ordering);
-        }
-
-        pub fn store(self: *Self, value: T, comptime ordering: builtin.AtomicOrder) void {
-            switch (ordering) {
-                .Unordered, .Monotonic, .Release, .SeqCst => {},
-                else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a store operation"),
-            }
-            @atomicStore(T, &self.unprotected_value, value, ordering);
-        }
-
-        /// Twos complement wraparound increment
-        /// Returns previous value
-        pub fn incr(self: *Self) T {
-            return self.rmw(.Add, 1, .SeqCst);
-        }
-
-        /// Twos complement wraparound decrement
-        /// Returns previous value
-        pub fn decr(self: *Self) T {
-            return self.rmw(.Sub, 1, .SeqCst);
-        }
-
-        pub fn get(self: *const Self) T {
-            return self.load(.SeqCst);
-        }
-
-        pub fn set(self: *Self, new_value: T) void {
-            self.store(new_value, .SeqCst);
-        }
-
-        pub fn xchg(self: *Self, new_value: T) T {
-            return self.rmw(.Xchg, new_value, .SeqCst);
-        }
-
-        /// Twos complement wraparound add
-        /// Returns previous value
-        pub fn fetchAdd(self: *Self, op: T) T {
-            return self.rmw(.Add, op, .SeqCst);
-        }
-    };
-}
-
-test "std.atomic.Int" {
-    var a = Int(u8).init(0);
-    try testing.expectEqual(@as(u8, 0), a.incr());
-    try testing.expectEqual(@as(u8, 1), a.load(.SeqCst));
-    a.store(42, .SeqCst);
-    try testing.expectEqual(@as(u8, 42), a.decr());
-    try testing.expectEqual(@as(u8, 41), a.xchg(100));
-    try testing.expectEqual(@as(u8, 100), a.fetchAdd(5));
-    try testing.expectEqual(@as(u8, 105), a.get());
-    a.set(200);
-}
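The deleted Int(T) convenience methods all baked in .SeqCst; with Atomic(T) the ordering becomes explicit at each call site. A sketch of the mapping:

const std = @import("std");

var count = std.atomic.Atomic(u8).init(0); // was: std.atomic.Int(u8).init(0)

fn example() void {
    _ = count.fetchAdd(1, .SeqCst); // was: count.incr()
    _ = count.fetchSub(1, .SeqCst); // was: count.decr()
    _ = count.load(.SeqCst);        // was: count.get()
    count.store(42, .SeqCst);       // was: count.set(42)
    _ = count.swap(100, .SeqCst);   // was: count.xchg(100)
}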
diff --git a/lib/std/crypto/pcurves/p256/scalar.zig b/lib/std/crypto/pcurves/p256/scalar.zig
index 3cd6689897..02474bae08 100644
--- a/lib/std/crypto/pcurves/p256/scalar.zig
+++ b/lib/std/crypto/pcurves/p256/scalar.zig
@@ -120,12 +120,12 @@ pub const Scalar = struct {
 
     /// Compute x+y (mod L)
     pub fn add(x: Scalar, y: Scalar) Scalar {
-        return Scalar{ .fe = x.fe().add(y.fe) };
+        return Scalar{ .fe = x.fe.add(y.fe) };
     }
 
     /// Compute x-y (mod L)
     pub fn sub(x: Scalar, y: Scalar) Scalar {
-        return Scalar{ .fe = x.fe().sub(y.fe) };
+        return Scalar{ .fe = x.fe.sub(y.fe) };
     }
 
     /// Compute 2n (mod L)
@@ -135,7 +135,7 @@ pub const Scalar = struct {
 
     /// Compute x*y (mod L)
     pub fn mul(x: Scalar, y: Scalar) Scalar {
-        return Scalar{ .fe = x.fe().mul(y.fe) };
+        return Scalar{ .fe = x.fe.mul(y.fe) };
     }
 
     /// Compute x^2 (mod L)
diff --git a/lib/std/json.zig b/lib/std/json.zig
index 811db020ca..9fb77d501f 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1105,6 +1105,10 @@ pub const TokenStream = struct {
         };
     }
 
+    fn stackUsed(self: *TokenStream) u8 {
+        return self.parser.stack_used + if (self.token != null) @as(u8, 1) else 0;
+    }
+
    pub fn next(self: *TokenStream) Error!?Token {
        if (self.token) |token| {
            self.token = null;
@@ -1457,8 +1461,75 @@ pub const ParseOptions = struct {
         Error,
         UseLast,
     } = .Error,
+
+    /// If false, finding an unknown field returns an error.
+    ignore_unknown_fields: bool = false,
+
+    allow_trailing_data: bool = false,
 };
 
+fn skipValue(tokens: *TokenStream) !void {
+    const original_depth = tokens.stackUsed();
+
+    // Return an error if no value is found
+    _ = try tokens.next();
+    if (tokens.stackUsed() < original_depth) return error.UnexpectedJsonDepth;
+    if (tokens.stackUsed() == original_depth) return;
+
+    while (try tokens.next()) |_| {
+        if (tokens.stackUsed() == original_depth) return;
+    }
+}
+
+test "skipValue" {
+    try skipValue(&TokenStream.init("false"));
+    try skipValue(&TokenStream.init("true"));
+    try skipValue(&TokenStream.init("null"));
+    try skipValue(&TokenStream.init("42"));
+    try skipValue(&TokenStream.init("42.0"));
+    try skipValue(&TokenStream.init("\"foo\""));
+    try skipValue(&TokenStream.init("[101, 111, 121]"));
+    try skipValue(&TokenStream.init("{}"));
+    try skipValue(&TokenStream.init("{\"foo\": \"bar\"}"));
+
+    { // An absurd number of nestings
+        const nestings = 256;
+
+        try testing.expectError(
+            error.TooManyNestedItems,
+            skipValue(&TokenStream.init("[" ** nestings ++ "]" ** nestings)),
+        );
+    }
+
+    { // Would a number token cause problems in a deeply-nested array?
+        const nestings = 255;
+        const deeply_nested_array = "[" ** nestings ++ "0.118, 999, 881.99, 911.9, 725, 3" ++ "]" ** nestings;
+
+        try skipValue(&TokenStream.init(deeply_nested_array));
+
+        try testing.expectError(
+            error.TooManyNestedItems,
+            skipValue(&TokenStream.init("[" ++ deeply_nested_array ++ "]")),
+        );
+    }
+
+    // Mismatched brace/square bracket
+    try testing.expectError(
+        error.UnexpectedClosingBrace,
+        skipValue(&TokenStream.init("[102, 111, 111}")),
+    );
+
+    { // should fail if no value found (e.g. immediate close of object)
+        var empty_object = TokenStream.init("{}");
+        assert(.ObjectBegin == (try empty_object.next()).?);
+        try testing.expectError(error.UnexpectedJsonDepth, skipValue(&empty_object));
+
+        var empty_array = TokenStream.init("[]");
+        assert(.ArrayBegin == (try empty_array.next()).?);
+        try testing.expectError(error.UnexpectedJsonDepth, skipValue(&empty_array));
+    }
+}
+
 fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options: ParseOptions) !T {
     switch (@typeInfo(T)) {
         .Bool => {
@@ -1558,6 +1629,8 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                     .ObjectEnd => break,
                     .String => |stringToken| {
                         const key_source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
+                        var child_options = options;
+                        child_options.allow_trailing_data = true;
                         var found = false;
                         inline for (structInfo.fields) |field, i| {
                             // TODO: using switches here segfault the compiler (#2727?)
@@ -1574,31 +1647,38 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                                 //     }
                                 if (options.duplicate_field_behavior == .UseFirst) {
                                     // unconditonally ignore value. for comptime fields, this skips check against default_value
-                                    parseFree(field.field_type, try parse(field.field_type, tokens, options), options);
+                                    parseFree(field.field_type, try parse(field.field_type, tokens, child_options), child_options);
                                     found = true;
                                     break;
                                 } else if (options.duplicate_field_behavior == .Error) {
                                     return error.DuplicateJSONField;
                                 } else if (options.duplicate_field_behavior == .UseLast) {
                                     if (!field.is_comptime) {
-                                        parseFree(field.field_type, @field(r, field.name), options);
+                                        parseFree(field.field_type, @field(r, field.name), child_options);
                                     }
                                     fields_seen[i] = false;
                                 }
                             }
                             if (field.is_comptime) {
-                                if (!try parsesTo(field.field_type, field.default_value.?, tokens, options)) {
+                                if (!try parsesTo(field.field_type, field.default_value.?, tokens, child_options)) {
                                     return error.UnexpectedValue;
                                 }
                             } else {
-                                @field(r, field.name) = try parse(field.field_type, tokens, options);
+                                @field(r, field.name) = try parse(field.field_type, tokens, child_options);
                             }
                             fields_seen[i] = true;
                             found = true;
                             break;
                         }
-                        if (!found) return error.UnknownField;
+                        if (!found) {
+                            if (options.ignore_unknown_fields) {
+                                try skipValue(tokens);
+                                continue;
+                            } else {
+                                return error.UnknownField;
+                            }
+                        }
                     },
                     else => return error.UnexpectedToken,
                 }
@@ -1621,14 +1701,17 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
         .ArrayBegin => {
             var r: T = undefined;
             var i: usize = 0;
+            var child_options = options;
+            child_options.allow_trailing_data = true;
             errdefer {
-                while (true) : (i -= 1) {
+                // Without the r.len check `r[i]` is not allowed
+                if (r.len > 0) while (true) : (i -= 1) {
                     parseFree(arrayInfo.child, r[i], options);
                     if (i == 0) break;
-                }
+                };
             }
             while (i < r.len) : (i += 1) {
-                r[i] = try parse(arrayInfo.child, tokens, options);
+                r[i] = try parse(arrayInfo.child, tokens, child_options);
             }
             const tok = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
             switch (tok) {
@@ -1709,7 +1792,13 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 
 pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) !T {
     const token = (try tokens.next()) orelse return error.UnexpectedEndOfJson;
-    return parseInternal(T, token, tokens, options);
+    const r = try parseInternal(T, token, tokens, options);
+    errdefer parseFree(T, r, options);
+    if (!options.allow_trailing_data) {
+        if ((try tokens.next()) != null) unreachable;
+        assert(tokens.i >= tokens.slice.len);
+    }
+    return r;
 }
 
 /// Releases resources created by `parse`.
@@ -1778,6 +1867,7 @@ test "parse" {
     try testing.expectEqual(@as([3]u8, "foo".*), try parse([3]u8, &TokenStream.init("\"foo\""), ParseOptions{}));
     try testing.expectEqual(@as([3]u8, "foo".*), try parse([3]u8, &TokenStream.init("[102, 111, 111]"), ParseOptions{}));
+    try testing.expectEqual(@as([0]u8, undefined), try parse([0]u8, &TokenStream.init("[]"), ParseOptions{}));
 }
 
 test "parse into enum" {
@@ -1793,6 +1883,13 @@ test "parse into enum" {
     try testing.expectError(error.InvalidEnumTag, parse(T, &TokenStream.init("\"Qux\""), ParseOptions{}));
 }
 
+test "parse with trailing data" {
+    try testing.expectEqual(false, try parse(bool, &TokenStream.init("falsed"), ParseOptions{ .allow_trailing_data = true }));
+    try testing.expectError(error.InvalidTopLevelTrailing, parse(bool, &TokenStream.init("falsed"), ParseOptions{ .allow_trailing_data = false }));
+    // trailing whitespace is okay
+    try testing.expectEqual(false, try parse(bool, &TokenStream.init("false \n"), ParseOptions{ .allow_trailing_data = false }));
+}
+
 test "parse into that allocates a slice" {
     try testing.expectError(error.AllocatorRequired, parse([]u8, &TokenStream.init("\"foo\""), ParseOptions{}));
 
@@ -2014,7 +2111,10 @@ test "parse into struct with duplicate field" {
     const ballast = try testing.allocator.alloc(u64, 1);
     defer testing.allocator.free(ballast);
 
-    const options_first = ParseOptions{ .allocator = testing.allocator, .duplicate_field_behavior = .UseFirst };
+    const options_first = ParseOptions{
+        .allocator = testing.allocator,
+        .duplicate_field_behavior = .UseFirst,
+    };
 
     const options_last = ParseOptions{
         .allocator = testing.allocator,
@@ -2040,6 +2140,46 @@ test "parse into struct with duplicate field" {
     try testing.expectError(error.UnexpectedValue, parse(T3, &TokenStream.init(str), options_last));
 }
 
+test "parse into struct ignoring unknown fields" {
+    const T = struct {
+        int: i64,
+        language: []const u8,
+    };
+
+    const ops = ParseOptions{
+        .allocator = testing.allocator,
+        .ignore_unknown_fields = true,
+    };
+
+    const r = try parse(T, &std.json.TokenStream.init(
+        \\{
+        \\  "int": 420,
+        \\  "float": 3.14,
+        \\  "with\\escape": true,
+        \\  "with\u0105unicode\ud83d\ude02": false,
+        \\  "optional": null,
+        \\  "static_array": [66.6, 420.420, 69.69],
+        \\  "dynamic_array": [66.6, 420.420, 69.69],
+        \\  "complex": {
+        \\    "nested": "zig"
+        \\  },
+        \\  "veryComplex": [
+        \\    {
+        \\      "foo": "zig"
+        \\    }, {
+        \\      "foo": "rocks"
+        \\    }
+        \\  ],
+        \\  "a_union": 100000,
+        \\  "language": "zig"
+        \\}
+    ), ops);
+    defer parseFree(T, r, ops);
+
+    try testing.expectEqual(@as(i64, 420), r.int);
+    try testing.expectEqualSlices(u8, "zig", r.language);
+}
+
 /// A non-stream JSON parser which constructs a tree of Value's.
 pub const Parser = struct {
     allocator: *Allocator,
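A usage sketch of the new option from the caller's side, with an illustrative Config struct that is not part of the patch: unknown keys are consumed via skipValue() instead of aborting the parse.

const std = @import("std");

const Config = struct {
    name: []const u8,
    retries: u8,
};

fn loadConfig(allocator: *std.mem.Allocator) !Config {
    const options = std.json.ParseOptions{
        .allocator = allocator,
        .ignore_unknown_fields = true, // "debug" below is skipped, not an error
    };
    var stream = std.json.TokenStream.init(
        \\{"name": "zig", "debug": true, "retries": 3}
    );
    // Caller releases with std.json.parseFree(Config, result, options).
    return std.json.parse(Config, &stream, options);
}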
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 93d0b9832d..2fe7ba9c5a 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -5534,7 +5534,7 @@ pub const CopyFileRangeError = error{
 
 var has_copy_file_range_syscall = init: {
     const kernel_has_syscall = std.Target.current.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true;
-    break :init std.atomic.Bool.init(kernel_has_syscall);
+    break :init std.atomic.Atomic(bool).init(kernel_has_syscall);
 };
 
 /// Transfer data between file descriptors at specified offsets.
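A sketch of how such a feature flag is typically consumed, with hypothetical tryFancySyscall/fallbackOp standing in for copy_file_range and its userspace fallback: the flag starts optimistic and is cleared the first time the kernel rejects the syscall.

const std = @import("std");

var has_fancy_syscall = std.atomic.Atomic(bool).init(true); // hypothetical flag

fn tryFancySyscall() !void {
    return error.SystemOutdated; // stand-in: pretend the kernel lacks the syscall
}

fn fallbackOp() !void {}

fn fancyOp() !void {
    if (has_fancy_syscall.load(.Monotonic)) {
        if (tryFancySyscall()) |_| {
            return;
        } else |err| switch (err) {
            error.SystemOutdated => has_fancy_syscall.store(false, .Monotonic),
            else => return err,
        }
    }
    return fallbackOp();
}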
diff --git a/lib/std/packed_int_array.zig b/lib/std/packed_int_array.zig
index e1c22e2860..ec401e98e2 100644
--- a/lib/std/packed_int_array.zig
+++ b/lib/std/packed_int_array.zig
@@ -379,9 +379,9 @@ test "PackedIntArray" {
 }
 
 test "PackedIntIo" {
-    const bytes = [_]u8 { 0b01101_000, 0b01011_110, 0b00011_101 };
-    try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .Little).get(&bytes, 0, 3));
-    try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .Little).get(&bytes, 0, 3));
+    const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
+    try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .Little).get(&bytes, 0, 3));
+    try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .Little).get(&bytes, 0, 3));
     try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .Little).get(&bytes, 0, 3));
     try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .Little).get(&bytes, 0, 3));
 }
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 14420d5290..692d29b1c7 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -767,6 +767,13 @@ pub const Target = struct {
         spirv32,
         spirv64,
 
+        pub fn isX86(arch: Arch) bool {
+            return switch (arch) {
+                .i386, .x86_64 => true,
+                else => false,
+            };
+        }
+
         pub fn isARM(arch: Arch) bool {
             return switch (arch) {
                 .arm, .armeb => true,
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index 11c6422f98..aa0ba0515d 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -400,7 +400,7 @@ pub const list = list: {
         "@fence",
         .{
             .tag = .fence,
-            .param_count = 0,
+            .param_count = 1,
         },
     },
     .{
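The new Arch.isX86 helper mainly serves the bit-instruction fast path in Atomic.zig above, replacing hand-written tag switches. A sketch of the simplification at a call site:

const std = @import("std");

// Before: callers spelled out both tags.
const is_x86_old = switch (std.Target.current.cpu.arch) {
    .i386, .x86_64 => true,
    else => false,
};

// After: one comptime-friendly query.
const is_x86_new = std.Target.current.cpu.arch.isX86();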
