-rw-r--r--   doc/langref.html.in                |  25
-rw-r--r--   lib/std/atomic/queue.zig           |   2
-rw-r--r--   lib/std/atomic/stack.zig           |   2
-rw-r--r--   lib/std/event/channel.zig          |   4
-rw-r--r--   lib/std/event/future.zig           |   4
-rw-r--r--   lib/std/event/lock.zig             |  10
-rw-r--r--   lib/std/event/loop.zig             |   4
-rw-r--r--   lib/std/event/rwlock.zig           |  18
-rw-r--r--   lib/std/os/linux.zig               |   2
-rw-r--r--   lib/std/spinlock.zig               |   3
-rw-r--r--   src/all_types.hpp                  |  12
-rw-r--r--   src/codegen.cpp                    |  14
-rw-r--r--   src/ir.cpp                         | 103
-rw-r--r--   src/ir_print.cpp                   |  26
-rw-r--r--   test/compile_errors.zig            |  10
-rw-r--r--   test/stage1/behavior/atomics.zig   |  21
16 files changed, 232 insertions(+), 28 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index bca12c62d7..5d989c1bbd 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6612,14 +6612,14 @@ async fn func(y: *i32) void {
         This builtin function atomically dereferences a pointer and returns the value.
       </p>
       <p>
-      {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
-      or an integer whose bit count meets these requirements:
+      {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}
+      an integer whose bit count meets these requirements:
       </p>
       <ul>
         <li>At least 8</li>
         <li>At most the same as usize</li>
         <li>Power of 2</li>
-      </ul>
+      </ul> or an enum with a valid integer tag type.
       <p>
       TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
       we can remove this restriction
@@ -6660,6 +6660,25 @@ async fn func(y: *i32) void {
         <li>{#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.</li>
       </ul>
       {#header_close#}
+      {#header_open|@atomicStore#}
+      <pre>{#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}</pre>
+      <p>
+        This builtin function atomically stores a value.
+      </p>
+      <p>
+      {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}
+      an integer whose bit count meets these requirements:
+      </p>
+      <ul>
+        <li>At least 8</li>
+        <li>At most the same as usize</li>
+        <li>Power of 2</li>
+      </ul> or an enum with a valid integer tag type.
+      <p>
+      TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
+      we can remove this restriction
+      </p>
+      {#header_close#}
       {#header_open|@bitCast#}
       <pre>{#syntax#}@bitCast(comptime DestType: type, value: var) DestType{#endsyntax#}</pre>
       <p>
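For context (not part of the diff): the newly documented builtin takes the operand type, a pointer, the value, and a comptime ordering, and returns void. A minimal sketch of a call site, mirroring the behavior test added at the bottom of this patch:

    const std = @import("std");
    const expect = std.testing.expect;

    test "@atomicStore usage sketch" {
        var flag: u8 = 0;
        // Store 1 with sequentially consistent ordering; the builtin
        // returns void, so there is no result to discard.
        @atomicStore(u8, &flag, 1, .SeqCst);
        expect(@atomicLoad(u8, &flag, .SeqCst) == 1);
    }
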
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 173355eb3b..9d6b15ff4a 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -199,7 +199,7 @@ test "std.atomic.Queue" {
 
         for (putters) |t|
             t.wait();
-        _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+        @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
         for (getters) |t|
             t.wait();
 
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 664191eb77..4246e15985 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -128,7 +128,7 @@ test "std.atomic.stack" {
 
         for (putters) |t|
             t.wait();
-        _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+        @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
         for (getters) |t|
             t.wait();
     }
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index 2ea99d234d..ac5a65e1b0 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -161,7 +161,7 @@ pub fn Channel(comptime T: type) type {
 
         fn dispatch(self: *SelfChannel) void {
             // set the "need dispatch" flag
-            _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
 
             lock: while (true) {
                 // set the lock flag
@@ -169,7 +169,7 @@ pub fn Channel(comptime T: type) type {
                 if (prev_lock != 0) return;
 
                 // clear the need_dispatch flag since we're about to do it
-                _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
+                @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
 
                 while (true) {
                     one_dispatch: {
diff --git a/lib/std/event/future.zig b/lib/std/event/future.zig
index 43593b348a..5261db990c 100644
--- a/lib/std/event/future.zig
+++ b/lib/std/event/future.zig
@@ -62,12 +62,12 @@ pub fn Future(comptime T: type) type {
         pub async fn start(self: *Self) ?*T {
             const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
             switch (state) {
-                1 => {
+                .Started => {
                     const held = self.lock.acquire();
                     held.release();
                     return &self.data;
                 },
-                2 => return &self.data,
+                .Finished => return &self.data,
                 else => unreachable,
             }
         }
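Side note (not part of the diff): the future.zig hunk also replaces the magic integers 1 and 2 with the matching enum tags of the value that @cmpxchgStrong actually returns (?Available). A sketch of the pattern, using the same Available enum shape but a hypothetical tryStart helper:

    const Available = enum(u8) { NotStarted, Started, Finished };

    fn tryStart(avail: *Available) bool {
        // Null means the cmpxchg succeeded and we claimed the NotStarted slot.
        const state = @cmpxchgStrong(Available, avail, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return true;
        return switch (state) {
            .Started, .Finished => false, // someone else got there first
            .NotStarted => unreachable, // the cmpxchg would have succeeded
        };
    }
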
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index 576a09064f..a95c5bf7e2 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -31,8 +31,8 @@ pub const Lock = struct {
             }
 
             // We need to release the lock.
-            _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
 
             // There might be a queue item. If we know the queue is empty, we can be done,
             // because the other actor will try to obtain the lock.
@@ -56,8 +56,8 @@ pub const Lock = struct {
             }
 
             // Release the lock again.
-            _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
 
             // Find out if we can be done.
             if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
@@ -101,7 +101,7 @@ pub const Lock = struct {
 
         // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
         // will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
 
         const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
         if (old_bit == 0) {
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 588cd3c8b5..8f01c19746 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -814,7 +814,7 @@ pub const Loop = struct {
                 _ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
             },
             .linux => {
-                _ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+                @atomicStore(i32, &self.os_data.fs_queue_item, 1, AtomicOrder.SeqCst);
                 const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
                 switch (os.linux.getErrno(rc)) {
                     0 => {},
@@ -837,7 +837,7 @@ pub const Loop = struct {
     fn posixFsRun(self: *Loop) void {
         while (true) {
             if (builtin.os == .linux) {
-                _ = @atomicRmw(i32, &self.os_data.fs_queue_item, .Xchg, 0, .SeqCst);
+                @atomicStore(i32, &self.os_data.fs_queue_item, 0, .SeqCst);
             }
             while (self.os_data.fs_queue.get()) |node| {
                 switch (node.data.msg) {
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index 3a64b9df8c..ec4ab8f6d0 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -40,7 +40,7 @@ pub const RwLock = struct {
                 return;
             }
 
-            _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
             if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                 // Didn't unlock. Someone else's problem.
                 return;
@@ -64,15 +64,15 @@ pub const RwLock = struct {
             // We need to release the write lock. Check if any readers are waiting to grab the lock.
             if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
                 // Switch to a read lock.
-                _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, .ReadLock, .SeqCst);
+                @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
 
                 while (self.lock.reader_queue.get()) |node| {
                     global_event_loop.onNextTick(node);
                 }
                 return;
             }
 
-            _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
+            @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
 
             self.lock.commonPostUnlock();
         }
@@ -113,7 +113,7 @@ pub const RwLock = struct {
 
         // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
        // some actor will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
 
         // Here we don't care if we are the one to do the locking or if it was already locked for reading.
         const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -144,7 +144,7 @@ pub const RwLock = struct {
 
         // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
        // some actor will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
 
         // Here we must be the one to acquire the write lock. It cannot already be locked.
         if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -176,8 +176,8 @@ pub const RwLock = struct {
                 return;
             }
 
             // Release the lock again.
-            _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(State, &self.shared_state, .Xchg, .Unlocked, .SeqCst);
+            @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
             continue;
         }
@@ -195,7 +195,7 @@ pub const RwLock = struct {
                 return;
             }
             // Release the lock again.
-            _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
             if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                 // Didn't unlock. Someone else's problem.
                 return;
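A recurring pattern in the std.event hunks above (not part of the diff): every `_ = @atomicRmw(..., .Xchg, ...)` whose result was discarded becomes a plain `@atomicStore`. A minimal sketch of the before/after:

    test "discarded Xchg vs @atomicStore" {
        var flag: u8 = 0;
        // Old idiom: atomic exchange, previous value thrown away.
        _ = @atomicRmw(u8, &flag, .Xchg, 1, .SeqCst);
        // New idiom: states the intent directly and leaves no dead result.
        @atomicStore(u8, &flag, 0, .SeqCst);
    }
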
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 618a21f456..7e2f14021f 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -531,7 +531,7 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
     const ptr = @intToPtr(?*const c_void, vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM));
     // Note that we may not have a VDSO at all, update the stub address anyway
     // so that clock_gettime will fall back on the good old (and slow) syscall
-    _ = @cmpxchgStrong(?*const c_void, &vdso_clock_gettime, &init_vdso_clock_gettime, ptr, .Monotonic, .Monotonic);
+    @atomicStore(?*const c_void, &vdso_clock_gettime, ptr, .Monotonic);
     // Call into the VDSO if available
     if (ptr) |fn_ptr| {
         const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
diff --git a/lib/std/spinlock.zig b/lib/std/spinlock.zig
index 3bed3d3891..bd811f709c 100644
--- a/lib/std/spinlock.zig
+++ b/lib/std/spinlock.zig
@@ -11,8 +11,7 @@ pub const SpinLock = struct {
         spinlock: *SpinLock,
 
         pub fn release(self: Held) void {
-            // TODO: @atomicStore() https://github.com/ziglang/zig/issues/2995
-            assert(@atomicRmw(u8, &self.spinlock.lock, .Xchg, 0, .Release) == 1);
+            @atomicStore(u8, &self.spinlock.lock, 0, .Release);
         }
     };
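The spinlock.zig hunk closes the TODO pointing at issue #2995; note it also drops the old assert that the previous lock value was 1. For context, a condensed sketch of the acquire/release pairing (not the actual std.SpinLock API, which wraps release in a Held handle):

    const SpinLock = struct {
        lock: u8,

        fn acquire(self: *SpinLock) void {
            // Spin until the exchange observes 0, i.e. we flipped 0 -> 1.
            while (@atomicRmw(u8, &self.lock, .Xchg, 1, .Acquire) != 0) {}
        }

        fn release(self: *SpinLock) void {
            // A release-ordered store suffices; the old value is not needed.
            @atomicStore(u8, &self.lock, 0, .Release);
        }
    };
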
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 1464dfba59..25815ef64a 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1700,6 +1700,7 @@ enum BuiltinFnId {
     BuiltinFnIdErrorReturnTrace,
     BuiltinFnIdAtomicRmw,
     BuiltinFnIdAtomicLoad,
+    BuiltinFnIdAtomicStore,
     BuiltinFnIdHasDecl,
     BuiltinFnIdUnionInit,
     BuiltinFnIdFrameAddress,
@@ -2569,6 +2570,7 @@ enum IrInstructionId {
     IrInstructionIdErrorUnion,
     IrInstructionIdAtomicRmw,
     IrInstructionIdAtomicLoad,
+    IrInstructionIdAtomicStore,
     IrInstructionIdSaveErrRetAddr,
     IrInstructionIdAddImplicitReturnType,
     IrInstructionIdErrSetCast,
@@ -3714,6 +3716,16 @@ struct IrInstructionAtomicLoad {
     AtomicOrder resolved_ordering;
 };
 
+struct IrInstructionAtomicStore {
+    IrInstruction base;
+
+    IrInstruction *operand_type;
+    IrInstruction *ptr;
+    IrInstruction *value;
+    IrInstruction *ordering;
+    AtomicOrder resolved_ordering;
+};
+
 struct IrInstructionSaveErrRetAddr {
     IrInstruction base;
 };
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 387c6120c2..a0666a3522 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5655,6 +5655,17 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
     return load_inst;
 }
 
+static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutable *executable,
+        IrInstructionAtomicStore *instruction)
+{
+    LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering);
+    LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
+    LLVMValueRef value = ir_llvm_value(g, instruction->value);
+    LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value.type);
+    LLVMSetOrdering(store_inst, ordering);
+    return nullptr;
+}
+
 static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
     LLVMValueRef op = ir_llvm_value(g, instruction->op1);
     assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -6258,6 +6269,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
             return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
         case IrInstructionIdAtomicLoad:
             return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
+        case IrInstructionIdAtomicStore:
+            return ir_render_atomic_store(g, executable, (IrInstructionAtomicStore *)instruction);
         case IrInstructionIdSaveErrRetAddr:
             return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
         case IrInstructionIdFloatOp:
@@ -8074,6 +8087,7 @@ static void define_builtin_fns(CodeGen *g) {
     create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
     create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
     create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
+    create_builtin_fn(g, BuiltinFnIdAtomicStore, "atomicStore", 4);
    create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
     create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
     create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
diff --git a/src/ir.cpp b/src/ir.cpp
index 676f69dea7..b6cc3cd4cb 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1010,6 +1010,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
     return IrInstructionIdAtomicLoad;
 }
 
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicStore *) {
+    return IrInstructionIdAtomicStore;
+}
+
 static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
     return IrInstructionIdSaveErrRetAddr;
 }
@@ -3188,6 +3192,25 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
     return &instruction->base;
 }
 
+static IrInstruction *ir_build_atomic_store(IrBuilder *irb, Scope *scope, AstNode *source_node,
+        IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *value,
+        IrInstruction *ordering, AtomicOrder resolved_ordering)
+{
+    IrInstructionAtomicStore *instruction = ir_build_instruction<IrInstructionAtomicStore>(irb, scope, source_node);
+    instruction->operand_type = operand_type;
+    instruction->ptr = ptr;
+    instruction->value = value;
+    instruction->ordering = ordering;
+    instruction->resolved_ordering = resolved_ordering;
+
+    if (operand_type != nullptr) ir_ref_instruction(operand_type, irb->current_basic_block);
+    ir_ref_instruction(ptr, irb->current_basic_block);
+    ir_ref_instruction(value, irb->current_basic_block);
+    if (ordering != nullptr) ir_ref_instruction(ordering, irb->current_basic_block);
+
+    return &instruction->base;
+}
+
 static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) {
     IrInstructionSaveErrRetAddr *instruction = ir_build_instruction<IrInstructionSaveErrRetAddr>(irb, scope, source_node);
     return &instruction->base;
@@ -5732,6 +5755,33 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
                     AtomicOrderMonotonic);
             return ir_lval_wrap(irb, scope, inst, lval, result_loc);
         }
+        case BuiltinFnIdAtomicStore:
+        {
+            AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+            IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+            if (arg0_value == irb->codegen->invalid_instruction)
+                return arg0_value;
+
+            AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+            IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+            if (arg1_value == irb->codegen->invalid_instruction)
+                return arg1_value;
+
+            AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
+            IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope);
+            if (arg2_value == irb->codegen->invalid_instruction)
+                return arg2_value;
+
+            AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
+            IrInstruction *arg3_value = ir_gen_node(irb, arg3_node, scope);
+            if (arg3_value == irb->codegen->invalid_instruction)
+                return arg3_value;
+
+            IrInstruction *inst = ir_build_atomic_store(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value,
+                    // this value does not mean anything since we passed non-null values for other arg
+                    AtomicOrderMonotonic);
+            return ir_lval_wrap(irb, scope, inst, lval, result_loc);
+        }
         case BuiltinFnIdIntToEnum:
         {
             AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -25848,6 +25898,56 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr
     return result;
 }
 
+static IrInstruction *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstructionAtomicStore *instruction) {
+    ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
+    if (type_is_invalid(operand_type))
+        return ira->codegen->invalid_instruction;
+
+    IrInstruction *ptr_inst = instruction->ptr->child;
+    if (type_is_invalid(ptr_inst->value.type))
+        return ira->codegen->invalid_instruction;
+
+    ZigType *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
+    IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
+    if (type_is_invalid(casted_ptr->value.type))
+        return ira->codegen->invalid_instruction;
+
+    IrInstruction *value = instruction->value->child;
+    if (type_is_invalid(value->value.type))
+        return ira->codegen->invalid_instruction;
+
+    IrInstruction *casted_value = ir_implicit_cast(ira, value, operand_type);
+    if (type_is_invalid(casted_value->value.type))
+        return ira->codegen->invalid_instruction;
+
+
+    AtomicOrder ordering;
+    if (instruction->ordering == nullptr) {
+        ordering = instruction->resolved_ordering;
+    } else {
+        if (!ir_resolve_atomic_order(ira, instruction->ordering->child, &ordering))
+            return ira->codegen->invalid_instruction;
+    }
+
+    if (ordering == AtomicOrderAcquire || ordering == AtomicOrderAcqRel) {
+        ir_assert(instruction->ordering != nullptr, &instruction->base);
+        ir_add_error(ira, instruction->ordering,
+            buf_sprintf("@atomicStore atomic ordering must not be Acquire or AcqRel"));
+        return ira->codegen->invalid_instruction;
+    }
+
+    if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) {
+        IrInstruction *result = ir_analyze_store_ptr(ira, &instruction->base, casted_ptr, value, false);
+        result->value.type = ira->codegen->builtin_types.entry_void;
+        return result;
+    }
+
+    IrInstruction *result = ir_build_atomic_store(&ira->new_irb, instruction->base.scope,
+            instruction->base.source_node, nullptr, casted_ptr, casted_value, nullptr, ordering);
+    result->value.type = ira->codegen->builtin_types.entry_void;
+    return result;
+}
+
 static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) {
     IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope,
             instruction->base.source_node);
@@ -26882,6 +26982,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
             return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
         case IrInstructionIdAtomicLoad:
             return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
+        case IrInstructionIdAtomicStore:
+            return ir_analyze_instruction_atomic_store(ira, (IrInstructionAtomicStore *)instruction);
         case IrInstructionIdSaveErrRetAddr:
             return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
         case IrInstructionIdAddImplicitReturnType:
@@ -27062,6 +27164,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
         case IrInstructionIdSaveErrRetAddr:
         case IrInstructionIdAddImplicitReturnType:
         case IrInstructionIdAtomicRmw:
+        case IrInstructionIdAtomicStore:
        case IrInstructionIdCmpxchgGen:
         case IrInstructionIdCmpxchgSrc:
         case IrInstructionIdAssertZero:
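The analysis above rejects only Acquire and AcqRel, which matches LLVM's rule that an atomic store may be unordered, monotonic, release, or seq_cst. A sketch inferred from that check (not a test in this patch):

    test "@atomicStore accepts store-compatible orderings" {
        var x: u32 = 0;
        @atomicStore(u32, &x, 1, .Unordered);
        @atomicStore(u32, &x, 2, .Monotonic);
        @atomicStore(u32, &x, 3, .Release);
        @atomicStore(u32, &x, 4, .SeqCst);
        // .Acquire and .AcqRel are compile errors, as the new
        // compile_errors.zig case below demonstrates.
    }
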
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index da7ae38e5f..03224d8037 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -324,6 +324,8 @@ const char* ir_instruction_type_str(IrInstructionId id) {
             return "AtomicRmw";
         case IrInstructionIdAtomicLoad:
             return "AtomicLoad";
+        case IrInstructionIdAtomicStore:
+            return "AtomicStore";
         case IrInstructionIdSaveErrRetAddr:
             return "SaveErrRetAddr";
         case IrInstructionIdAddImplicitReturnType:
@@ -1871,6 +1873,27 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct
     fprintf(irp->f, ")");
 }
 
+static void ir_print_atomic_store(IrPrint *irp, IrInstructionAtomicStore *instruction) {
+    fprintf(irp->f, "@atomicStore(");
+    if (instruction->operand_type != nullptr) {
+        ir_print_other_instruction(irp, instruction->operand_type);
+    } else {
+        fprintf(irp->f, "[TODO print]");
+    }
+    fprintf(irp->f, ",");
+    ir_print_other_instruction(irp, instruction->ptr);
+    fprintf(irp->f, ",");
+    ir_print_other_instruction(irp, instruction->value);
+    fprintf(irp->f, ",");
+    if (instruction->ordering != nullptr) {
+        ir_print_other_instruction(irp, instruction->ordering);
+    } else {
+        fprintf(irp->f, "[TODO print]");
+    }
+    fprintf(irp->f, ")");
+}
+
+
 static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) {
     fprintf(irp->f, "@saveErrRetAddr()");
 }
@@ -2431,6 +2454,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool
         case IrInstructionIdAtomicLoad:
             ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction);
             break;
+        case IrInstructionIdAtomicStore:
+            ir_print_atomic_store(irp, (IrInstructionAtomicStore *)instruction);
+            break;
         case IrInstructionIdEnumToInt:
             ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction);
             break;
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 47333b7db4..63d7240d42 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -3,6 +3,16 @@ const builtin = @import("builtin");
 
 pub fn addCases(cases: *tests.CompileErrorContext) void {
     cases.add(
+        "atomic orderings of atomicStore Acquire or AcqRel",
+        \\export fn entry() void {
+        \\    var x: u32 = 0;
+        \\    @atomicStore(u32, &x, 1, .Acquire);
+        \\}
+    ,
+        "tmp.zig:3:30: error: @atomicStore atomic ordering must not be Acquire or AcqRel",
+    );
+
+    cases.add(
         "missing const in slice with nested array type",
         \\const Geo3DTex2D = struct { vertices: [][2]f32 };
         \\pub fn getGeo3DTex2D() Geo3DTex2D {
diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig
index c6344b17ca..694eb160e4 100644
--- a/test/stage1/behavior/atomics.zig
+++ b/test/stage1/behavior/atomics.zig
@@ -123,3 +123,24 @@ test "atomic load and rmw with enum" {
     expect(@atomicLoad(Value, &x, .SeqCst) != .a);
     expect(@atomicLoad(Value, &x, .SeqCst) != .b);
 }
+
+test "atomic store" {
+    var x: u32 = 0;
+    @atomicStore(u32, &x, 1, .SeqCst);
+    expect(@atomicLoad(u32, &x, .SeqCst) == 1);
+    @atomicStore(u32, &x, 12345678, .SeqCst);
+    expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
+}
+
+test "atomic store comptime" {
+    comptime testAtomicStore();
+    testAtomicStore();
+}
+
+fn testAtomicStore() void {
+    var x: u32 = 0;
+    @atomicStore(u32, &x, 1, .SeqCst);
+    expect(@atomicLoad(u32, &x, .SeqCst) == 1);
+    @atomicStore(u32, &x, 12345678, .SeqCst);
+    expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
+}
\ No newline at end of file
