| | | |
|---|---|---|
| author | Andrew Kelley <andrew@ziglang.org> | 2021-09-15 18:55:39 -0700 |
| committer | Andrew Kelley <andrew@ziglang.org> | 2021-09-15 19:00:35 -0700 |
| commit | b67d1810be3234c363ee2929ffcc91083bfb0ae5 (patch) | |
| tree | c4fda56637928dc68069dcdd28411d37cafd721b /src/Sema.zig | |
| parent | f83a4b444c4c2fca7086fe6dbdaccc7b6d8f35ca (diff) | |
| download | zig-b67d1810be3234c363ee2929ffcc91083bfb0ae5.tar.gz zig-b67d1810be3234c363ee2929ffcc91083bfb0ae5.zip | |
stage2: implement `@atomicRmw` and `@atomicLoad`
* langref: add some more "see also" links for atomics
* Add the following AIR instructions
- atomic_load
- atomic_store_unordered
- atomic_store_monotonic
- atomic_store_release
- atomic_store_seq_cst
- atomic_rmw
* Implement those AIR instructions in the LLVM and C backends.
* AstGen: make the `ty` result locations for `@atomicRmw`, `@atomicLoad`,
  and `@atomicStore` be `coerced_ty`, which avoids emitting ZIR coercion
  instructions that would be redundant, since Sema performs those
  coercions itself.
* Sema for `@atomicLoad` and `@atomicRmw` is done (a usage sketch follows
  this list); Sema for `@atomicStore` is not yet implemented.
  - comptime eval for `@atomicRmw` is not yet implemented.
* Sema: flesh out `coerceInMemoryAllowed` a little bit more. It can now
handle pointers.
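
As a quick illustration of what this commit enables, here is a minimal usage sketch (not part of the commit) exercising the builtins that now lower to the new AIR instructions; it assumes the 2021-era enum tag spellings (`.Add`, `.SeqCst`):

```zig
const std = @import("std");

var counter: u32 = 0;

test "atomic builtins lowered to the new AIR instructions" {
    // Lowers to the new `atomic_rmw` AIR instruction; returns the previous
    // value. Per the Sema checks in the diff, bool/enum operands allow only
    // .Xchg, and floats allow only .Xchg, .Add, and .Sub.
    const prev = @atomicRmw(u32, &counter, .Add, 1, .SeqCst);
    try std.testing.expectEqual(@as(u32, 0), prev);

    // Lowers to the new `atomic_load` AIR instruction; Sema rejects the
    // .Release and .AcqRel orderings here, as the diff below shows.
    const loaded = @atomicLoad(u32, &counter, .SeqCst);
    try std.testing.expectEqual(@as(u32, 1), loaded);
}
```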
Diffstat (limited to 'src/Sema.zig')
| | | |
|---|---|---|
| -rw-r--r-- | src/Sema.zig | 192 |

1 file changed, 185 insertions, 7 deletions
```diff
diff --git a/src/Sema.zig b/src/Sema.zig
index e679f03fcc..c163178890 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -7549,6 +7549,19 @@ fn resolveAtomicOrder(
     return val.toEnum(std.builtin.AtomicOrder);
 }
 
+fn resolveAtomicRmwOp(
+    sema: *Sema,
+    block: *Scope.Block,
+    src: LazySrcLoc,
+    zir_ref: Zir.Inst.Ref,
+) CompileError!std.builtin.AtomicRmwOp {
+    const atomic_rmw_op_ty = try sema.getBuiltinType(block, src, "AtomicRmwOp");
+    const air_ref = sema.resolveInst(zir_ref);
+    const coerced = try sema.coerce(block, atomic_rmw_op_ty, air_ref, src);
+    const val = try sema.resolveConstValue(block, src, coerced);
+    return val.toEnum(std.builtin.AtomicRmwOp);
+}
+
 fn zirCmpxchg(
     sema: *Sema,
     block: *Scope.Block,
@@ -7664,14 +7677,108 @@ fn zirSelect(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro
 
 fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
-    const src = inst_data.src();
-    return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{});
+    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+    // zig fmt: off
+    const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+    const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+    const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+    // zig fmt: on
+    const ptr = sema.resolveInst(extra.lhs);
+    const elem_ty = sema.typeOf(ptr).elemType();
+    try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
+    const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);
+
+    switch (order) {
+        .Release, .AcqRel => {
+            return sema.mod.fail(
+                &block.base,
+                order_src,
+                "@atomicLoad atomic ordering must not be Release or AcqRel",
+                .{},
+            );
+        },
+        else => {},
+    }
+
+    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
+        if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+            return sema.addConstant(elem_ty, elem_val);
+        }
+    }
+
+    try sema.requireRuntimeBlock(block, ptr_src);
+    return block.addInst(.{
+        .tag = .atomic_load,
+        .data = .{ .atomic_load = .{
+            .ptr = ptr,
+            .order = order,
+        } },
+    });
 }
 
 fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+    const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
     const src = inst_data.src();
-    return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{});
+    // zig fmt: off
+    const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+    const ptr_src       : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+    const op_src        : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+    const operand_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
+    const order_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
+    // zig fmt: on
+    const ptr = sema.resolveInst(extra.ptr);
+    const operand_ty = sema.typeOf(ptr).elemType();
+    try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
+    const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
+
+    switch (operand_ty.zigTypeTag()) {
+        .Enum => if (op != .Xchg) {
+            return mod.fail(&block.base, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
+        },
+        .Bool => if (op != .Xchg) {
+            return mod.fail(&block.base, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
+        },
+        .Float => switch (op) {
+            .Xchg, .Add, .Sub => {},
+            else => return mod.fail(&block.base, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}),
+        },
+        else => {},
+    }
+    const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
+    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);
+
+    if (order == .Unordered) {
+        return mod.fail(&block.base, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
+    }
+
+    // special case zero bit types
+    if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| {
+        return sema.addConstant(operand_ty, val);
+    }
+
+    const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
+        if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
+            _ = ptr_val;
+            _ = operand_val;
+            return mod.fail(&block.base, src, "TODO implement Sema for @atomicRmw at comptime", .{});
+        } else break :rs operand_src;
+    } else ptr_src;
+
+    const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3);
+
+    try sema.requireRuntimeBlock(block, runtime_src);
+    return block.addInst(.{
+        .tag = .atomic_rmw,
+        .data = .{ .pl_op = .{
+            .operand = ptr,
+            .payload = try sema.addExtra(Air.AtomicRmw{
+                .operand = operand,
+                .flags = flags,
+            }),
+        } },
+    });
 }
 
 fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8848,7 +8955,7 @@ fn coerce(
     if (dest_type.eql(inst_ty))
         return inst;
 
-    const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty);
+    const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false);
     if (in_memory_result == .ok) {
         return sema.bitcast(block, dest_type, inst, inst_src);
     }
@@ -8890,11 +8997,12 @@ fn coerce(
         const array_type = inst_ty.elemType();
         if (array_type.zigTypeTag() != .Array) break :src_array_ptr;
         const array_elem_type = array_type.elemType();
-        if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr;
+        const dest_is_mut = !dest_type.isConstPtr();
+        if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr;
         if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr;
 
         const dst_elem_type = dest_type.elemType();
-        switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) {
+        switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut)) {
             .ok => {},
             .no_match => break :src_array_ptr,
         }
@@ -9001,10 +9109,80 @@ const InMemoryCoercionResult = enum {
     no_match,
 };
 
-fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult {
+/// If pointers have the same representation in runtime memory, a bitcast AIR instruction
+/// may be used for the coercion.
+/// * `const` attribute can be gained
+/// * `volatile` attribute can be gained
+/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut
+/// * alignment can be decreased
+/// * bit offset attributes must match exactly
+/// * `*`/`[*]` must match exactly, but `[*c]` matches either one
+/// * sentinel-terminated pointers can coerce into `[*]`
+/// TODO improve this function to report recursive compile errors like it does in stage1.
+/// look at the function types_match_const_cast_only
+fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool) InMemoryCoercionResult {
     if (dest_type.eql(src_type))
         return .ok;
 
+    if (dest_type.zigTypeTag() == .Pointer and
+        src_type.zigTypeTag() == .Pointer)
+    {
+        const dest_info = dest_type.ptrInfo().data;
+        const src_info = src_type.ptrInfo().data;
+
+        const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable);
+        if (child == .no_match) {
+            return child;
+        }
+
+        const ok_sent = dest_info.sentinel == null or src_info.size == .C or
+            (src_info.sentinel != null and
+            dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type));
+        if (!ok_sent) {
+            return .no_match;
+        }
+
+        const ok_ptr_size = src_info.size == dest_info.size or
+            src_info.size == .C or dest_info.size == .C;
+        if (!ok_ptr_size) {
+            return .no_match;
+        }
+
+        const ok_cv_qualifiers =
+            (src_info.mutable or !dest_info.mutable) and
+            (!src_info.@"volatile" or dest_info.@"volatile");
+
+        if (!ok_cv_qualifiers) {
+            return .no_match;
+        }
+
+        const ok_allows_zero = (dest_info.@"allowzero" and
+            (src_info.@"allowzero" or !dest_is_mut)) or
+            (!dest_info.@"allowzero" and !src_info.@"allowzero");
+        if (!ok_allows_zero) {
+            return .no_match;
+        }
+
+        if (dest_type.hasCodeGenBits() != src_type.hasCodeGenBits()) {
+            return .no_match;
+        }
+
+        if (src_info.host_size != dest_info.host_size or
+            src_info.bit_offset != dest_info.bit_offset)
+        {
+            return .no_match;
+        }
+
+        assert(src_info.@"align" != 0);
+        assert(dest_info.@"align" != 0);
+
+        if (dest_info.@"align" > src_info.@"align") {
+            return .no_match;
+        }
+
+        return .ok;
+    }
+
     // TODO: implement more of this function
     return .no_match;
 }
```
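
To make the `coerceInMemoryAllowed` doc comment above concrete, here is a sketch (not part of the commit) of pointer coercions those rules accept. Each one is representation-preserving, so the compiler can lower it as a bitcast or no-op rather than generating a conversion:

```zig
test "in-memory pointer coercions permitted by the rules above" {
    var x: u32 align(8) = 123;

    // `const` can be gained.
    const p: *const u32 = &x;

    // Alignment can be decreased (x is over-aligned to 8; u32 needs only 4).
    const q: *align(4) u32 = &x;

    // A sentinel-terminated many-item pointer can coerce into `[*]`.
    var buf = [2:0]u8{ 1, 2 };
    const many: [*:0]u8 = &buf;
    const plain: [*]u8 = many;

    _ = p;
    _ = q;
    _ = plain;
}
```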
