| author | Andrew Kelley <andrew@ziglang.org> | 2021-09-15 18:55:39 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2021-09-15 19:00:35 -0700 |
| commit | b67d1810be3234c363ee2929ffcc91083bfb0ae5 | |
| tree | c4fda56637928dc68069dcdd28411d37cafd721b | /src/codegen |
| parent | f83a4b444c4c2fca7086fe6dbdaccc7b6d8f35ca | |
stage2: implement `@atomicRmw` and `@atomicLoad`
* langref: add some more "see also" links for atomics
* Add the following AIR instructions
- atomic_load
- atomic_store_unordered
- atomic_store_monotonic
- atomic_store_release
- atomic_store_seq_cst
- atomic_rmw
* Implement those AIR instructions in the LLVM and C backends (a usage
  sketch of the corresponding builtins follows these notes).
* AstGen: make the `ty` result locations for `@atomicRmw`, `@atomicLoad`,
  and `@atomicStore` be `coerced_ty`, avoiding unnecessary ZIR instructions
  for coercions that Sema performs anyway.
* Sema for `@atomicLoad` and `@atomicRmw` is done; however, Sema for
  `@atomicStore` is not yet implemented.
- comptime eval for `@atomicRmw` is not yet implemented.
* Sema: flesh out `coerceInMemoryAllowed` a little bit more. It can now
handle pointers.
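
For orientation, here is a minimal sketch of the user-facing builtins these AIR instructions lower. This example is not part of the commit, and per the notes above `@atomicStore` still lacks Sema support at this commit, so its line is aspirational:

```zig
const std = @import("std");

test "atomic builtins lowered by the new AIR instructions" {
    var counter: u32 = 0;
    // atomic_rmw: atomically add 1, returning the previous value.
    const prev = @atomicRmw(u32, &counter, .Add, 1, .SeqCst);
    try std.testing.expectEqual(@as(u32, 0), prev);
    // atomic_load: load with acquire ordering.
    const now = @atomicLoad(u32, &counter, .Acquire);
    try std.testing.expectEqual(@as(u32, 1), now);
    // atomic_store_release: store with release ordering (Sema still pending).
    @atomicStore(u32, &counter, 2, .Release);
}
```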
Diffstat (limited to 'src/codegen')
| -rw-r--r-- | src/codegen/c.zig | 83 |
| -rw-r--r-- | src/codegen/llvm.zig | 160 |
| -rw-r--r-- | src/codegen/llvm/bindings.zig | 63 |
3 files changed, 293 insertions, 13 deletions
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index a2e2d7b20d..5eb4388a9e 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -914,6 +914,13 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM
             .array_to_slice => try airArrayToSlice(o, inst),
             .cmpxchg_weak => try airCmpxchg(o, inst, "weak"),
             .cmpxchg_strong => try airCmpxchg(o, inst, "strong"),
+            .atomic_rmw => try airAtomicRmw(o, inst),
+            .atomic_load => try airAtomicLoad(o, inst),
+
+            .atomic_store_unordered => try airAtomicStore(o, inst, toMemoryOrder(.Unordered)),
+            .atomic_store_monotonic => try airAtomicStore(o, inst, toMemoryOrder(.Monotonic)),
+            .atomic_store_release => try airAtomicStore(o, inst, toMemoryOrder(.Release)),
+            .atomic_store_seq_cst => try airAtomicStore(o, inst, toMemoryOrder(.SeqCst)),
 
             .struct_field_ptr_index_0 => try airStructFieldPtrIndex(o, inst, 0),
             .struct_field_ptr_index_1 => try airStructFieldPtrIndex(o, inst, 1),
@@ -1917,8 +1924,61 @@ fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
     return local;
 }
 
-fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
-    const str = switch (order) {
+fn airAtomicRmw(o: *Object, inst: Air.Inst.Index) !CValue {
+    const pl_op = o.air.instructions.items(.data)[inst].pl_op;
+    const extra = o.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+    const inst_ty = o.air.typeOfIndex(inst);
+    const ptr = try o.resolveInst(pl_op.operand);
+    const operand = try o.resolveInst(extra.operand);
+    const local = try o.allocLocal(inst_ty, .Const);
+    const writer = o.writer();
+
+    try writer.print(" = zig_atomicrmw_{s}(", .{toAtomicRmwSuffix(extra.op())});
+    try o.writeCValue(writer, ptr);
+    try writer.writeAll(", ");
+    try o.writeCValue(writer, operand);
+    try writer.writeAll(", ");
+    try writeMemoryOrder(writer, extra.ordering());
+    try writer.writeAll(");\n");
+
+    return local;
+}
+
+fn airAtomicLoad(o: *Object, inst: Air.Inst.Index) !CValue {
+    const atomic_load = o.air.instructions.items(.data)[inst].atomic_load;
+    const inst_ty = o.air.typeOfIndex(inst);
+    const ptr = try o.resolveInst(atomic_load.ptr);
+    const local = try o.allocLocal(inst_ty, .Const);
+    const writer = o.writer();
+
+    try writer.writeAll(" = zig_atomic_load(");
+    try o.writeCValue(writer, ptr);
+    try writer.writeAll(", ");
+    try writeMemoryOrder(writer, atomic_load.order);
+    try writer.writeAll(");\n");
+
+    return local;
+}
+
+fn airAtomicStore(o: *Object, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
+    const bin_op = o.air.instructions.items(.data)[inst].bin_op;
+    const ptr = try o.resolveInst(bin_op.lhs);
+    const element = try o.resolveInst(bin_op.rhs);
+    const inst_ty = o.air.typeOfIndex(inst);
+    const local = try o.allocLocal(inst_ty, .Const);
+    const writer = o.writer();
+
+    try writer.writeAll(" = zig_atomic_store(");
+    try o.writeCValue(writer, ptr);
+    try writer.writeAll(", ");
+    try o.writeCValue(writer, element);
+    try writer.print(", {s});\n", .{order});
+
+    return local;
+}
+
+fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
+    return switch (order) {
         .Unordered => "memory_order_relaxed",
         .Monotonic => "memory_order_consume",
         .Acquire => "memory_order_acquire",
@@ -1926,7 +1986,24 @@ fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
         .AcqRel => "memory_order_acq_rel",
         .SeqCst => "memory_order_seq_cst",
     };
-    return w.writeAll(str);
+}
+
+fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void {
+    return w.writeAll(toMemoryOrder(order));
+}
+
+fn toAtomicRmwSuffix(order: std.builtin.AtomicRmwOp) []const u8 {
+    return switch (order) {
+        .Xchg => "xchg",
+        .Add => "add",
+        .Sub => "sub",
+        .And => "and",
+        .Nand => "nand",
+        .Or => "or",
+        .Xor => "xor",
+        .Max => "max",
+        .Min => "min",
+    };
 }
 
 fn IndentWriter(comptime UnderlyingWriter: type) type {
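Tying the C backend pieces together: `airAtomicRmw` and `airAtomicLoad` print calls to `zig_atomicrmw_<op>` and `zig_atomic_load` helpers (their definitions live in the C backend's support header, outside this diff), with the memory-order argument produced by `toMemoryOrder`. A hedged sketch of inputs and what the format strings above would print:

```zig
fn cBackendExample(x: *u32) void {
    // airAtomicRmw prints: <local> = zig_atomicrmw_add(<ptr>, <operand>, memory_order_seq_cst);
    _ = @atomicRmw(u32, x, .Add, 1, .SeqCst);
    // airAtomicLoad prints: <local> = zig_atomic_load(<ptr>, memory_order_consume);
    // (note that toMemoryOrder maps .Monotonic to memory_order_consume in the switch above)
    _ = @atomicLoad(u32, x, .Monotonic);
}
```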
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 569f857caa..af25ae51b1 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1060,6 +1060,13 @@ pub const FuncGen = struct {
             .cmpxchg_weak => try self.airCmpxchg(inst, true),
             .cmpxchg_strong => try self.airCmpxchg(inst, false),
             .fence => try self.airFence(inst),
+            .atomic_rmw => try self.airAtomicRmw(inst),
+            .atomic_load => try self.airAtomicLoad(inst),
+
+            .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
+            .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
+            .atomic_store_release => try self.airAtomicStore(inst, .Release),
+            .atomic_store_seq_cst => try self.airAtomicStore(inst, .SequentiallyConsistent),
 
             .struct_field_ptr => try self.airStructFieldPtr(inst),
             .struct_field_val => try self.airStructFieldVal(inst),
@@ -1983,20 +1990,19 @@ pub const FuncGen = struct {
     fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_ptr = try self.resolveInst(bin_op.lhs);
+        const ptr_ty = self.air.typeOf(bin_op.lhs);
         const src_operand = try self.resolveInst(bin_op.rhs);
-        // TODO set volatile on this store properly
-        _ = self.builder.buildStore(src_operand, dest_ptr);
+        _ = self.store(dest_ptr, ptr_ty, src_operand);
         return null;
     }
 
     fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
-        if (!is_volatile and self.liveness.isUnused(inst))
+        const ptr_ty = self.air.typeOf(ty_op.operand);
+        if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst))
             return null;
         const ptr = try self.resolveInst(ty_op.operand);
-        // TODO set volatile on this load properly
-        return self.builder.buildLoad(ptr, "");
+        return self.load(ptr, ptr_ty);
     }
 
     fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2033,14 +2039,12 @@ pub const FuncGen = struct {
                 new_value = self.builder.buildZExt(new_value, abi_ty, "");
             }
         }
-        const success_order = toLlvmAtomicOrdering(extra.successOrder());
-        const failure_order = toLlvmAtomicOrdering(extra.failureOrder());
         const result = self.builder.buildCmpXchg(
             ptr,
             expected_value,
             new_value,
-            success_order,
-            failure_order,
+            toLlvmAtomicOrdering(extra.successOrder()),
+            toLlvmAtomicOrdering(extra.failureOrder()),
             is_weak,
             self.single_threaded,
         );
@@ -2066,6 +2070,109 @@ pub const FuncGen = struct {
         return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
     }
 
+    fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+        const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+        const ptr = try self.resolveInst(pl_op.operand);
+        const ptr_ty = self.air.typeOf(pl_op.operand);
+        const operand_ty = ptr_ty.elemType();
+        const operand = try self.resolveInst(extra.operand);
+        const is_signed_int = operand_ty.isSignedInt();
+        const is_float = operand_ty.isFloat();
+        const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
+        const ordering = toLlvmAtomicOrdering(extra.ordering());
+        const single_threaded = llvm.Bool.fromBool(self.single_threaded);
+        const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, op == .Xchg);
+        if (opt_abi_ty) |abi_ty| {
+            // operand needs widening and truncating or bitcasting.
+            const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
+            const casted_operand = if (is_float)
+                self.builder.buildBitCast(operand, abi_ty, "")
+            else if (is_signed_int)
+                self.builder.buildSExt(operand, abi_ty, "")
+            else
+                self.builder.buildZExt(operand, abi_ty, "");
+
+            const uncasted_result = self.builder.buildAtomicRmw(
+                op,
+                casted_ptr,
+                casted_operand,
+                ordering,
+                single_threaded,
+            );
+            const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+            if (is_float) {
+                return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, "");
+            } else {
+                return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, "");
+            }
+        }
+
+        if (operand.typeOf().getTypeKind() != .Pointer) {
+            return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded);
+        }
+
+        // It's a pointer but we need to treat it as an int.
+        const usize_llvm_ty = try self.dg.llvmType(Type.initTag(.usize));
+        const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), "");
+        const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, "");
+        const uncasted_result = self.builder.buildAtomicRmw(
+            op,
+            casted_ptr,
+            casted_operand,
+            ordering,
+            single_threaded,
+        );
+        const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+        return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, "");
+    }
+
+    fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+        const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
+        const ptr = try self.resolveInst(atomic_load.ptr);
+        const ptr_ty = self.air.typeOf(atomic_load.ptr);
+        const ordering = toLlvmAtomicOrdering(atomic_load.order);
+        const operand_ty = ptr_ty.elemType();
+        const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
+
+        if (opt_abi_ty) |abi_ty| {
+            // operand needs widening and truncating
+            const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
+            const load_inst = self.load(casted_ptr, ptr_ty);
+            load_inst.setOrdering(ordering);
+            return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), "");
+        }
+        const load_inst = self.load(ptr, ptr_ty);
+        load_inst.setOrdering(ordering);
+        return load_inst;
+    }
+
+    fn airAtomicStore(
+        self: *FuncGen,
+        inst: Air.Inst.Index,
+        ordering: llvm.AtomicOrdering,
+    ) !?*const llvm.Value {
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        var ptr = try self.resolveInst(bin_op.lhs);
+        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        var element = try self.resolveInst(bin_op.rhs);
+        const operand_ty = ptr_ty.elemType();
+        const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
+
+        if (opt_abi_ty) |abi_ty| {
+            // operand needs widening
+            ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
+            if (operand_ty.isSignedInt()) {
+                element = self.builder.buildSExt(element, abi_ty, "");
+            } else {
+                element = self.builder.buildZExt(element, abi_ty, "");
+            }
+        }
+        const store_inst = self.store(ptr, ptr_ty, element);
+        store_inst.setOrdering(ordering);
+        return null;
+    }
+
     fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
         const id = llvm.lookupIntrinsicID(name.ptr, name.len);
         assert(id != 0);
@@ -2074,6 +2181,21 @@ pub const FuncGen = struct {
         // `getIntrinsicDeclaration`
         return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
     }
+
+    fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) *const llvm.Value {
+        _ = ptr_ty; // TODO set volatile and alignment on this load properly
+        return self.builder.buildLoad(ptr, "");
+    }
+
+    fn store(
+        self: *FuncGen,
+        ptr: *const llvm.Value,
+        ptr_ty: Type,
+        elem: *const llvm.Value,
+    ) *const llvm.Value {
+        _ = ptr_ty; // TODO set volatile and alignment on this store properly
+        return self.builder.buildStore(elem, ptr);
+    }
 };
 
 fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -2227,3 +2349,21 @@ fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrderi
         .SeqCst => .SequentiallyConsistent,
     };
 }
+
+fn toLlvmAtomicRmwBinOp(
+    op: std.builtin.AtomicRmwOp,
+    is_signed: bool,
+    is_float: bool,
+) llvm.AtomicRMWBinOp {
+    return switch (op) {
+        .Xchg => .Xchg,
+        .Add => if (is_float) llvm.AtomicRMWBinOp.FAdd else return .Add,
+        .Sub => if (is_float) llvm.AtomicRMWBinOp.FSub else return .Sub,
+        .And => .And,
+        .Nand => .Nand,
+        .Or => .Or,
+        .Xor => .Xor,
+        .Max => if (is_signed) llvm.AtomicRMWBinOp.Max else return .UMax,
+        .Min => if (is_signed) llvm.AtomicRMWBinOp.Min else return .UMin,
+    };
+}
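Two paths in `airAtomicRmw` deserve a user-level illustration. When `getAtomicAbiType` returns a wider ABI type (its exact behavior is outside this diff; an oddly sized integer such as `u7` is assumed to trigger it here), the operand is sign- or zero-extended, the `atomicrmw` runs at the wide type, and the result is truncated back. A pointer operand instead round-trips through `usize` via `ptrtoint`/`inttoptr`:

```zig
fn llvmBackendExample(flag: *u7, head: **u32, node: *u32) void {
    // Assumed widening path: u7 extends to the ABI atomic width,
    // the rmw runs on the wide value, and the result truncates back.
    _ = @atomicRmw(u7, flag, .Add, 1, .SeqCst);
    // Pointer operand: lowered as an integer atomicrmw on a usize-cast
    // pointer, with the result converted back via inttoptr.
    _ = @atomicRmw(*u32, head, .Xchg, node, .SeqCst);
}
```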
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index 3fed3ca879..3bbc24e174 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -133,6 +133,9 @@ pub const Value = opaque {
 
     pub const constIntToPtr = LLVMConstIntToPtr;
     extern fn LLVMConstIntToPtr(ConstantVal: *const Value, ToType: *const Type) *const Value;
+
+    pub const setOrdering = LLVMSetOrdering;
+    extern fn LLVMSetOrdering(MemoryAccessInst: *const Value, Ordering: AtomicOrdering) void;
 };
 
 pub const Type = opaque {
@@ -167,6 +170,9 @@ pub const Type = opaque {
         ElementCount: c_uint,
         Packed: Bool,
     ) void;
+
+    pub const getTypeKind = LLVMGetTypeKind;
+    extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
 };
 
 pub const Module = opaque {
@@ -477,6 +483,14 @@ pub const Builder = opaque {
         Name: [*:0]const u8,
     ) *const Value;
 
+    pub const buildIntToPtr = LLVMBuildIntToPtr;
+    extern fn LLVMBuildIntToPtr(
+        *const Builder,
+        Val: *const Value,
+        DestTy: *const Type,
+        Name: [*:0]const u8,
+    ) *const Value;
+
     pub const buildStructGEP = LLVMBuildStructGEP;
     extern fn LLVMBuildStructGEP(
         B: *const Builder,
@@ -530,6 +544,16 @@ pub const Builder = opaque {
         singleThread: Bool,
         Name: [*:0]const u8,
     ) *const Value;
+
+    pub const buildAtomicRmw = LLVMBuildAtomicRMW;
+    extern fn LLVMBuildAtomicRMW(
+        B: *const Builder,
+        op: AtomicRMWBinOp,
+        PTR: *const Value,
+        Val: *const Value,
+        ordering: AtomicOrdering,
+        singleThread: Bool,
+    ) *const Value;
 };
 
 pub const IntPredicate = enum(c_uint) {
@@ -901,3 +925,42 @@ pub const AtomicOrdering = enum(c_uint) {
     AcquireRelease = 6,
     SequentiallyConsistent = 7,
 };
+
+pub const AtomicRMWBinOp = enum(c_int) {
+    Xchg,
+    Add,
+    Sub,
+    And,
+    Nand,
+    Or,
+    Xor,
+    Max,
+    Min,
+    UMax,
+    UMin,
+    FAdd,
+    FSub,
+};
+
+pub const TypeKind = enum(c_int) {
+    Void,
+    Half,
+    Float,
+    Double,
+    X86_FP80,
+    FP128,
+    PPC_FP128,
+    Label,
+    Integer,
+    Function,
+    Struct,
+    Array,
+    Pointer,
+    Vector,
+    Metadata,
+    X86_MMX,
+    Token,
+    ScalableVector,
+    BFloat,
+    X86_AMX,
+};
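The two new enums mirror `LLVMAtomicRMWBinOp` and `LLVMTypeKind` from LLVM's C API, so their declaration order must track LLVM's numbering exactly (`FAdd`/`FSub` are the LLVM 9+ tail of the RMW enum). A compile-time sanity check along these lines could guard that assumption (a sketch, assuming it sits in `bindings.zig` next to the enum declarations above):

```zig
comptime {
    const assert = @import("std").debug.assert;
    assert(@enumToInt(AtomicRMWBinOp.Xchg) == 0); // LLVMAtomicRMWBinOpXchg
    assert(@enumToInt(AtomicRMWBinOp.FSub) == 12); // last value, added in LLVM 9
    assert(@enumToInt(TypeKind.Pointer) == 12); // LLVMPointerTypeKind
}
```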
