path: root/src/codegen
author     Jacob Young <jacobly0@users.noreply.github.com>  2023-08-07 09:35:38 -0400
committer  Jacob Young <jacobly0@users.noreply.github.com>  2023-08-08 21:32:50 -0400
commit     a0cb03ed99d0e157389d8afad4ab4f9c8e8ea2db (patch)
tree       b7bac1f57b40c2f53e86fd8a2183c11add1a7d9c /src/codegen
parent     49cc1bff086e9c521c110b35692a87f75e25c7ad (diff)
llvm: finish converting instructions
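
This commit retires the Builder's `unimplemented` escape hatch for the remaining atomic operations: `cmpxchg` and `atomicrmw` become first-class instructions in the self-hosted Builder, with volatility, sync scope, orderings, and alignment packed into `MemoryAccessInfo`. As an illustrative sketch only (not part of the commit), assuming `wip` is a *WipFunction and `ptr`, `expected`, and `new_value` are already-resolved Values, a strong compare-exchange now lowers through one builder call:

    // Illustrative sketch, not from the commit; the byte alignment here
    // is a made-up example value.
    const result = try wip.cmpxchg(
        .strong, // Instruction.CmpXchg.Kind: .strong or .weak
        .normal, // MemoryAccessKind: .normal or .@"volatile"
        ptr,
        expected,
        new_value,
        .system, // SyncScope
        .seq_cst, // success ordering
        .monotonic, // failure ordering
        Builder.Alignment.fromByteUnits(4),
        "",
    );
    // `result` is a { T, i1 } struct value: the loaded value plus a
    // success bit, mirroring LLVM cmpxchg semantics.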
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/llvm.zig          | 148
-rw-r--r--  src/codegen/llvm/Builder.zig  | 424
-rw-r--r--  src/codegen/llvm/bindings.zig |   5
3 files changed, 362 insertions, 215 deletions
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 47cfe58904..3684237cb2 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4911,8 +4911,8 @@ pub const FuncGen = struct {
.array_to_slice => try self.airArrayToSlice(inst),
.float_from_int => try self.airFloatFromInt(inst),
- .cmpxchg_weak => try self.airCmpxchg(inst, true),
- .cmpxchg_strong => try self.airCmpxchg(inst, false),
+ .cmpxchg_weak => try self.airCmpxchg(inst, .weak),
+ .cmpxchg_strong => try self.airCmpxchg(inst, .strong),
.fence => try self.airFence(inst),
.atomic_rmw => try self.airAtomicRmw(inst),
.atomic_load => try self.airAtomicLoad(inst),
@@ -8723,15 +8723,20 @@ pub const FuncGen = struct {
return .none;
}
- fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !Builder.Value {
+ fn airCmpxchg(
+ self: *FuncGen,
+ inst: Air.Inst.Index,
+ kind: Builder.Function.Instruction.CmpXchg.Kind,
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr = try self.resolveInst(extra.ptr);
+ const ptr_ty = self.typeOf(extra.ptr);
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
- const operand_ty = self.typeOf(extra.ptr).childType(mod);
+ const operand_ty = ptr_ty.childType(mod);
const llvm_operand_ty = try o.lowerType(operand_ty);
const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
if (llvm_abi_ty != .none) {
@@ -8742,22 +8747,18 @@ pub const FuncGen = struct {
new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, "");
}
- const llvm_result_ty = try o.builder.structType(.normal, &.{
- if (llvm_abi_ty != .none) llvm_abi_ty else llvm_operand_ty,
- .i1,
- });
- const result = (try self.wip.unimplemented(llvm_result_ty, "")).finish(
- self.builder.buildAtomicCmpXchg(
- ptr.toLlvm(&self.wip),
- expected_value.toLlvm(&self.wip),
- new_value.toLlvm(&self.wip),
- @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.successOrder()))),
- @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.failureOrder()))),
- llvm.Bool.fromBool(self.sync_scope == .singlethread),
- ),
- &self.wip,
+ const result = try self.wip.cmpxchg(
+ kind,
+ if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
+ ptr,
+ expected_value,
+ new_value,
+ self.sync_scope,
+ toLlvmAtomicOrdering(extra.successOrder()),
+ toLlvmAtomicOrdering(extra.failureOrder()),
+ Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)),
+ "",
);
- result.toLlvm(&self.wip).setWeak(llvm.Bool.fromBool(is_weak));
const optional_ty = self.typeOfIndex(inst);
@@ -8789,63 +8790,54 @@ pub const FuncGen = struct {
const is_float = operand_ty.isRuntimeFloat();
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
const ordering = toLlvmAtomicOrdering(extra.ordering());
- const single_threaded = llvm.Bool.fromBool(self.sync_scope == .singlethread);
- const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg);
+ const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .xchg);
const llvm_operand_ty = try o.lowerType(operand_ty);
+
+ const access_kind: Builder.MemoryAccessKind =
+ if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
+ const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+
if (llvm_abi_ty != .none) {
// operand needs widening and truncating or bitcasting.
- const casted_operand = try self.wip.cast(
- if (is_float) .bitcast else if (is_signed_int) .sext else .zext,
- @enumFromInt(@intFromEnum(operand)),
- llvm_abi_ty,
- "",
- );
-
- const uncasted_result = (try self.wip.unimplemented(llvm_abi_ty, "")).finish(
- self.builder.buildAtomicRmw(
- op,
- ptr.toLlvm(&self.wip),
- casted_operand.toLlvm(&self.wip),
- @enumFromInt(@intFromEnum(ordering)),
- single_threaded,
+ return self.wip.cast(if (is_float) .bitcast else .trunc, try self.wip.atomicrmw(
+ access_kind,
+ op,
+ ptr,
+ try self.wip.cast(
+ if (is_float) .bitcast else if (is_signed_int) .sext else .zext,
+ operand,
+ llvm_abi_ty,
+ "",
),
- &self.wip,
- );
-
- if (is_float) {
- return self.wip.cast(.bitcast, uncasted_result, llvm_operand_ty, "");
- } else {
- return self.wip.cast(.trunc, uncasted_result, llvm_operand_ty, "");
- }
+ self.sync_scope,
+ ordering,
+ ptr_alignment,
+ "",
+ ), llvm_operand_ty, "");
}
- if (!llvm_operand_ty.isPointer(&o.builder)) {
- return (try self.wip.unimplemented(llvm_operand_ty, "")).finish(
- self.builder.buildAtomicRmw(
- op,
- ptr.toLlvm(&self.wip),
- operand.toLlvm(&self.wip),
- @enumFromInt(@intFromEnum(ordering)),
- single_threaded,
- ),
- &self.wip,
- );
- }
+ if (!llvm_operand_ty.isPointer(&o.builder)) return self.wip.atomicrmw(
+ access_kind,
+ op,
+ ptr,
+ operand,
+ self.sync_scope,
+ ordering,
+ ptr_alignment,
+ "",
+ );
// It's a pointer but we need to treat it as an int.
- const llvm_usize = try o.lowerType(Type.usize);
- const casted_operand = try self.wip.cast(.ptrtoint, operand, llvm_usize, "");
- const uncasted_result = (try self.wip.unimplemented(llvm_usize, "")).finish(
- self.builder.buildAtomicRmw(
- op,
- ptr.toLlvm(&self.wip),
- casted_operand.toLlvm(&self.wip),
- @enumFromInt(@intFromEnum(ordering)),
- single_threaded,
- ),
- &self.wip,
- );
- return self.wip.cast(.inttoptr, uncasted_result, llvm_operand_ty, "");
+ return self.wip.cast(.inttoptr, try self.wip.atomicrmw(
+ access_kind,
+ op,
+ ptr,
+ try self.wip.cast(.ptrtoint, operand, try o.lowerType(Type.usize), ""),
+ self.sync_scope,
+ ordering,
+ ptr_alignment,
+ "",
+ ), llvm_operand_ty, "");
}
fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -10581,17 +10573,17 @@ fn toLlvmAtomicRmwBinOp(
op: std.builtin.AtomicRmwOp,
is_signed: bool,
is_float: bool,
-) llvm.AtomicRMWBinOp {
+) Builder.Function.Instruction.AtomicRmw.Operation {
return switch (op) {
- .Xchg => .Xchg,
- .Add => if (is_float) .FAdd else return .Add,
- .Sub => if (is_float) .FSub else return .Sub,
- .And => .And,
- .Nand => .Nand,
- .Or => .Or,
- .Xor => .Xor,
- .Max => if (is_float) .FMax else if (is_signed) .Max else return .UMax,
- .Min => if (is_float) .FMin else if (is_signed) .Min else return .UMin,
+ .Xchg => .xchg,
+ .Add => if (is_float) .fadd else return .add,
+ .Sub => if (is_float) .fsub else return .sub,
+ .And => .@"and",
+ .Nand => .nand,
+ .Or => .@"or",
+ .Xor => .xor,
+ .Max => if (is_float) .fmax else if (is_signed) .max else return .umax,
+ .Min => if (is_float) .fmin else if (is_signed) .min else return .umin,
};
}
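
The mapping above also renames every operation tag to the Builder's lowercase convention while keeping the three-way selection among signed, unsigned, and float variants. An illustrative check (not from the commit) of how one AIR op fans out:

    // Illustrative only: .Max selects among three Builder operations.
    std.debug.assert(toLlvmAtomicRmwBinOp(.Max, true, false) == .max); // signed int
    std.debug.assert(toLlvmAtomicRmwBinOp(.Max, false, false) == .umax); // unsigned int
    std.debug.assert(toLlvmAtomicRmwBinOp(.Max, false, true) == .fmax); // float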
diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
index 5495ea22e8..396580a664 100644
--- a/src/codegen/llvm/Builder.zig
+++ b/src/codegen/llvm/Builder.zig
@@ -2177,7 +2177,7 @@ pub const Global = struct {
if (!builder.useLibLlvm()) return;
const index = @intFromEnum(self.unwrap(builder));
const name_slice = self.name(builder).slice(builder) orelse "";
- builder.llvm.globals.items[index].setValueName2(name_slice.ptr, name_slice.len);
+ builder.llvm.globals.items[index].setValueName(name_slice.ptr, name_slice.len);
}
fn replaceAssumeCapacity(self: Index, other: Index, builder: *Builder) void {
@@ -3759,12 +3759,15 @@ pub const Function = struct {
arg,
ashr,
@"ashr exact",
+ atomicrmw,
bitcast,
block,
br,
br_cond,
call,
@"call fast",
+ cmpxchg,
+ @"cmpxchg weak",
extractelement,
extractvalue,
fadd,
@@ -3833,8 +3836,6 @@ pub const Function = struct {
inttoptr,
load,
@"load atomic",
- @"load atomic volatile",
- @"load volatile",
lshr,
@"lshr exact",
mul,
@@ -3865,8 +3866,6 @@ pub const Function = struct {
srem,
store,
@"store atomic",
- @"store atomic volatile",
- @"store volatile",
sub,
@"sub nsw",
@"sub nuw",
@@ -3879,7 +3878,6 @@ pub const Function = struct {
@"udiv exact",
urem,
uitofp,
- unimplemented,
@"unreachable",
va_arg,
xor,
@@ -3920,8 +3918,6 @@ pub const Function = struct {
.@"ret void",
.store,
.@"store atomic",
- .@"store atomic volatile",
- .@"store volatile",
.@"switch",
.@"unreachable",
=> false,
@@ -3933,7 +3929,6 @@ pub const Function = struct {
.@"notail call fast",
.@"tail call",
.@"tail call fast",
- .unimplemented,
=> self.typeOfWip(wip) != .void,
else => true,
};
@@ -4003,6 +3998,7 @@ pub const Function = struct {
),
.arg => wip.function.typeOf(wip.builder)
.functionParameters(wip.builder)[instruction.data],
+ .atomicrmw => wip.extraData(AtomicRmw, instruction.data).val.typeOfWip(wip),
.block => .label,
.br,
.br_cond,
@@ -4011,8 +4007,6 @@ pub const Function = struct {
.@"ret void",
.store,
.@"store atomic",
- .@"store atomic volatile",
- .@"store volatile",
.@"switch",
.@"unreachable",
=> .none,
@@ -4025,6 +4019,12 @@ pub const Function = struct {
.@"tail call",
.@"tail call fast",
=> wip.extraData(Call, instruction.data).ty.functionReturn(wip.builder),
+ .cmpxchg,
+ .@"cmpxchg weak",
+ => wip.builder.structTypeAssumeCapacity(.normal, &.{
+ wip.extraData(CmpXchg, instruction.data).cmp.typeOfWip(wip),
+ .i1,
+ }) catch unreachable,
.extractelement => wip.extraData(ExtractElement, instruction.data)
.val.typeOfWip(wip).childType(wip.builder),
.extractvalue => {
@@ -4096,8 +4096,6 @@ pub const Function = struct {
.insertvalue => wip.extraData(InsertValue, instruction.data).val.typeOfWip(wip),
.load,
.@"load atomic",
- .@"load atomic volatile",
- .@"load volatile",
=> wip.extraData(Load, instruction.data).type,
.phi,
.@"phi fast",
@@ -4112,7 +4110,6 @@ pub const Function = struct {
wip.builder,
);
},
- .unimplemented => @enumFromInt(instruction.data),
.va_arg => wip.extraData(VaArg, instruction.data).type,
};
}
@@ -4186,6 +4183,8 @@ pub const Function = struct {
),
.arg => function.global.typeOf(builder)
.functionParameters(builder)[instruction.data],
+ .atomicrmw => function.extraData(AtomicRmw, instruction.data)
+ .val.typeOf(function_index, builder),
.block => .label,
.br,
.br_cond,
@@ -4194,8 +4193,6 @@ pub const Function = struct {
.@"ret void",
.store,
.@"store atomic",
- .@"store atomic volatile",
- .@"store volatile",
.@"switch",
.@"unreachable",
=> .none,
@@ -4208,6 +4205,13 @@ pub const Function = struct {
.@"tail call",
.@"tail call fast",
=> function.extraData(Call, instruction.data).ty.functionReturn(builder),
+ .cmpxchg,
+ .@"cmpxchg weak",
+ => builder.structTypeAssumeCapacity(.normal, &.{
+ function.extraData(CmpXchg, instruction.data)
+ .cmp.typeOf(function_index, builder),
+ .i1,
+ }) catch unreachable,
.extractelement => function.extraData(ExtractElement, instruction.data)
.val.typeOf(function_index, builder).childType(builder),
.extractvalue => {
@@ -4282,8 +4286,6 @@ pub const Function = struct {
.val.typeOf(function_index, builder),
.load,
.@"load atomic",
- .@"load atomic volatile",
- .@"load volatile",
=> function.extraData(Load, instruction.data).type,
.phi,
.@"phi fast",
@@ -4298,7 +4300,6 @@ pub const Function = struct {
builder,
);
},
- .unimplemented => @enumFromInt(instruction.data),
.va_arg => function.extraData(VaArg, instruction.data).type,
};
}
@@ -4346,7 +4347,7 @@ pub const Function = struct {
return wip.llvm.instructions.items[@intFromEnum(self)];
}
- fn llvmName(self: Instruction.Index, wip: *const WipFunction) [*:0]const u8 {
+ fn llvmName(self: Instruction.Index, wip: *const WipFunction) [:0]const u8 {
return if (wip.builder.strip)
""
else
@@ -4419,15 +4420,49 @@ pub const Function = struct {
};
pub const Load = struct {
+ info: MemoryAccessInfo,
type: Type,
ptr: Value,
- info: MemoryAccessInfo,
};
pub const Store = struct {
+ info: MemoryAccessInfo,
val: Value,
ptr: Value,
+ };
+
+ pub const CmpXchg = struct {
info: MemoryAccessInfo,
+ ptr: Value,
+ cmp: Value,
+ new: Value,
+
+ pub const Kind = enum { strong, weak };
+ };
+
+ pub const AtomicRmw = struct {
+ info: MemoryAccessInfo,
+ ptr: Value,
+ val: Value,
+
+ pub const Operation = enum(u5) {
+ xchg,
+ add,
+ sub,
+ @"and",
+ nand,
+ @"or",
+ xor,
+ max,
+ min,
+ umax,
+ umin,
+ fadd,
+ fsub,
+ fmax,
+ fmin,
+ none = std.math.maxInt(u5),
+ };
};
pub const GetElementPtr = struct {
@@ -5163,21 +5198,21 @@ pub const WipFunction = struct {
pub fn load(
self: *WipFunction,
- kind: MemoryAccessKind,
+ access_kind: MemoryAccessKind,
ty: Type,
ptr: Value,
alignment: Alignment,
name: []const u8,
) Allocator.Error!Value {
- return self.loadAtomic(kind, ty, ptr, .system, .none, alignment, name);
+ return self.loadAtomic(access_kind, ty, ptr, .system, .none, alignment, name);
}
pub fn loadAtomic(
self: *WipFunction,
- kind: MemoryAccessKind,
+ access_kind: MemoryAccessKind,
ty: Type,
ptr: Value,
- scope: SyncScope,
+ sync_scope: SyncScope,
ordering: AtomicOrdering,
alignment: Alignment,
name: []const u8,
@@ -5186,22 +5221,21 @@ pub const WipFunction = struct {
try self.ensureUnusedExtraCapacity(1, Instruction.Load, 0);
const instruction = try self.addInst(name, .{
.tag = switch (ordering) {
- .none => switch (kind) {
- .normal => .load,
- .@"volatile" => .@"load volatile",
- },
- else => switch (kind) {
- .normal => .@"load atomic",
- .@"volatile" => .@"load atomic volatile",
- },
+ .none => .load,
+ else => .@"load atomic",
},
.data = self.addExtraAssumeCapacity(Instruction.Load{
+ .info = .{
+ .access_kind = access_kind,
+ .sync_scope = switch (ordering) {
+ .none => .system,
+ else => sync_scope,
+ },
+ .success_ordering = ordering,
+ .alignment = alignment,
+ },
.type = ty,
.ptr = ptr,
- .info = .{ .scope = switch (ordering) {
- .none => .system,
- else => scope,
- }, .ordering = ordering, .alignment = alignment },
}),
});
if (self.builder.useLibLlvm()) {
@@ -5210,6 +5244,7 @@ pub const WipFunction = struct {
ptr.toLlvm(self),
instruction.llvmName(self),
);
+ if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering)));
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
@@ -5229,10 +5264,10 @@ pub const WipFunction = struct {
pub fn storeAtomic(
self: *WipFunction,
- kind: MemoryAccessKind,
+ access_kind: MemoryAccessKind,
val: Value,
ptr: Value,
- scope: SyncScope,
+ sync_scope: SyncScope,
ordering: AtomicOrdering,
alignment: Alignment,
) Allocator.Error!Instruction.Index {
@@ -5240,30 +5275,26 @@ pub const WipFunction = struct {
try self.ensureUnusedExtraCapacity(1, Instruction.Store, 0);
const instruction = try self.addInst(null, .{
.tag = switch (ordering) {
- .none => switch (kind) {
- .normal => .store,
- .@"volatile" => .@"store volatile",
- },
- else => switch (kind) {
- .normal => .@"store atomic",
- .@"volatile" => .@"store atomic volatile",
- },
+ .none => .store,
+ else => .@"store atomic",
},
.data = self.addExtraAssumeCapacity(Instruction.Store{
+ .info = .{
+ .access_kind = access_kind,
+ .sync_scope = switch (ordering) {
+ .none => .system,
+ else => sync_scope,
+ },
+ .success_ordering = ordering,
+ .alignment = alignment,
+ },
.val = val,
.ptr = ptr,
- .info = .{ .scope = switch (ordering) {
- .none => .system,
- else => scope,
- }, .ordering = ordering, .alignment = alignment },
}),
});
if (self.builder.useLibLlvm()) {
const llvm_instruction = self.llvm.builder.buildStore(val.toLlvm(self), ptr.toLlvm(self));
- switch (kind) {
- .normal => {},
- .@"volatile" => llvm_instruction.setVolatile(.True),
- }
+ if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering)));
if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
@@ -5273,7 +5304,7 @@ pub const WipFunction = struct {
pub fn fence(
self: *WipFunction,
- scope: SyncScope,
+ sync_scope: SyncScope,
ordering: AtomicOrdering,
) Allocator.Error!Instruction.Index {
assert(ordering != .none);
@@ -5281,21 +5312,130 @@ pub const WipFunction = struct {
const instruction = try self.addInst(null, .{
.tag = .fence,
.data = @bitCast(MemoryAccessInfo{
- .scope = scope,
- .ordering = ordering,
- .alignment = undefined,
+ .sync_scope = sync_scope,
+ .success_ordering = ordering,
}),
});
if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
self.llvm.builder.buildFence(
@enumFromInt(@intFromEnum(ordering)),
- llvm.Bool.fromBool(scope == .singlethread),
+ llvm.Bool.fromBool(sync_scope == .singlethread),
"",
),
);
return instruction;
}
+ pub fn cmpxchg(
+ self: *WipFunction,
+ kind: Instruction.CmpXchg.Kind,
+ access_kind: MemoryAccessKind,
+ ptr: Value,
+ cmp: Value,
+ new: Value,
+ sync_scope: SyncScope,
+ success_ordering: AtomicOrdering,
+ failure_ordering: AtomicOrdering,
+ alignment: Alignment,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(ptr.typeOfWip(self).isPointer(self.builder));
+ const ty = cmp.typeOfWip(self);
+ assert(ty == new.typeOfWip(self));
+ assert(success_ordering != .none);
+ assert(failure_ordering != .none);
+
+ _ = try self.builder.structType(.normal, &.{ ty, .i1 });
+ try self.ensureUnusedExtraCapacity(1, Instruction.CmpXchg, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = switch (kind) {
+ .strong => .cmpxchg,
+ .weak => .@"cmpxchg weak",
+ },
+ .data = self.addExtraAssumeCapacity(Instruction.CmpXchg{
+ .info = .{
+ .access_kind = access_kind,
+ .sync_scope = sync_scope,
+ .success_ordering = success_ordering,
+ .failure_ordering = failure_ordering,
+ .alignment = alignment,
+ },
+ .ptr = ptr,
+ .cmp = cmp,
+ .new = new,
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ const llvm_instruction = self.llvm.builder.buildAtomicCmpXchg(
+ ptr.toLlvm(self),
+ cmp.toLlvm(self),
+ new.toLlvm(self),
+ @enumFromInt(@intFromEnum(success_ordering)),
+ @enumFromInt(@intFromEnum(failure_ordering)),
+ llvm.Bool.fromBool(sync_scope == .singlethread),
+ );
+ if (kind == .weak) llvm_instruction.setWeak(.True);
+ if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
+ if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
+ const llvm_name = instruction.llvmName(self);
+ if (llvm_name.len > 0) llvm_instruction.setValueName(
+ llvm_name.ptr,
+ @intCast(llvm_name.len),
+ );
+ self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
+ }
+ return instruction.toValue();
+ }
+
+ pub fn atomicrmw(
+ self: *WipFunction,
+ access_kind: MemoryAccessKind,
+ operation: Instruction.AtomicRmw.Operation,
+ ptr: Value,
+ val: Value,
+ sync_scope: SyncScope,
+ ordering: AtomicOrdering,
+ alignment: Alignment,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(ptr.typeOfWip(self).isPointer(self.builder));
+ assert(ordering != .none);
+
+ try self.ensureUnusedExtraCapacity(1, Instruction.AtomicRmw, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = .atomicrmw,
+ .data = self.addExtraAssumeCapacity(Instruction.AtomicRmw{
+ .info = .{
+ .access_kind = access_kind,
+ .atomic_rmw_operation = operation,
+ .sync_scope = sync_scope,
+ .success_ordering = ordering,
+ .alignment = alignment,
+ },
+ .ptr = ptr,
+ .val = val,
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ const llvm_instruction = self.llvm.builder.buildAtomicRmw(
+ @enumFromInt(@intFromEnum(operation)),
+ ptr.toLlvm(self),
+ val.toLlvm(self),
+ @enumFromInt(@intFromEnum(ordering)),
+ llvm.Bool.fromBool(sync_scope == .singlethread),
+ );
+ if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True);
+ if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes));
+ const llvm_name = instruction.llvmName(self);
+ if (llvm_name.len > 0) llvm_instruction.setValueName(
+ llvm_name.ptr,
+ @intCast(llvm_name.len),
+ );
+ self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
+ }
+ return instruction.toValue();
+ }
+
pub fn gep(
self: *WipFunction,
kind: Instruction.GetElementPtr.Kind,
@@ -5747,30 +5887,6 @@ pub const WipFunction = struct {
return instruction.toValue();
}
- pub const WipUnimplemented = struct {
- instruction: Instruction.Index,
-
- pub fn finish(self: WipUnimplemented, val: *llvm.Value, wip: *WipFunction) Value {
- assert(wip.builder.useLibLlvm());
- wip.llvm.instructions.items[@intFromEnum(self.instruction)] = val;
- return self.instruction.toValue();
- }
- };
-
- pub fn unimplemented(
- self: *WipFunction,
- ty: Type,
- name: []const u8,
- ) Allocator.Error!WipUnimplemented {
- try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
- const instruction = try self.addInst(name, .{
- .tag = .unimplemented,
- .data = @intFromEnum(ty),
- });
- if (self.builder.useLibLlvm()) _ = self.llvm.instructions.addOneAssumeCapacity();
- return .{ .instruction = instruction };
- }
-
pub fn finish(self: *WipFunction) Allocator.Error!void {
const gpa = self.builder.gpa;
const function = self.function.ptr(self.builder);
@@ -6035,19 +6151,19 @@ pub const WipFunction = struct {
.arg,
.block,
=> unreachable,
+ .atomicrmw => {
+ const extra = self.extraData(Instruction.AtomicRmw, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.AtomicRmw{
+ .info = extra.info,
+ .ptr = instructions.map(extra.ptr),
+ .val = instructions.map(extra.val),
+ });
+ },
.br,
.fence,
.@"ret void",
- .unimplemented,
.@"unreachable",
=> {},
- .extractelement => {
- const extra = self.extraData(Instruction.ExtractElement, instruction.data);
- instruction.data = wip_extra.addExtra(Instruction.ExtractElement{
- .val = instructions.map(extra.val),
- .index = instructions.map(extra.index),
- });
- },
.br_cond => {
const extra = self.extraData(Instruction.BrCond, instruction.data);
instruction.data = wip_extra.addExtra(Instruction.BrCond{
@@ -6076,6 +6192,24 @@ pub const WipFunction = struct {
});
wip_extra.appendMappedValues(args, instructions);
},
+ .cmpxchg,
+ .@"cmpxchg weak",
+ => {
+ const extra = self.extraData(Instruction.CmpXchg, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.CmpXchg{
+ .info = extra.info,
+ .ptr = instructions.map(extra.ptr),
+ .cmp = instructions.map(extra.cmp),
+ .new = instructions.map(extra.new),
+ });
+ },
+ .extractelement => {
+ const extra = self.extraData(Instruction.ExtractElement, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.ExtractElement{
+ .val = instructions.map(extra.val),
+ .index = instructions.map(extra.index),
+ });
+ },
.extractvalue => {
var extra = self.extraDataTrail(Instruction.ExtractValue, instruction.data);
const indices = extra.trail.next(extra.data.indices_len, u32, self);
@@ -6121,8 +6255,6 @@ pub const WipFunction = struct {
},
.load,
.@"load atomic",
- .@"load atomic volatile",
- .@"load volatile",
=> {
const extra = self.extraData(Instruction.Load, instruction.data);
instruction.data = wip_extra.addExtra(Instruction.Load{
@@ -6164,8 +6296,6 @@ pub const WipFunction = struct {
},
.store,
.@"store atomic",
- .@"store atomic volatile",
- .@"store volatile",
=> {
const extra = self.extraData(Instruction.Store, instruction.data);
instruction.data = wip_extra.addExtra(Instruction.Store{
@@ -6619,6 +6749,15 @@ pub const IntegerCondition = enum(u6) {
pub const MemoryAccessKind = enum(u1) {
normal,
@"volatile",
+
+ pub fn format(
+ self: MemoryAccessKind,
+ comptime prefix: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .normal) try writer.print("{s}{s}", .{ prefix, @tagName(self) });
+ }
};
pub const SyncScope = enum(u1) {
@@ -6632,7 +6771,7 @@ pub const SyncScope = enum(u1) {
writer: anytype,
) @TypeOf(writer).Error!void {
if (self != .system) try writer.print(
- \\{s} syncscope("{s}")
+ \\{s}syncscope("{s}")
, .{ prefix, @tagName(self) });
}
};
@@ -6652,15 +6791,18 @@ pub const AtomicOrdering = enum(u3) {
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- if (self != .none) try writer.print("{s} {s}", .{ prefix, @tagName(self) });
+ if (self != .none) try writer.print("{s}{s}", .{ prefix, @tagName(self) });
}
};
const MemoryAccessInfo = packed struct(u32) {
- scope: SyncScope,
- ordering: AtomicOrdering,
- alignment: Alignment,
- _: u22 = undefined,
+ access_kind: MemoryAccessKind = .normal,
+ atomic_rmw_operation: Function.Instruction.AtomicRmw.Operation = .none,
+ sync_scope: SyncScope,
+ success_ordering: AtomicOrdering,
+ failure_ordering: AtomicOrdering = .none,
+ alignment: Alignment = .default,
+ _: u13 = undefined,
};
pub const FastMath = packed struct(u32) {
@@ -7542,7 +7684,7 @@ pub fn init(options: Options) InitError!Builder {
if (options.name.len > 0) self.source_filename = try self.string(options.name);
self.initializeLLVMTarget(options.target.cpu.arch);
if (self.useLibLlvm()) self.llvm.module = llvm.Module.createWithName(
- (self.source_filename.slice(&self) orelse "").ptr,
+ (self.source_filename.slice(&self) orelse ""),
self.llvm.context,
);
@@ -8983,6 +9125,21 @@ pub fn printUnbuffered(
});
},
.arg => unreachable,
+ .atomicrmw => |tag| {
+ const extra =
+ function.extraData(Function.Instruction.AtomicRmw, instruction.data);
+ try writer.print(" %{} = {s}{ } {s} {%}, {%}{ }{ }{, }\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.info.access_kind,
+ @tagName(extra.info.atomic_rmw_operation),
+ extra.ptr.fmt(function_index, self),
+ extra.val.fmt(function_index, self),
+ extra.info.sync_scope,
+ extra.info.success_ordering,
+ extra.info.alignment,
+ });
+ },
.block => {
block_incoming_len = instruction.data;
const name = instruction_index.name(&function);
@@ -9056,6 +9213,24 @@ pub fn printUnbuffered(
});
try writer.writeByte('\n');
},
+ .cmpxchg,
+ .@"cmpxchg weak",
+ => |tag| {
+ const extra =
+ function.extraData(Function.Instruction.CmpXchg, instruction.data);
+ try writer.print(" %{} = {s}{ } {%}, {%}, {%}{ }{ }{ }{, }\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.info.access_kind,
+ extra.ptr.fmt(function_index, self),
+ extra.cmp.fmt(function_index, self),
+ extra.new.fmt(function_index, self),
+ extra.info.sync_scope,
+ extra.info.success_ordering,
+ extra.info.failure_ordering,
+ extra.info.alignment,
+ });
+ },
.extractelement => |tag| {
const extra = function.extraData(
Function.Instruction.ExtractElement,
@@ -9084,7 +9259,11 @@ pub fn printUnbuffered(
},
.fence => |tag| {
const info: MemoryAccessInfo = @bitCast(instruction.data);
- try writer.print(" {s}{}{}", .{ @tagName(tag), info.scope, info.ordering });
+ try writer.print(" {s}{ }{ }", .{
+ @tagName(tag),
+ info.sync_scope,
+ info.success_ordering,
+ });
},
.fneg,
.@"fneg fast",
@@ -9145,18 +9324,17 @@ pub fn printUnbuffered(
},
.load,
.@"load atomic",
- .@"load atomic volatile",
- .@"load volatile",
=> |tag| {
const extra =
function.extraData(Function.Instruction.Load, instruction.data);
- try writer.print(" %{} = {s} {%}, {%}{}{}{, }\n", .{
+ try writer.print(" %{} = {s}{ } {%}, {%}{ }{ }{, }\n", .{
instruction_index.name(&function).fmt(self),
@tagName(tag),
+ extra.info.access_kind,
extra.type.fmt(self),
extra.ptr.fmt(function_index, self),
- extra.info.scope,
- extra.info.ordering,
+ extra.info.sync_scope,
+ extra.info.success_ordering,
extra.info.alignment,
});
},
@@ -9220,17 +9398,16 @@ pub fn printUnbuffered(
},
.store,
.@"store atomic",
- .@"store atomic volatile",
- .@"store volatile",
=> |tag| {
const extra =
function.extraData(Function.Instruction.Store, instruction.data);
- try writer.print(" {s} {%}, {%}{}{}{, }\n", .{
+ try writer.print(" {s}{ } {%}, {%}{ }{ }{, }\n", .{
@tagName(tag),
+ extra.info.access_kind,
extra.val.fmt(function_index, self),
extra.ptr.fmt(function_index, self),
- extra.info.scope,
- extra.info.ordering,
+ extra.info.sync_scope,
+ extra.info.success_ordering,
extra.info.alignment,
});
},
@@ -9254,25 +9431,6 @@ pub fn printUnbuffered(
);
try writer.writeAll(" ]\n");
},
- .unimplemented => |tag| {
- const ty: Type = @enumFromInt(instruction.data);
- if (true) {
- try writer.writeAll(" ");
- switch (ty) {
- .none, .void => {},
- else => try writer.print("%{} = ", .{
- instruction_index.name(&function).fmt(self),
- }),
- }
- try writer.print("{s} {%}\n", .{ @tagName(tag), ty.fmt(self) });
- } else switch (ty) {
- .none, .void => {},
- else => try writer.print(" %{} = load {%}, ptr undef\n", .{
- instruction_index.name(&function).fmt(self),
- ty.fmt(self),
- }),
- }
- },
.va_arg => |tag| {
const extra =
function.extraData(Function.Instruction.VaArg, instruction.data);
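
All of these instructions share the reworked `MemoryAccessInfo`, which still fits the instruction's 32-bit `data` slot. The field widths visible in the diff imply the bit budget below (the 6-bit alignment is inferred from the `u13` padding, so treat this as an illustrative check rather than part of the commit):

    // 1 (access_kind) + 5 (atomic_rmw_operation) + 1 (sync_scope)
    // + 3 (success_ordering) + 3 (failure_ordering) + 6 (alignment)
    // + 13 (padding) == 32 bits
    comptime {
        std.debug.assert(@bitSizeOf(MemoryAccessInfo) == 32);
    }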
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index d5a1445176..ccfdd9407c 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -330,10 +330,7 @@ pub const Value = opaque {
pub const fnSetSubprogram = ZigLLVMFnSetSubprogram;
extern fn ZigLLVMFnSetSubprogram(f: *Value, subprogram: *DISubprogram) void;
- pub const setValueName = LLVMSetValueName;
- extern fn LLVMSetValueName(Val: *Value, Name: [*:0]const u8) void;
-
- pub const setValueName2 = LLVMSetValueName2;
+ pub const setValueName = LLVMSetValueName2;
extern fn LLVMSetValueName2(Val: *Value, Name: [*]const u8, NameLen: usize) void;
pub const getValueName = LLVMGetValueName;
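
The bindings change drops the deprecated `LLVMSetValueName`, which requires a null-terminated `[*:0]const u8`, and points `setValueName` at `LLVMSetValueName2`, which takes a pointer plus an explicit length and therefore accepts any byte slice. A hedged usage sketch (`some_value` is a hypothetical *Value, not from the commit):

    // Illustrative only: explicit length, no null terminator required.
    const name: []const u8 = "my_value";
    some_value.setValueName(name.ptr, name.len);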