From f938404a45d347fe448351c70cc34db98700e068 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 6 Aug 2023 01:17:47 -0400 Subject: Builder: fix attribute spacing --- src/codegen/llvm/Builder.zig | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 640cde8409..d500b5d362 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1151,13 +1151,13 @@ pub const Attribute = union(Kind) { .sret, .elementtype, => |ty| try writer.print(" {s}({%})", .{ @tagName(attribute), ty.fmt(data.builder) }), - .@"align" => |alignment| try writer.print("{}", .{alignment}), + .@"align" => |alignment| try writer.print("{ }", .{alignment}), .dereferenceable, .dereferenceable_or_null, => |size| try writer.print(" {s}({d})", .{ @tagName(attribute), size }), .nofpclass => |fpclass| { const Int = @typeInfo(FpClass).Struct.backing_integer.?; - try writer.print("{s}(", .{@tagName(attribute)}); + try writer.print(" {s}(", .{@tagName(attribute)}); var any = false; var remaining: Int = @bitCast(fpclass); inline for (@typeInfo(FpClass).Struct.decls) |decl| { @@ -1175,13 +1175,13 @@ pub const Attribute = union(Kind) { }, .alignstack => |alignment| try writer.print( if (comptime std.mem.indexOfScalar(u8, fmt_str, '#') != null) - "{s}={d}" + " {s}={d}" else - "{s}({d})", + " {s}({d})", .{ @tagName(attribute), alignment.toByteUnits() orelse return }, ), .allockind => |allockind| { - try writer.print("{s}(\"", .{@tagName(attribute)}); + try writer.print(" {s}(\"", .{@tagName(attribute)}); var any = false; inline for (@typeInfo(AllocKind).Struct.fields) |field| { if (comptime std.mem.eql(u8, field.name, "_")) continue; @@ -1196,22 +1196,22 @@ pub const Attribute = union(Kind) { try writer.writeAll("\")"); }, .allocsize => |allocsize| { - try writer.print("{s}({d}", .{ @tagName(attribute), allocsize.elem_size }); + try writer.print(" {s}({d}", .{ @tagName(attribute), allocsize.elem_size }); if (allocsize.num_elems != AllocSize.none) try writer.print(",{d}", .{allocsize.num_elems}); try writer.writeByte(')'); }, - .memory => |memory| try writer.print("{s}({s}, argmem: {s}, inaccessiblemem: {s})", .{ + .memory => |memory| try writer.print(" {s}({s}, argmem: {s}, inaccessiblemem: {s})", .{ @tagName(attribute), @tagName(memory.other), @tagName(memory.argmem), @tagName(memory.inaccessiblemem), }), .uwtable => |uwtable| if (uwtable != .none) { - try writer.writeAll(@tagName(attribute)); + try writer.print(" {s}", .{@tagName(attribute)}); if (uwtable != UwTable.default) try writer.print("({s})", .{@tagName(uwtable)}); }, - .vscale_range => |vscale_range| try writer.print("{s}({d},{d})", .{ + .vscale_range => |vscale_range| try writer.print(" {s}({d},{d})", .{ @tagName(attribute), vscale_range.min.toByteUnits().?, vscale_range.max.toByteUnits() orelse 0, @@ -1867,7 +1867,7 @@ pub const AddrSpace = enum(u24) { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self != .default) try writer.print("{s} addrspace({d})", .{ prefix, @intFromEnum(self) }); + if (self != .default) try writer.print("{s}addrspace({d})", .{ prefix, @intFromEnum(self) }); } }; @@ -1908,7 +1908,7 @@ pub const Alignment = enum(u6) { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try writer.print("{s} align {d}", .{ prefix, self.toByteUnits() orelse return }); + try writer.print("{s}align {d}", .{ prefix, self.toByteUnits() orelse return }); } }; @@ 
-7413,7 +7413,7 @@ pub fn printUnbuffered( if (variable.global.getReplacement(self) != .none) continue; const global = variable.global.ptrConst(self); try writer.print( - \\{} ={}{}{}{}{}{}{}{} {s} {%}{ }{,} + \\{} ={}{}{}{}{}{}{}{} {s} {%}{ }{, } \\ , .{ variable.global.fmt(self), @@ -7612,7 +7612,7 @@ pub fn printUnbuffered( => |tag| { const extra = function.extraData(Function.Instruction.Alloca, instruction.data); - try writer.print(" %{} = {s} {%}{,%}{,}{,}\n", .{ + try writer.print(" %{} = {s} {%}{,%}{, }{, }\n", .{ instruction_index.name(&function).fmt(self), @tagName(tag), extra.type.fmt(self), @@ -7840,7 +7840,7 @@ pub fn printUnbuffered( => |tag| { const extra = function.extraData(Function.Instruction.Load, instruction.data); - try writer.print(" %{} = {s} {%}, {%}{}{}{,}\n", .{ + try writer.print(" %{} = {s} {%}, {%}{}{}{, }\n", .{ instruction_index.name(&function).fmt(self), @tagName(tag), extra.type.fmt(self), @@ -7908,7 +7908,7 @@ pub fn printUnbuffered( => |tag| { const extra = function.extraData(Function.Instruction.Store, instruction.data); - try writer.print(" {s} {%}, {%}{}{}{,}\n", .{ + try writer.print(" {s} {%}, {%}{}{}{, }\n", .{ @tagName(tag), extra.val.fmt(function_index, self), extra.ptr.fmt(function_index, self), -- cgit v1.2.3 From 3ebf8ce9704647686a9f9654c59e0be17bc78984 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 6 Aug 2023 02:01:29 -0400 Subject: Builder: fix builtin pseudo-instruction dumping --- src/codegen/llvm/Builder.zig | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index d500b5d362..95c1218b70 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -7727,7 +7727,14 @@ pub fn printUnbuffered( }, .fneg, .@"fneg fast", - .ret, + => |tag| { + const val: Value = @enumFromInt(instruction.data); + try writer.print(" %{} = {s} {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + val.fmt(function_index, self), + }); + }, .@"llvm.ceil.", .@"llvm.cos.", .@"llvm.exp.", @@ -7746,8 +7753,12 @@ pub fn printUnbuffered( .@"llvm.ctpop.", => |tag| { const val: Value = @enumFromInt(instruction.data); - try writer.print(" {s} {%}\n", .{ + const ty = val.typeOf(function_index, self); + try writer.print(" %{} = call {%} @{s}{m}({%})\n", .{ + instruction_index.name(&function).fmt(self), + ty.fmt(self), @tagName(tag), + ty.fmt(self), val.fmt(function_index, self), }); }, @@ -7872,6 +7883,13 @@ pub fn printUnbuffered( } try writer.writeByte('\n'); }, + .ret => |tag| { + const val: Value = @enumFromInt(instruction.data); + try writer.print(" {s} {%}\n", .{ + @tagName(tag), + val.fmt(function_index, self), + }); + }, .@"ret void", .@"unreachable", => |tag| try writer.print(" {s}\n", .{@tagName(tag)}), @@ -7966,13 +7984,14 @@ pub fn printUnbuffered( extra.type.fmt(self), }); }, - .@"llvm.fma." => { + .@"llvm.fma." 
=> |tag| { const extra = function.extraData(Function.Instruction.FusedMultiplyAdd, instruction.data); const ty = instruction_index.typeOf(function_index, self); - try writer.print(" %{} = call {%} @llvm.fma.{m}({%}, {%}, {%})\n", .{ + try writer.print(" %{} = call {%} @{s}{m}({%}, {%}, {%})\n", .{ instruction_index.name(&function).fmt(self), ty.fmt(self), + @tagName(tag), ty.fmt(self), extra.a.fmt(function_index, self), extra.b.fmt(function_index, self), -- cgit v1.2.3 From b63d9745b5d7413acea4f4723ac9f696e3fb8149 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 6 Aug 2023 07:16:39 -0400 Subject: llvm: convert intrinsics to using `Builder` --- src/codegen/llvm.zig | 700 +++++++-------------- src/codegen/llvm/Builder.zig | 1376 ++++++++++++++++++++++++++++++----------- src/codegen/llvm/bindings.zig | 126 ---- src/zig_llvm.cpp | 347 +---------- src/zig_llvm.h | 82 +-- 5 files changed, 1260 insertions(+), 1371 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d9b99ec6d0..48e45400cd 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4962,9 +4962,9 @@ pub const FuncGen = struct { .mul_wrap => try self.airMulWrap(inst), .mul_sat => try self.airMulSat(inst), - .add_safe => try self.airSafeArithmetic(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"), - .sub_safe => try self.airSafeArithmetic(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"), - .mul_safe => try self.airSafeArithmetic(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"), + .add_safe => try self.airSafeArithmetic(inst, .@"sadd.with.overflow", .@"uadd.with.overflow"), + .sub_safe => try self.airSafeArithmetic(inst, .@"ssub.with.overflow", .@"usub.with.overflow"), + .mul_safe => try self.airSafeArithmetic(inst, .@"smul.with.overflow", .@"umul.with.overflow"), .div_float => try self.airDivFloat(inst, false), .div_trunc => try self.airDivTrunc(inst, false), @@ -4989,9 +4989,9 @@ pub const FuncGen = struct { .rem_optimized => try self.airRem(inst, true), .mod_optimized => try self.airMod(inst, true), - .add_with_overflow => try self.airOverflow(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"), - .sub_with_overflow => try self.airOverflow(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"), - .mul_with_overflow => try self.airOverflow(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"), + .add_with_overflow => try self.airOverflow(inst, .@"sadd.with.overflow", .@"uadd.with.overflow"), + .sub_with_overflow => try self.airOverflow(inst, .@"ssub.with.overflow", .@"usub.with.overflow"), + .mul_with_overflow => try self.airOverflow(inst, .@"smul.with.overflow", .@"umul.with.overflow"), .shl_with_overflow => try self.airShlWithOverflow(inst), .bit_and, .bool_and => try self.airAnd(inst), @@ -5100,11 +5100,11 @@ pub const FuncGen = struct { .memcpy => try self.airMemcpy(inst), .set_union_tag => try self.airSetUnionTag(inst), .get_union_tag => try self.airGetUnionTag(inst), - .clz => try self.airClzCtz(inst, .@"llvm.ctlz."), - .ctz => try self.airClzCtz(inst, .@"llvm.cttz."), - .popcount => try self.airBitOp(inst, .@"llvm.ctpop."), + .clz => try self.airClzCtz(inst, .ctlz), + .ctz => try self.airClzCtz(inst, .cttz), + .popcount => try self.airBitOp(inst, .ctpop), .byte_swap => try self.airByteSwap(inst), - .bit_reverse => try self.airBitOp(inst, .@"llvm.bitreverse."), + .bit_reverse => try self.airBitOp(inst, .bitreverse), .tag_name => try self.airTagName(inst), .error_name => try self.airErrorName(inst), .splat => try 
self.airSplat(inst), @@ -5645,22 +5645,7 @@ pub const FuncGen = struct { const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); - const llvm_fn_name = "llvm.va_copy"; - const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .ptr }, .normal); - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse - o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - - const args: [2]*llvm.Value = .{ dest_list.toLlvm(&self.wip), src_list.toLlvm(&self.wip) }; - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - llvm_fn, - &args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); - + _ = try self.wip.callIntrinsic(.va_copy, &.{}, &.{ dest_list, src_list }, ""); return if (isByRef(va_list_ty, mod)) dest_list else @@ -5668,25 +5653,10 @@ pub const FuncGen = struct { } fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; - const list = try self.resolveInst(un_op); - - const llvm_fn_name = "llvm.va_end"; - const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal); - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse - o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); + const src_list = try self.resolveInst(un_op); - const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)}; - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - llvm_fn, - &args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); + _ = try self.wip.callIntrinsic(.va_end, &.{}, &.{src_list}, ""); return .none; } @@ -5697,28 +5667,13 @@ pub const FuncGen = struct { const llvm_va_list_ty = try o.lowerType(va_list_ty); const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); - const list = try self.buildAlloca(llvm_va_list_ty, result_alignment); - - const llvm_fn_name = "llvm.va_start"; - const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal); - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse - o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - - const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)}; - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - llvm_fn, - &args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); + const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); + _ = try self.wip.callIntrinsic(.va_start, &.{}, &.{dest_list}, ""); return if (isByRef(va_list_ty, mod)) - list + dest_list else - try self.wip.load(.normal, llvm_va_list_ty, list, result_alignment, ""); + try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); } fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !Builder.Value { @@ -7570,40 +7525,18 @@ pub const FuncGen = struct { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.size", &.{.i32}); - const args: [1]*llvm.Value = .{ - (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), - }; - return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(.i32, &.{.i32}, .normal)).toLlvm(&o.builder), - llvm_fn, - 
&args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); + return self.wip.callIntrinsic(.@"wasm.memory.size", &.{.i32}, &.{ + try o.builder.intValue(.i32, index), + }, ""); } fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - const operand = try self.resolveInst(pl_op.operand); - const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.grow", &.{.i32}); - const args: [2]*llvm.Value = .{ - (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), - operand.toLlvm(&self.wip), - }; - return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(.i32, &.{ .i32, .i32 }, .normal)).toLlvm(&o.builder), - llvm_fn, - &args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); + return self.wip.callIntrinsic(.@"wasm.memory.grow", &.{.i32}, &.{ + try o.builder.intValue(.i32, index), try self.resolveInst(pl_op.operand), + }, ""); } fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -7636,13 +7569,16 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.typeOfIndex(inst).scalarType(mod); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); - return self.wip.bin(if (scalar_ty.isSignedInt(mod)) - .@"llvm.smin." - else - .@"llvm.umin.", lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, inst_ty, 2, .{ lhs, rhs }); + return self.wip.callIntrinsic( + if (scalar_ty.isSignedInt(mod)) .smin else .umin, + &.{try o.lowerType(inst_ty)}, + &.{ lhs, rhs }, + "", + ); } fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -7651,13 +7587,16 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.typeOfIndex(inst).scalarType(mod); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); - return self.wip.bin(if (scalar_ty.isSignedInt(mod)) - .@"llvm.smax." 
- else - .@"llvm.umax.", lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, inst_ty, 2, .{ lhs, rhs }); + return self.wip.callIntrinsic( + if (scalar_ty.isSignedInt(mod)) .smax else .umax, + &.{try o.lowerType(inst_ty)}, + &.{ lhs, rhs }, + "", + ); } fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -7688,8 +7627,8 @@ pub const FuncGen = struct { fn airSafeArithmetic( fg: *FuncGen, inst: Air.Inst.Index, - signed_intrinsic: []const u8, - unsigned_intrinsic: []const u8, + signed_intrinsic: Builder.Intrinsic, + unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = fg.dg.object; const mod = o.module; @@ -7699,36 +7638,19 @@ pub const FuncGen = struct { const rhs = try fg.resolveInst(bin_op.rhs); const inst_ty = fg.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - const is_scalar = scalar_ty.ip_index == inst_ty.ip_index; - const intrinsic_name = switch (scalar_ty.isSignedInt(mod)) { - true => signed_intrinsic, - false => unsigned_intrinsic, - }; + const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; const llvm_inst_ty = try o.lowerType(inst_ty); - const llvm_ret_ty = try o.builder.structType(.normal, &.{ - llvm_inst_ty, - try llvm_inst_ty.changeScalar(.i1, &o.builder), - }); - const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_inst_ty, llvm_inst_ty }, .normal); - const llvm_fn = try fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); - const result_struct = (try fg.wip.unimplemented(llvm_ret_ty, "")).finish(fg.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - llvm_fn, - &[_]*llvm.Value{ lhs.toLlvm(&fg.wip), rhs.toLlvm(&fg.wip) }, - 2, - .Fast, - .Auto, - "", - ), &fg.wip); - const overflow_bit = try fg.wip.extractValue(result_struct, &.{1}, ""); - const scalar_overflow_bit = switch (is_scalar) { - true => overflow_bit, - false => (try fg.wip.unimplemented(.i1, "")).finish( + const results = try fg.wip.callIntrinsic(intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, ""); + + const overflow_bit = try fg.wip.extractValue(results, &.{1}, ""); + const scalar_overflow_bit = if (llvm_inst_ty.isVector(&o.builder)) + (try fg.wip.unimplemented(.i1, "")).finish( fg.builder.buildOrReduce(overflow_bit.toLlvm(&fg.wip)), &fg.wip, - ), - }; + ) + else + overflow_bit; const fail_block = try fg.wip.block(1, "OverflowFail"); const ok_block = try fg.wip.block(1, "OverflowOk"); @@ -7738,7 +7660,7 @@ pub const FuncGen = struct { try fg.buildSimplePanic(.integer_overflow); fg.wip.cursor = .{ .block = ok_block }; - return fg.wip.extractValue(result_struct, &.{0}, ""); + return fg.wip.extractValue(results, &.{0}, ""); } fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -7759,10 +7681,12 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - return self.wip.bin(if (scalar_ty.isSignedInt(mod)) - .@"llvm.sadd.sat." - else - .@"llvm.uadd.sat.", lhs, rhs, ""); + return self.wip.callIntrinsic( + if (scalar_ty.isSignedInt(mod)) .@"sadd.sat" else .@"uadd.sat", + &.{try o.lowerType(inst_ty)}, + &.{ lhs, rhs }, + "", + ); } fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { @@ -7798,10 +7722,12 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - return self.wip.bin(if (scalar_ty.isSignedInt(mod)) - .@"llvm.ssub.sat." 
- else - .@"llvm.usub.sat.", lhs, rhs, ""); + return self.wip.callIntrinsic( + if (scalar_ty.isSignedInt(mod)) .@"ssub.sat" else .@"usub.sat", + &.{try o.lowerType(inst_ty)}, + &.{ lhs, rhs }, + "", + ); } fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { @@ -7837,10 +7763,12 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); - return self.wip.bin(if (scalar_ty.isSignedInt(mod)) - .@"llvm.smul.fix.sat." - else - .@"llvm.umul.fix.sat.", lhs, rhs, ""); + return self.wip.callIntrinsic( + if (scalar_ty.isSignedInt(mod)) .@"smul.fix.sat" else .@"umul.fix.sat", + &.{try o.lowerType(inst_ty)}, + &.{ lhs, rhs, try o.builder.intValue(.i32, 0) }, + "", + ); } fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { @@ -8028,8 +7956,8 @@ pub const FuncGen = struct { fn airOverflow( self: *FuncGen, inst: Air.Inst.Index, - signed_intrinsic: []const u8, - unsigned_intrinsic: []const u8, + signed_intrinsic: Builder.Intrinsic, + unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { const o = self.dg.object; const mod = o.module; @@ -8041,48 +7969,29 @@ pub const FuncGen = struct { const lhs_ty = self.typeOf(extra.lhs); const scalar_ty = lhs_ty.scalarType(mod); - const dest_ty = self.typeOfIndex(inst); - - const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; + const inst_ty = self.typeOfIndex(inst); - const llvm_dest_ty = try o.lowerType(dest_ty); + const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; + const llvm_inst_ty = try o.lowerType(inst_ty); const llvm_lhs_ty = try o.lowerType(lhs_ty); + const results = try self.wip.callIntrinsic(intrinsic, &.{llvm_lhs_ty}, &.{ lhs, rhs }, ""); - const llvm_fn = try self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); - const llvm_ret_ty = try o.builder.structType( - .normal, - &.{ llvm_lhs_ty, try llvm_lhs_ty.changeScalar(.i1, &o.builder) }, - ); - const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_lhs_ty, llvm_lhs_ty }, .normal); - const result_struct = (try self.wip.unimplemented(llvm_ret_ty, "")).finish( - self.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - llvm_fn, - &[_]*llvm.Value{ lhs.toLlvm(&self.wip), rhs.toLlvm(&self.wip) }, - 2, - .Fast, - .Auto, - "", - ), - &self.wip, - ); - - const result = try self.wip.extractValue(result_struct, &.{0}, ""); - const overflow_bit = try self.wip.extractValue(result_struct, &.{1}, ""); + const result_val = try self.wip.extractValue(results, &.{0}, ""); + const overflow_bit = try self.wip.extractValue(results, &.{1}, ""); - const result_index = llvmField(dest_ty, 0, mod).?.index; - const overflow_index = llvmField(dest_ty, 1, mod).?.index; + const result_index = llvmField(inst_ty, 0, mod).?.index; + const overflow_index = llvmField(inst_ty, 1, mod).?.index; - if (isByRef(dest_ty, mod)) { - const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod)); - const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); + if (isByRef(inst_ty, mod)) { + const result_alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_inst_ty, result_alignment); { - const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); - _ = try self.wip.store(.normal, result, field_ptr, result_alignment); + const field_ptr = try 
self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, ""); + _ = try self.wip.store(.normal, result_val, field_ptr, result_alignment); } { const overflow_alignment = comptime Builder.Alignment.fromByteUnits(1); - const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, ""); + const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, overflow_index, ""); _ = try self.wip.store(.normal, overflow_bit, field_ptr, overflow_alignment); } @@ -8090,9 +7999,9 @@ pub const FuncGen = struct { } var fields: [2]Builder.Value = undefined; - fields[result_index] = result; + fields[result_index] = result_val; fields[overflow_index] = overflow_bit; - return self.wip.buildAggregate(llvm_dest_ty, &fields, ""); + return self.wip.buildAggregate(llvm_inst_ty, &fields, ""); } fn buildElementwiseCall( @@ -8140,22 +8049,7 @@ pub const FuncGen = struct { .function => |function| function, else => unreachable, }; - - const fn_type = try o.builder.fnType(return_type, param_types, .normal); - const f = o.llvm_module.addFunction(fn_name.slice(&o.builder).?, fn_type.toLlvm(&o.builder)); - - var global = Builder.Global{ - .type = fn_type, - .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, - }; - var function = Builder.Function{ - .global = @enumFromInt(o.builder.globals.count()), - }; - - try o.builder.llvm.globals.append(self.gpa, f); - _ = try o.builder.addGlobal(fn_name, global); - try o.builder.functions.append(self.gpa, function); - return global.kind.function; + return o.builder.addFunction(try o.builder.fnType(return_type, param_types, .normal), fn_name); } /// Creates a floating point comparison by lowering to the appropriate @@ -8290,22 +8184,22 @@ pub const FuncGen = struct { .mul => return self.wip.bin(.fmul, params[0], params[1], ""), .div => return self.wip.bin(.fdiv, params[0], params[1], ""), .fmod => return self.wip.bin(.frem, params[0], params[1], ""), - .fmax => return self.wip.bin(.@"llvm.maxnum.", params[0], params[1], ""), - .fmin => return self.wip.bin(.@"llvm.minnum.", params[0], params[1], ""), - .ceil => return self.wip.un(.@"llvm.ceil.", params[0], ""), - .cos => return self.wip.un(.@"llvm.cos.", params[0], ""), - .exp => return self.wip.un(.@"llvm.exp.", params[0], ""), - .exp2 => return self.wip.un(.@"llvm.exp2.", params[0], ""), - .fabs => return self.wip.un(.@"llvm.fabs.", params[0], ""), - .floor => return self.wip.un(.@"llvm.floor.", params[0], ""), - .log => return self.wip.un(.@"llvm.log.", params[0], ""), - .log10 => return self.wip.un(.@"llvm.log10.", params[0], ""), - .log2 => return self.wip.un(.@"llvm.log2.", params[0], ""), - .round => return self.wip.un(.@"llvm.round.", params[0], ""), - .sin => return self.wip.un(.@"llvm.sin.", params[0], ""), - .sqrt => return self.wip.un(.@"llvm.sqrt.", params[0], ""), - .trunc => return self.wip.un(.@"llvm.trunc.", params[0], ""), - .fma => return self.wip.fusedMultiplyAdd(params[0], params[1], params[2]), + .fmax => return self.wip.callIntrinsic(.maxnum, &.{llvm_ty}, ¶ms, ""), + .fmin => return self.wip.callIntrinsic(.minnum, &.{llvm_ty}, ¶ms, ""), + .ceil => return self.wip.callIntrinsic(.ceil, &.{llvm_ty}, ¶ms, ""), + .cos => return self.wip.callIntrinsic(.cos, &.{llvm_ty}, ¶ms, ""), + .exp => return self.wip.callIntrinsic(.exp, &.{llvm_ty}, ¶ms, ""), + .exp2 => return self.wip.callIntrinsic(.exp2, &.{llvm_ty}, ¶ms, ""), + .fabs => return self.wip.callIntrinsic(.fabs, &.{llvm_ty}, ¶ms, ""), + .floor => return self.wip.callIntrinsic(.floor, &.{llvm_ty}, ¶ms, ""), + .log => return 
self.wip.callIntrinsic(.log, &.{llvm_ty}, ¶ms, ""), + .log10 => return self.wip.callIntrinsic(.log10, &.{llvm_ty}, ¶ms, ""), + .log2 => return self.wip.callIntrinsic(.log2, &.{llvm_ty}, ¶ms, ""), + .round => return self.wip.callIntrinsic(.round, &.{llvm_ty}, ¶ms, ""), + .sin => return self.wip.callIntrinsic(.sin, &.{llvm_ty}, ¶ms, ""), + .sqrt => return self.wip.callIntrinsic(.sqrt, &.{llvm_ty}, ¶ms, ""), + .trunc => return self.wip.callIntrinsic(.trunc, &.{llvm_ty}, ¶ms, ""), + .fma => return self.wip.callIntrinsic(.fma, &.{llvm_ty}, ¶ms, ""), .tan => unreachable, }; @@ -8499,25 +8393,27 @@ pub const FuncGen = struct { const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); - const result = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) - .@"llvm.sshl.sat." - else - .@"llvm.ushl.sat.", lhs, casted_rhs, ""); + const llvm_lhs_ty = try o.lowerType(lhs_ty); + const llvm_lhs_scalar_ty = llvm_lhs_ty.scalarType(&o.builder); + const result = try self.wip.callIntrinsic( + if (lhs_scalar_ty.isSignedInt(mod)) .@"sshl.sat" else .@"ushl.sat", + &.{llvm_lhs_ty}, + &.{ lhs, casted_rhs }, + "", + ); // LLVM langref says "If b is (statically or dynamically) equal to or // larger than the integer bit width of the arguments, the result is a // poison value." // However Zig semantics says that saturating shift left can never produce // undefined; instead it saturates. - const lhs_llvm_ty = try o.lowerType(lhs_ty); - const lhs_scalar_llvm_ty = lhs_llvm_ty.scalarType(&o.builder); const bits = try o.builder.splatValue( - lhs_llvm_ty, - try o.builder.intConst(lhs_scalar_llvm_ty, lhs_bits), + llvm_lhs_ty, + try o.builder.intConst(llvm_lhs_scalar_ty, lhs_bits), ); const lhs_max = try o.builder.splatValue( - lhs_llvm_ty, - try o.builder.intConst(lhs_scalar_llvm_ty, -1), + llvm_lhs_ty, + try o.builder.intConst(llvm_lhs_scalar_ty, -1), ); const in_range = try self.wip.icmp(.ult, rhs, bits, ""); return self.wip.select(in_range, result, lhs_max, ""); @@ -8940,90 +8836,38 @@ pub const FuncGen = struct { fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const o = self.dg.object; - const llvm_fn = try self.getIntrinsic("llvm.trap", &.{}); - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder), - llvm_fn, - undefined, - 0, - .Cold, - .Auto, - "", - ), &self.wip); + _ = try self.wip.callIntrinsic(.trap, &.{}, &.{}, ""); _ = try self.wip.@"unreachable"(); return .none; } fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const o = self.dg.object; - const llvm_fn = try self.getIntrinsic("llvm.debugtrap", &.{}); - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder), - llvm_fn, - undefined, - 0, - .C, - .Auto, - "", - ), &self.wip); + _ = try self.wip.callIntrinsic(.debugtrap, &.{}, &.{}, ""); return .none; } fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; - const mod = o.module; const llvm_usize = try o.lowerType(Type.usize); - const target = mod.getTarget(); - if (!target_util.supportsReturnAddress(target)) { + if (!target_util.supportsReturnAddress(o.module.getTarget())) { // https://github.com/ziglang/zig/issues/11946 return o.builder.intValue(llvm_usize, 0); } - - const llvm_fn = try self.getIntrinsic("llvm.returnaddress", &.{}); - const params = [_]*llvm.Value{ - (try o.builder.intConst(.i32, 
0)).toLlvm(&o.builder), - }; - const ptr_val = (try self.wip.unimplemented(.ptr, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(.ptr, &.{.i32}, .normal)).toLlvm(&o.builder), - llvm_fn, - ¶ms, - params.len, - .Fast, - .Auto, - "", - ), &self.wip); - return self.wip.cast(.ptrtoint, ptr_val, llvm_usize, ""); + const result = try self.wip.callIntrinsic(.returnaddress, &.{}, &.{ + try o.builder.intValue(.i32, 0), + }, ""); + return self.wip.cast(.ptrtoint, result, llvm_usize, ""); } fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; - const llvm_fn_name = "llvm.frameaddress.p0"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const fn_type = try o.builder.fnType(.ptr, &.{.i32}, .normal); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); - }; - const llvm_fn_ty = try o.builder.fnType(.ptr, &.{.i32}, .normal); - - const params = [_]*llvm.Value{ - (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), - }; - const ptr_val = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( - self.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - llvm_fn, - ¶ms, - params.len, - .Fast, - .Auto, - "", - ), - &self.wip, - ); - return self.wip.cast(.ptrtoint, ptr_val, try o.lowerType(Type.usize), ""); + const result = try self.wip.callIntrinsic(.frameaddress, &.{.ptr}, &.{ + try o.builder.intValue(.i32, 0), + }, ""); + return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), ""); } fn airFence(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -9526,26 +9370,37 @@ pub const FuncGen = struct { return self.buildFloatOp(.neg, operand_ty, 1, .{operand}); } - fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Function.Instruction.Tag) !Builder.Value { + fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const inst_ty = self.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const wrong_size_result = try self.wip.bin(intrinsic, operand, (try o.builder.intConst(.i1, 0)).toValue(), ""); - - const result_ty = self.typeOfIndex(inst); - return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); + const result = + try self.wip.callIntrinsic( + intrinsic, + &.{try o.lowerType(operand_ty)}, + &.{ operand, .false }, + "", + ); + return self.wip.conv(.unsigned, result, try o.lowerType(inst_ty), ""); } - fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Function.Instruction.Tag) !Builder.Value { + fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const inst_ty = self.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const wrong_size_result = try self.wip.un(intrinsic, operand, ""); - - const result_ty = self.typeOfIndex(inst); - return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); + const result = try self.wip.callIntrinsic( + intrinsic, + &.{try o.lowerType(operand_ty)}, + &.{operand}, + "", + ); + return self.wip.conv(.unsigned, result, try o.lowerType(inst_ty), ""); } fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ 
-9556,6 +9411,7 @@ pub const FuncGen = struct { var bits = operand_ty.intInfo(mod).bits; assert(bits % 8 == 0); + const inst_ty = self.typeOfIndex(inst); var operand = try self.resolveInst(ty_op.operand); var llvm_operand_ty = try o.lowerType(operand_ty); @@ -9576,10 +9432,8 @@ pub const FuncGen = struct { bits = bits + 8; } - const wrong_size_result = try self.wip.un(.@"llvm.bswap.", operand, ""); - - const result_ty = self.typeOfIndex(inst); - return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); + const result = try self.wip.callIntrinsic(.bswap, &.{llvm_operand_ty}, &.{operand}, ""); + return self.wip.conv(.unsigned, result, try o.lowerType(inst_ty), ""); } fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -9609,11 +9463,7 @@ pub const FuncGen = struct { self.wip.cursor = .{ .block = end_block }; const phi = try self.wip.phi(.i1, ""); - try phi.finish( - &.{ Builder.Constant.true.toValue(), Builder.Constant.false.toValue() }, - &.{ valid_block, invalid_block }, - &self.wip, - ); + try phi.finish(&.{ .true, .false }, &.{ valid_block, invalid_block }, &self.wip); return phi.toValue(); } @@ -9646,37 +9496,24 @@ pub const FuncGen = struct { errdefer assert(o.named_enum_map.remove(enum_type.decl)); const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - const llvm_fn_name = try o.builder.fmt("__zig_is_named_enum_value_{}", .{ - fqn.fmt(&mod.intern_pool), - }); + const function_index = try o.builder.addFunction( + try o.builder.fnType(.i1, &.{try o.lowerType(enum_type.tag_ty.toType())}, .normal), + try o.builder.fmt("__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}), + ); var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); - const fn_type = try o.builder.fnType(.i1, &.{ - try o.lowerType(enum_type.tag_ty.toType()), - }, .normal); - const fn_val = o.llvm_module.addFunction(llvm_fn_name.slice(&o.builder).?, fn_type.toLlvm(&o.builder)); - fn_val.setLinkage(.Internal); - fn_val.setFunctionCallConv(.Fast); - try o.addCommonFnAttributes(&attributes, fn_val); + function_index.toLlvm(&o.builder).setLinkage(.Internal); + function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); + try o.addCommonFnAttributes(&attributes, function_index.toLlvm(&o.builder)); - var global = Builder.Global{ - .linkage = .internal, - .type = fn_type, - .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, - }; - var function = Builder.Function{ - .global = @enumFromInt(o.builder.globals.count()), - .call_conv = .fastcc, - .attributes = try attributes.finish(&o.builder), - }; - try o.builder.llvm.globals.append(self.gpa, fn_val); - _ = try o.builder.addGlobal(llvm_fn_name, global); - try o.builder.functions.append(self.gpa, function); - gop.value_ptr.* = global.kind.function; + function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; + function_index.ptr(&o.builder).call_conv = .fastcc; + function_index.ptr(&o.builder).attributes = try attributes.finish(&o.builder); + gop.value_ptr.* = function_index; - var wip = try Builder.WipFunction.init(&o.builder, global.kind.function); + var wip = try Builder.WipFunction.init(&o.builder, function_index); defer wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; @@ -9693,13 +9530,13 @@ pub const FuncGen = struct { try wip_switch.addCase(this_tag_int_value, named_block, &wip); } wip.cursor = .{ .block = named_block }; - _ = try wip.ret(Builder.Constant.true.toValue()); + _ = try wip.ret(.true); 
wip.cursor = .{ .block = unnamed_block }; - _ = try wip.ret(Builder.Constant.false.toValue()); + _ = try wip.ret(.false); try wip.finish(); - return global.kind.function; + return function_index; } fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -9730,38 +9567,27 @@ pub const FuncGen = struct { if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function; errdefer assert(o.decl_map.remove(enum_type.decl)); + const usize_ty = try o.lowerType(Type.usize); + const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); + const function_index = try o.builder.addFunction( + try o.builder.fnType(ret_ty, &.{try o.lowerType(enum_type.tag_ty.toType())}, .normal), + try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}), + ); var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); - const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const usize_ty = try o.lowerType(Type.usize); + function_index.toLlvm(&o.builder).setLinkage(.Internal); + function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); + try o.addCommonFnAttributes(&attributes, function_index.toLlvm(&o.builder)); - const fn_type = try o.builder.fnType(ret_ty, &.{ - try o.lowerType(enum_type.tag_ty.toType()), - }, .normal); - const fn_val = o.llvm_module.addFunction(llvm_fn_name.slice(&o.builder).?, fn_type.toLlvm(&o.builder)); - fn_val.setLinkage(.Internal); - fn_val.setFunctionCallConv(.Fast); - try o.addCommonFnAttributes(&attributes, fn_val); + function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; + function_index.ptr(&o.builder).call_conv = .fastcc; + function_index.ptr(&o.builder).attributes = try attributes.finish(&o.builder); + gop.value_ptr.* = function_index.ptrConst(&o.builder).global; - var global = Builder.Global{ - .linkage = .internal, - .type = fn_type, - .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, - }; - var function = Builder.Function{ - .global = @enumFromInt(o.builder.globals.count()), - .call_conv = .fastcc, - .attributes = try attributes.finish(&o.builder), - }; - try o.builder.llvm.globals.append(self.gpa, fn_val); - gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); - try o.builder.functions.append(self.gpa, function); - - var wip = try Builder.WipFunction.init(&o.builder, global.kind.function); + var wip = try Builder.WipFunction.init(&o.builder, function_index); defer wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; @@ -9817,7 +9643,7 @@ pub const FuncGen = struct { _ = try wip.@"unreachable"(); try wip.finish(); - return global.kind.function; + return function_index; } fn getCmpLtErrorsLenFunction(self: *FuncGen) !Builder.Function.Index { @@ -9826,33 +9652,23 @@ pub const FuncGen = struct { const name = try o.builder.string(lt_errors_fn_name); if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function; - // Function signature: fn (anyerror) bool - - const fn_type = try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal); - const llvm_fn = o.llvm_module.addFunction(name.slice(&o.builder).?, fn_type.toLlvm(&o.builder)); + const function_index = try o.builder.addFunction( + try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal), + name, + ); var attributes: Builder.FunctionAttributes.Wip = .{}; defer 
attributes.deinit(&o.builder); - llvm_fn.setLinkage(.Internal); - llvm_fn.setFunctionCallConv(.Fast); - try o.addCommonFnAttributes(&attributes, llvm_fn); + function_index.toLlvm(&o.builder).setLinkage(.Internal); + function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); + try o.addCommonFnAttributes(&attributes, function_index.toLlvm(&o.builder)); - var global = Builder.Global{ - .linkage = .internal, - .type = fn_type, - .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, - }; - var function = Builder.Function{ - .global = @enumFromInt(o.builder.globals.count()), - .call_conv = .fastcc, - .attributes = try attributes.finish(&o.builder), - }; + function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; + function_index.ptr(&o.builder).call_conv = .fastcc; + function_index.ptr(&o.builder).attributes = try attributes.finish(&o.builder); - try o.builder.llvm.globals.append(self.gpa, llvm_fn); - _ = try o.builder.addGlobal(name, global); - try o.builder.functions.append(self.gpa, function); - return global.kind.function; + return function_index; } fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -10416,29 +10232,12 @@ pub const FuncGen = struct { .data => {}, } - const llvm_fn_name = "llvm.prefetch.p0"; - // declare void @llvm.prefetch(i8*, i32, i32, i32) - const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .i32, .i32, .i32 }, .normal); - const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse - o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - - const ptr = try self.resolveInst(prefetch.ptr); - - const params = [_]*llvm.Value{ - ptr.toLlvm(&self.wip), - (try o.builder.intConst(.i32, @intFromEnum(prefetch.rw))).toLlvm(&o.builder), - (try o.builder.intConst(.i32, prefetch.locality)).toLlvm(&o.builder), - (try o.builder.intConst(.i32, @intFromEnum(prefetch.cache))).toLlvm(&o.builder), - }; - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCallOld( - llvm_fn_ty.toLlvm(&o.builder), - fn_val, - ¶ms, - params.len, - .C, - .Auto, - "", - ), &self.wip); + _ = try self.wip.callIntrinsic(.prefetch, &.{.ptr}, &.{ + try self.resolveInst(prefetch.ptr), + try o.builder.intValue(.i32, prefetch.rw), + try o.builder.intValue(.i32, prefetch.locality), + try o.builder.intValue(.i32, prefetch.cache), + }, ""); return .none; } @@ -10451,26 +10250,20 @@ pub const FuncGen = struct { return self.wip.cast(.addrspacecast, operand, try o.lowerType(inst_ty), ""); } - fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !Builder.Value { + fn amdgcnWorkIntrinsic( + self: *FuncGen, + dimension: u32, + default: u32, + comptime basename: []const u8, + ) !Builder.Value { const o = self.dg.object; - const llvm_fn_name = switch (dimension) { - 0 => basename ++ ".x", - 1 => basename ++ ".y", - 2 => basename ++ ".z", + const intrinsic = switch (dimension) { + 0 => @field(Builder.Intrinsic, basename ++ ".x"), + 1 => @field(Builder.Intrinsic, basename ++ ".y"), + 2 => @field(Builder.Intrinsic, basename ++ ".z"), else => return o.builder.intValue(.i32, default), }; - - const args: [0]*llvm.Value = .{}; - const llvm_fn = try self.getIntrinsic(llvm_fn_name, &.{}); - return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(.i32, &.{}, .normal)).toLlvm(&o.builder), - llvm_fn, - &args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); + return self.wip.callIntrinsic(intrinsic, &.{}, &.{}, ""); } fn 
airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -10480,7 +10273,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const dimension = pl_op.payload; - return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workitem.id"); + return self.amdgcnWorkIntrinsic(dimension, 0, "amdgcn.workitem.id"); } fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -10492,27 +10285,9 @@ pub const FuncGen = struct { const dimension = pl_op.payload; if (dimension >= 3) return o.builder.intValue(.i32, 1); - var attributes: Builder.FunctionAttributes.Wip = .{}; - defer attributes.deinit(&o.builder); - // Fetch the dispatch pointer, which points to this structure: // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913 - const llvm_fn = try self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{}); - const args: [0]*llvm.Value = .{}; - const llvm_ret_ty = try o.builder.ptrType(Builder.AddrSpace.amdgpu.constant); - const dispatch_ptr = (try self.wip.unimplemented(llvm_ret_ty, "")).finish(self.builder.buildCallOld( - (try o.builder.fnType(llvm_ret_ty, &.{}, .normal)).toLlvm(&o.builder), - llvm_fn, - &args, - args.len, - .Fast, - .Auto, - "", - ), &self.wip); - try attributes.addRetAttr(.{ - .@"align" = comptime Builder.Alignment.fromByteUnits(4), - }, &o.builder); - o.addAttrInt(dispatch_ptr.toLlvm(&self.wip), 0, "align", 4); + const dispatch_ptr = try self.wip.callIntrinsic(.@"amdgcn.dispatch.ptr", &.{}, &.{}, ""); // Load the work_group_* member from the struct as u16. // Just treat the dispatch pointer as an array of u16 to keep things simple. @@ -10530,7 +10305,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const dimension = pl_op.payload; - return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workgroup.id"); + return self.amdgcnWorkIntrinsic(dimension, 0, "amdgcn.workgroup.id"); } fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { @@ -10716,20 +10491,6 @@ pub const FuncGen = struct { } } - fn getIntrinsic( - fg: *FuncGen, - name: []const u8, - types: []const Builder.Type, - ) Allocator.Error!*llvm.Value { - const o = fg.dg.object; - const id = llvm.lookupIntrinsicID(name.ptr, name.len); - assert(id != 0); - const llvm_types = try o.gpa.alloc(*llvm.Type, types.len); - defer o.gpa.free(llvm_types); - for (llvm_types, types) |*llvm_type, ty| llvm_type.* = ty.toLlvm(&o.builder); - return o.llvm_module.getIntrinsicDeclaration(id, llvm_types.ptr, llvm_types.len); - } - /// Load a by-ref type by constructing a new alloca and performing a memcpy. 
fn loadByRef( fg: *FuncGen, @@ -10778,7 +10539,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index)); + const index_u32 = try o.builder.intValue(.i32, info.flags.vector_index); const vec_elem_ty = try o.lowerType(elem_ty); const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); @@ -10848,7 +10609,7 @@ pub const FuncGen = struct { assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index)); + const index_u32 = try o.builder.intValue(.i32, info.flags.vector_index); const vec_elem_ty = try o.lowerType(elem_ty); const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); @@ -10982,26 +10743,15 @@ pub const FuncGen = struct { else => unreachable, }; - const fn_llvm_ty = (try o.builder.fnType(llvm_usize, &(.{llvm_usize} ** 2), .normal)).toLlvm(&o.builder); - const array_ptr_as_usize = try fg.wip.cast(.ptrtoint, array_ptr, llvm_usize, ""); - const args = [_]*llvm.Value{ array_ptr_as_usize.toLlvm(&fg.wip), default_value.toLlvm(&fg.wip) }; - const asm_fn = llvm.getInlineAsm( - fn_llvm_ty, - arch_specific.template.ptr, - arch_specific.template.len, - arch_specific.constraints.ptr, - arch_specific.constraints.len, - .True, // has side effects - .False, // alignstack - .ATT, - .False, // can throw - ); - - const call = (try fg.wip.unimplemented(llvm_usize, "")).finish( - fg.builder.buildCallOld(fn_llvm_ty, asm_fn, &args, args.len, .C, .Auto, ""), - &fg.wip, + return try fg.wip.callAsm( + .none, + try o.builder.fnType(llvm_usize, &.{ llvm_usize, llvm_usize }, .normal), + .{ .sideeffect = true }, + try o.builder.string(arch_specific.template), + try o.builder.string(arch_specific.constraints), + &.{ try fg.wip.cast(.ptrtoint, array_ptr, llvm_usize, ""), default_value }, + "", ); - return call; } fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 95c1218b70..28545fe95e 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -50,10 +50,12 @@ constant_extra: std.ArrayListUnmanaged(u32), constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb), pub const expected_args_len = 16; +pub const expected_attrs_len = 16; pub const expected_fields_len = 32; pub const expected_gep_indices_len = 8; pub const expected_cases_len = 8; pub const expected_incoming_len = 8; +pub const expected_intrinsic_name_len = 64; pub const Options = struct { allocator: Allocator, @@ -151,11 +153,14 @@ pub const Type = enum(u32) { i80, i128, ptr, + @"ptr addrspace(4)", none = std.math.maxInt(u32), _, pub const err_int = Type.i16; + pub const ptr_amdgpu_constant = + @field(Type, std.fmt.comptimePrint("ptr{ }", .{AddrSpace.amdgpu.constant})); pub const Tag = enum(u4) { simple, @@ -391,7 +396,7 @@ pub const Type = enum(u32) { .double, .i64, .x86_mmx => 64, .x86_fp80, .i80 => 80, .fp128, .ppc_fp128, .i128 => 128, - .ptr => @panic("TODO: query data layout"), + .ptr, .@"ptr addrspace(4)" => @panic("TODO: query data layout"), _ => { const item = builder.type_items.items[@intFromEnum(self)]; return switch (item.tag) { @@ -690,7 +695,7 @@ pub const Type = enum(u32) { } }, .integer => try writer.print("i{d}", .{item.data}), - .pointer => try writer.print("ptr{}", .{@as(AddrSpace, @enumFromInt(item.data))}), + 
.pointer => try writer.print("ptr{ }", .{@as(AddrSpace, @enumFromInt(item.data))}), .target => { var extra = data.builder.typeExtraDataTrail(Type.Target, item.data); const types = extra.trail.next(extra.data.types_len, Type, data.builder); @@ -795,6 +800,7 @@ pub const Type = enum(u32) { .i80, .i128, .ptr, + .@"ptr addrspace(4)", => true, .none => unreachable, _ => { @@ -1201,12 +1207,20 @@ pub const Attribute = union(Kind) { try writer.print(",{d}", .{allocsize.num_elems}); try writer.writeByte(')'); }, - .memory => |memory| try writer.print(" {s}({s}, argmem: {s}, inaccessiblemem: {s})", .{ - @tagName(attribute), - @tagName(memory.other), - @tagName(memory.argmem), - @tagName(memory.inaccessiblemem), - }), + .memory => |memory| { + try writer.print(" {s}(", .{@tagName(attribute)}); + var any = memory.other != .none or + (memory.argmem == .none and memory.inaccessiblemem == .none); + if (any) try writer.writeAll(@tagName(memory.other)); + inline for (.{ "argmem", "inaccessiblemem" }) |kind| { + if (@field(memory, kind) != memory.other) { + if (any) try writer.writeAll(", "); + try writer.print("{s}: {s}", .{ kind, @tagName(@field(memory, kind)) }); + any = true; + } + } + try writer.writeByte(')'); + }, .uwtable => |uwtable| if (uwtable != .none) { try writer.print(" {s}", .{@tagName(attribute)}); if (uwtable != UwTable.default) try writer.print("({s})", .{@tagName(uwtable)}); @@ -1424,12 +1438,16 @@ pub const Attribute = union(Kind) { }; pub const Memory = packed struct(u32) { - argmem: Effect, - inaccessiblemem: Effect, - other: Effect, + argmem: Effect = .none, + inaccessiblemem: Effect = .none, + other: Effect = .none, _: u26 = 0, pub const Effect = enum(u2) { none, read, write, readwrite }; + + fn all(effect: Effect) Memory { + return .{ .argmem = effect, .inaccessiblemem = effect, .other = effect }; + } }; pub const UwTable = enum(u32) { @@ -2279,6 +2297,820 @@ pub const Variable = struct { }; }; +pub const Intrinsic = enum { + // Variable Argument Handling + va_start, + va_end, + va_copy, + + // Code Generator + returnaddress, + addressofreturnaddress, + sponentry, + frameaddress, + prefetch, + @"thread.pointer", + + // Standard C/C++ Library + abs, + smax, + smin, + umax, + umin, + memcpy, + @"memcpy.inline", + memmove, + memset, + @"memset.inline", + sqrt, + powi, + sin, + cos, + pow, + exp, + exp2, + ldexp, + frexp, + log, + log10, + log2, + fma, + fabs, + minnum, + maxnum, + minimum, + maximum, + copysign, + floor, + ceil, + trunc, + rint, + nearbyint, + round, + roundeven, + lround, + llround, + lrint, + llrint, + + // Bit Manipulation + bitreverse, + bswap, + ctpop, + ctlz, + cttz, + fshl, + fshr, + + // Arithmetic with Overflow + @"sadd.with.overflow", + @"uadd.with.overflow", + @"ssub.with.overflow", + @"usub.with.overflow", + @"smul.with.overflow", + @"umul.with.overflow", + + // Saturation Arithmetic + @"sadd.sat", + @"uadd.sat", + @"ssub.sat", + @"usub.sat", + @"sshl.sat", + @"ushl.sat", + + // Fixed Point Arithmetic + @"smul.fix", + @"umul.fix", + @"smul.fix.sat", + @"umul.fix.sat", + @"sdiv.fix", + @"udiv.fix", + @"sdiv.fix.sat", + @"udiv.fix.sat", + + // Specialised Arithmetic + canonicalisze, + fmuladd, + + // Vector Reduction + @"vector.reduce.add", + @"vector.reduce.fadd", + @"vector.reduce.mul", + @"vector.reduce.fmul", + @"vector.reduce.and", + @"vector.reduce.or", + @"vector.reduce.xor", + @"vector.reduce.smax", + @"vector.reduce.smin", + @"vector.reduce.umax", + @"vector.reduce.umin", + @"vector.reduce.fmax", + @"vector.reduce.fmin", + 
@"vector.reduce.fmaximum", + @"vector.reduce.fminimum", + @"vector.reduce.insert", + @"vector.reduce.extract", + @"vector.insert", + @"vector.extract", + + // General + @"llvm.var.annotation", + @"llvm.ptr.annotation", + annotation, + @"codeview.annotation", + trap, + debugtrap, + ubsantrap, + stackprotector, + stackguard, + objectsize, + expect, + @"expect.with.probability", + assume, + @"ssa.copy", + @"type.test", + @"type.checked.load", + @"type.checked.load.relative", + @"arithmetic.fence", + donothing, + @"load.relative", + @"llvm.sideeffect", + @"is.constant", + ptrmask, + @"threadlocal.address", + vscale, + + // AMDGPU + @"amdgcn.workitem.id.x", + @"amdgcn.workitem.id.y", + @"amdgcn.workitem.id.z", + @"amdgcn.workgroup.id.x", + @"amdgcn.workgroup.id.y", + @"amdgcn.workgroup.id.z", + @"amdgcn.dispatch.ptr", + + // WebAssembly + @"wasm.memory.size", + @"wasm.memory.grow", + + const Signature = struct { + params: []const Parameter = &.{}, + attrs: []const Attribute = &.{}, + + const Parameter = struct { + kind: Kind, + attrs: []const Attribute = &.{}, + + const Kind = union(enum) { + type: Type, + overloaded, + overloaded_tuple: u8, + matches: u8, + matches_tuple: packed struct { param: u4, field: u4 }, + matches_with_overflow: u8, + }; + }; + }; + + const signatures = std.enums.EnumArray(Intrinsic, Signature).initDefault(.{}, .{ + .va_start = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + .{ .kind = .{ .type = .ptr } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, + }, + .va_end = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + .{ .kind = .{ .type = .ptr } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, + }, + .va_copy = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .ptr } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, + }, + + .returnaddress = .{ + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .addressofreturnaddress = .{ + .params = &.{ + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .sponentry = .{ + .params = &.{ + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .frameaddress = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .prefetch = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + .{ .kind = .overloaded, .attrs = &.{ .nocapture, .readonly } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.readwrite) } }, + }, + .@"thread.pointer" = .{ + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .abs = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } 
}, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .smax = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .smin = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .umax = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .umin = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .sqrt = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .powi = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .sin = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .cos = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .pow = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .exp = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .exp2 = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .ldexp = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .frexp = .{ + .params = &.{ + .{ .kind = .{ .overloaded_tuple = 2 } }, + .{ .kind = .{ .matches_tuple = .{ .param = 0, .field = 0 } } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .log = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = 
.{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .log10 = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .log2 = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .fma = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .fabs = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .minnum = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .maxnum = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .minimum = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .maximum = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .copysign = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .floor = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .ceil = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .trunc = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .rint = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .nearbyint = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ 
.nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .round = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .roundeven = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .lround = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .llround = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .lrint = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .llrint = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .bitreverse = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .bswap = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .ctpop = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .ctlz = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .cttz = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .fshl = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .fshr = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .@"sadd.with.overflow" = .{ + .params = &.{ + .{ .kind = .{ .matches_with_overflow = 1 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, 
.nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"uadd.with.overflow" = .{ + .params = &.{ + .{ .kind = .{ .matches_with_overflow = 1 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"ssub.with.overflow" = .{ + .params = &.{ + .{ .kind = .{ .matches_with_overflow = 1 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"usub.with.overflow" = .{ + .params = &.{ + .{ .kind = .{ .matches_with_overflow = 1 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"smul.with.overflow" = .{ + .params = &.{ + .{ .kind = .{ .matches_with_overflow = 1 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"umul.with.overflow" = .{ + .params = &.{ + .{ .kind = .{ .matches_with_overflow = 1 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .@"sadd.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"uadd.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"ssub.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"usub.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"sshl.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"ushl.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .@"smul.fix" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + 
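// How the parameter kinds in this table resolve (an illustrative reading of the
// getIntrinsic logic added later in this change): slot 0 is the return type,
// `.overloaded` slots are filled from the overload types supplied by the caller,
// `.matches = N` copies the already-resolved type of slot N, and
// `.matches_with_overflow = N` wraps that type in a `{ T, i1 }` result struct.
// Instantiated with a single i32 overload, the `.@"sadd.with.overflow"` and
// `.@"smul.fix"` entries above are therefore expected to produce declarations
// roughly equivalent to:
//
//   declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
//   declare i32 @llvm.smul.fix.i32(i32, i32, i32 immarg)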
.@"umul.fix" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"smul.fix.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"umul.fix.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"sdiv.fix" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"udiv.fix" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"sdiv.fix.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"udiv.fix.sat" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .trap = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + }, + .attrs = &.{ .cold, .noreturn, .nounwind, .{ .memory = .{ .inaccessiblemem = .write } } }, + }, + .debugtrap = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + }, + .attrs = &.{.nounwind}, + }, + .ubsantrap = .{ + .params = &.{ + .{ .kind = .{ .type = .void } }, + .{ .kind = .{ .type = .i8 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .cold, .noreturn, .nounwind }, + }, + + .@"amdgcn.workitem.id.x" = .{ + .params = &.{ + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"amdgcn.workitem.id.y" = .{ + .params = &.{ + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"amdgcn.workitem.id.z" = .{ + .params = &.{ + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"amdgcn.workgroup.id.x" = .{ + .params = &.{ + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, 
.{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"amdgcn.workgroup.id.y" = .{ + .params = &.{ + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"amdgcn.workgroup.id.z" = .{ + .params = &.{ + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"amdgcn.dispatch.ptr" = .{ + .params = &.{ + .{ + .kind = .{ .type = Type.ptr_amdgpu_constant }, + .attrs = &.{.{ .@"align" = Builder.Alignment.fromByteUnits(4) }}, + }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .@"wasm.memory.size" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"wasm.memory.grow" = .{ + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i32 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, + }, + }); +}; + pub const Function = struct { global: Global.Index, call_conv: CallConv = CallConv.default, @@ -2414,39 +3246,6 @@ pub const Function = struct { insertelement, insertvalue, inttoptr, - @"llvm.maxnum.", - @"llvm.minnum.", - @"llvm.ceil.", - @"llvm.cos.", - @"llvm.exp.", - @"llvm.exp2.", - @"llvm.fabs.", - @"llvm.floor.", - @"llvm.log.", - @"llvm.log10.", - @"llvm.log2.", - @"llvm.round.", - @"llvm.sin.", - @"llvm.sqrt.", - @"llvm.trunc.", - @"llvm.fma.", - @"llvm.bitreverse.", - @"llvm.bswap.", - @"llvm.ctpop.", - @"llvm.ctlz.", - @"llvm.cttz.", - @"llvm.sadd.sat.", - @"llvm.smax.", - @"llvm.smin.", - @"llvm.smul.fix.sat.", - @"llvm.sshl.sat.", - @"llvm.ssub.sat.", - @"llvm.uadd.sat.", - @"llvm.umax.", - @"llvm.umin.", - @"llvm.umul.fix.sat.", - @"llvm.ushl.sat.", - @"llvm.usub.sat.", load, @"load atomic", @"load atomic volatile", @@ -2575,22 +3374,6 @@ pub const Function = struct { .@"frem fast", .fsub, .@"fsub fast", - .@"llvm.maxnum.", - .@"llvm.minnum.", - .@"llvm.ctlz.", - .@"llvm.cttz.", - .@"llvm.sadd.sat.", - .@"llvm.smax.", - .@"llvm.smin.", - .@"llvm.smul.fix.sat.", - .@"llvm.sshl.sat.", - .@"llvm.ssub.sat.", - .@"llvm.uadd.sat.", - .@"llvm.umax.", - .@"llvm.umin.", - .@"llvm.umul.fix.sat.", - .@"llvm.ushl.sat.", - .@"llvm.usub.sat.", .lshr, .@"lshr exact", .mul, @@ -2710,22 +3493,6 @@ pub const Function = struct { .changeScalarAssumeCapacity(.i1, wip.builder), .fneg, .@"fneg fast", - .@"llvm.ceil.", - .@"llvm.cos.", - .@"llvm.exp.", - .@"llvm.exp2.", - .@"llvm.fabs.", - .@"llvm.floor.", - .@"llvm.log.", - .@"llvm.log10.", - .@"llvm.log2.", - .@"llvm.round.", - .@"llvm.sin.", - .@"llvm.sqrt.", - .@"llvm.trunc.", - .@"llvm.bitreverse.", - .@"llvm.bswap.", - .@"llvm.ctpop.", => @as(Value, @enumFromInt(instruction.data)).typeOfWip(wip), .getelementptr, .@"getelementptr inbounds", @@ -2762,7 +3529,6 @@ pub const Function = struct { }, .unimplemented => @enumFromInt(instruction.data), .va_arg => wip.extraData(VaArg, instruction.data).type, - .@"llvm.fma." 
=> wip.extraData(FusedMultiplyAdd, instruction.data).a.typeOfWip(wip), }; } @@ -2791,22 +3557,6 @@ pub const Function = struct { .@"frem fast", .fsub, .@"fsub fast", - .@"llvm.maxnum.", - .@"llvm.minnum.", - .@"llvm.ctlz.", - .@"llvm.cttz.", - .@"llvm.sadd.sat.", - .@"llvm.smax.", - .@"llvm.smin.", - .@"llvm.smul.fix.sat.", - .@"llvm.sshl.sat.", - .@"llvm.ssub.sat.", - .@"llvm.uadd.sat.", - .@"llvm.umax.", - .@"llvm.umin.", - .@"llvm.umul.fix.sat.", - .@"llvm.ushl.sat.", - .@"llvm.usub.sat.", .lshr, .@"lshr exact", .mul, @@ -2927,22 +3677,6 @@ pub const Function = struct { .changeScalarAssumeCapacity(.i1, builder), .fneg, .@"fneg fast", - .@"llvm.ceil.", - .@"llvm.cos.", - .@"llvm.exp.", - .@"llvm.exp2.", - .@"llvm.fabs.", - .@"llvm.floor.", - .@"llvm.log.", - .@"llvm.log10.", - .@"llvm.log2.", - .@"llvm.round.", - .@"llvm.sin.", - .@"llvm.sqrt.", - .@"llvm.trunc.", - .@"llvm.bitreverse.", - .@"llvm.bswap.", - .@"llvm.ctpop.", => @as(Value, @enumFromInt(instruction.data)).typeOf(function_index, builder), .getelementptr, .@"getelementptr inbounds", @@ -2981,7 +3715,6 @@ pub const Function = struct { }, .unimplemented => @enumFromInt(instruction.data), .va_arg => function.extraData(VaArg, instruction.data).type, - .@"llvm.fma." => function.extraData(FusedMultiplyAdd, instruction.data).a.typeOf(function_index, builder), }; } @@ -3074,12 +3807,6 @@ pub const Function = struct { mask: Value, }; - pub const FusedMultiplyAdd = struct { - a: Value, - b: Value, - c: Value, - }; - pub const ExtractValue = struct { val: Value, indices_len: u32, @@ -3487,24 +4214,7 @@ pub const WipFunction = struct { switch (tag) { .fneg, .@"fneg fast", - .@"llvm.ceil.", - .@"llvm.cos.", - .@"llvm.exp.", - .@"llvm.exp2.", - .@"llvm.fabs.", - .@"llvm.floor.", - .@"llvm.log.", - .@"llvm.log10.", - .@"llvm.log2.", - .@"llvm.round.", - .@"llvm.sin.", - .@"llvm.sqrt.", - .@"llvm.trunc.", => assert(val.typeOfWip(self).scalarType(self.builder).isFloatingPoint()), - .@"llvm.bitreverse.", - .@"llvm.bswap.", - .@"llvm.ctpop.", - => assert(val.typeOfWip(self).scalarType(self.builder).isInteger(self.builder)), else => unreachable, } try self.ensureUnusedExtraCapacity(1, NoExtra, 0); @@ -3513,43 +4223,10 @@ pub const WipFunction = struct { switch (tag) { .fneg => self.llvm.builder.setFastMath(false), .@"fneg fast" => self.llvm.builder.setFastMath(true), - .@"llvm.ceil.", - .@"llvm.cos.", - .@"llvm.exp.", - .@"llvm.exp2.", - .@"llvm.fabs.", - .@"llvm.floor.", - .@"llvm.log.", - .@"llvm.log10.", - .@"llvm.log2.", - .@"llvm.round.", - .@"llvm.sin.", - .@"llvm.sqrt.", - .@"llvm.trunc.", - .@"llvm.bitreverse.", - .@"llvm.bswap.", - .@"llvm.ctpop.", - => {}, else => unreachable, } self.llvm.instructions.appendAssumeCapacity(switch (tag) { .fneg, .@"fneg fast" => &llvm.Builder.buildFNeg, - .@"llvm.ceil." => &llvm.Builder.buildCeil, - .@"llvm.cos." => &llvm.Builder.buildCos, - .@"llvm.exp." => &llvm.Builder.buildExp, - .@"llvm.exp2." => &llvm.Builder.buildExp2, - .@"llvm.fabs." => &llvm.Builder.buildFAbs, - .@"llvm.floor." => &llvm.Builder.buildFloor, - .@"llvm.log." => &llvm.Builder.buildLog, - .@"llvm.log10." => &llvm.Builder.buildLog10, - .@"llvm.log2." => &llvm.Builder.buildLog2, - .@"llvm.round." => &llvm.Builder.buildRound, - .@"llvm.sin." => &llvm.Builder.buildSin, - .@"llvm.sqrt." => &llvm.Builder.buildSqrt, - .@"llvm.trunc." => &llvm.Builder.buildFTrunc, - .@"llvm.bitreverse." => &llvm.Builder.buildBitReverse, - .@"llvm.bswap." => &llvm.Builder.buildBSwap, - .@"llvm.ctpop." 
=> &llvm.Builder.buildCTPop, else => unreachable, }(self.llvm.builder, val.toLlvm(self), instruction.llvmName(self))); } @@ -3593,20 +4270,6 @@ pub const WipFunction = struct { .@"frem fast", .fsub, .@"fsub fast", - .@"llvm.maxnum.", - .@"llvm.minnum.", - .@"llvm.sadd.sat.", - .@"llvm.smax.", - .@"llvm.smin.", - .@"llvm.smul.fix.sat.", - .@"llvm.sshl.sat.", - .@"llvm.ssub.sat.", - .@"llvm.uadd.sat.", - .@"llvm.umax.", - .@"llvm.umin.", - .@"llvm.umul.fix.sat.", - .@"llvm.ushl.sat.", - .@"llvm.usub.sat.", .lshr, .@"lshr exact", .mul, @@ -3627,9 +4290,6 @@ pub const WipFunction = struct { .urem, .xor, => assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)), - .@"llvm.ctlz.", - .@"llvm.cttz.", - => assert(lhs.typeOfWip(self).scalarType(self.builder).isInteger(self.builder) and rhs.typeOfWip(self) == .i1), else => unreachable, } try self.ensureUnusedExtraCapacity(1, Instruction.Binary, 0); @@ -3665,22 +4325,6 @@ pub const WipFunction = struct { .fmul, .@"fmul fast" => &llvm.Builder.buildFMul, .frem, .@"frem fast" => &llvm.Builder.buildFRem, .fsub, .@"fsub fast" => &llvm.Builder.buildFSub, - .@"llvm.maxnum." => &llvm.Builder.buildMaxNum, - .@"llvm.minnum." => &llvm.Builder.buildMinNum, - .@"llvm.ctlz." => &llvm.Builder.buildCTLZ, - .@"llvm.cttz." => &llvm.Builder.buildCTTZ, - .@"llvm.sadd.sat." => &llvm.Builder.buildSAddSat, - .@"llvm.smax." => &llvm.Builder.buildSMax, - .@"llvm.smin." => &llvm.Builder.buildSMin, - .@"llvm.smul.fix.sat." => &llvm.Builder.buildSMulFixSat, - .@"llvm.sshl.sat." => &llvm.Builder.buildSShlSat, - .@"llvm.ssub.sat." => &llvm.Builder.buildSSubSat, - .@"llvm.uadd.sat." => &llvm.Builder.buildUAddSat, - .@"llvm.umax." => &llvm.Builder.buildUMax, - .@"llvm.umin." => &llvm.Builder.buildUMin, - .@"llvm.umul.fix.sat." => &llvm.Builder.buildUMulFixSat, - .@"llvm.ushl.sat." => &llvm.Builder.buildUShlSat, - .@"llvm.usub.sat." 
=> &llvm.Builder.buildUSubSat, .lshr => &llvm.Builder.buildLShr, .@"lshr exact" => &llvm.Builder.buildLShrExact, .mul => &llvm.Builder.buildMul, @@ -4419,7 +5063,7 @@ pub const WipFunction = struct { self: *WipFunction, function_attributes: FunctionAttributes, ty: Type, - kind: Constant.Asm.Info, + kind: Constant.Assembly.Info, assembly: String, constraints: String, args: []const Value, @@ -4429,6 +5073,25 @@ pub const WipFunction = struct { return self.call(.normal, CallConv.default, function_attributes, ty, callee, args, name); } + pub fn callIntrinsic( + self: *WipFunction, + id: Intrinsic, + overload: []const Type, + args: []const Value, + name: []const u8, + ) Allocator.Error!Value { + const intrinsic = try self.builder.getIntrinsic(id, overload); + return self.call( + .normal, + CallConv.default, + .none, + intrinsic.typeOf(self.builder), + intrinsic.toValue(self.builder), + args, + name, + ); + } + pub fn vaArg(self: *WipFunction, list: Value, ty: Type, name: []const u8) Allocator.Error!Value { try self.ensureUnusedExtraCapacity(1, Instruction.VaArg, 0); const instruction = try self.addInst(name, .{ @@ -4448,29 +5111,6 @@ pub const WipFunction = struct { return instruction.toValue(); } - pub fn fusedMultiplyAdd(self: *WipFunction, a: Value, b: Value, c: Value) Allocator.Error!Value { - assert(a.typeOfWip(self) == b.typeOfWip(self) and a.typeOfWip(self) == c.typeOfWip(self)); - try self.ensureUnusedExtraCapacity(1, Instruction.FusedMultiplyAdd, 0); - const instruction = try self.addInst("", .{ - .tag = .@"llvm.fma.", - .data = self.addExtraAssumeCapacity(Instruction.FusedMultiplyAdd{ - .a = a, - .b = b, - .c = c, - }), - }); - if (self.builder.useLibLlvm()) { - self.llvm.instructions.appendAssumeCapacity(llvm.Builder.buildFMA( - self.llvm.builder, - a.toLlvm(self), - b.toLlvm(self), - c.toLlvm(self), - instruction.llvmName(self), - )); - } - return instruction.toValue(); - } - pub const WipUnimplemented = struct { instruction: Instruction.Index, @@ -4697,22 +5337,6 @@ pub const WipFunction = struct { .@"icmp ugt", .@"icmp ule", .@"icmp ult", - .@"llvm.maxnum.", - .@"llvm.minnum.", - .@"llvm.ctlz.", - .@"llvm.cttz.", - .@"llvm.sadd.sat.", - .@"llvm.smax.", - .@"llvm.smin.", - .@"llvm.smul.fix.sat.", - .@"llvm.sshl.sat.", - .@"llvm.ssub.sat.", - .@"llvm.uadd.sat.", - .@"llvm.umax.", - .@"llvm.umin.", - .@"llvm.umul.fix.sat.", - .@"llvm.ushl.sat.", - .@"llvm.usub.sat.", .lshr, .@"lshr exact", .mul, @@ -4828,22 +5452,6 @@ pub const WipFunction = struct { .fneg, .@"fneg fast", .ret, - .@"llvm.ceil.", - .@"llvm.cos.", - .@"llvm.exp.", - .@"llvm.exp2.", - .@"llvm.fabs.", - .@"llvm.floor.", - .@"llvm.log.", - .@"llvm.log10.", - .@"llvm.log2.", - .@"llvm.round.", - .@"llvm.sin.", - .@"llvm.sqrt.", - .@"llvm.trunc.", - .@"llvm.bitreverse.", - .@"llvm.bswap.", - .@"llvm.ctpop.", => instruction.data = @intFromEnum(instructions.map(@enumFromInt(instruction.data))), .getelementptr, .@"getelementptr inbounds", @@ -4949,14 +5557,6 @@ pub const WipFunction = struct { .type = extra.type, }); }, - .@"llvm.fma." 
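// The fusedMultiplyAdd helper and the remaining @"llvm.*." pseudo-instruction
// plumbing removed in the surrounding hunks are replaced by going through the
// new callIntrinsic helper above. A minimal usage sketch, assuming a
// WipFunction `wip` and two i32 values `lhs` and `rhs` (names illustrative):

    const max = try wip.callIntrinsic(.smax, &.{.i32}, &.{ lhs, rhs }, "max");

// This declares (or reuses) @llvm.smax.i32 via getIntrinsic and emits an
// ordinary call, so no per-intrinsic ZigLLVMBuild* wrapper is required.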
=> { - const extra = self.extraData(Instruction.FusedMultiplyAdd, instruction.data); - instruction.data = wip_extra.addExtra(Instruction.FusedMultiplyAdd{ - .a = instructions.map(extra.a), - .b = instructions.map(extra.b), - .c = instructions.map(extra.c), - }); - }, } function.instructions.appendAssumeCapacity(instruction); names[@intFromEnum(new_instruction_index)] = wip_name.map(if (self.builder.strip) @@ -5627,7 +6227,7 @@ pub const Constant = enum(u32) { rhs: Constant, }; - pub const Asm = extern struct { + pub const Assembly = extern struct { type: Type, assembly: String, constraints: String, @@ -5651,7 +6251,7 @@ pub const Constant = enum(u32) { } pub fn toValue(self: Constant) Value { - return @enumFromInt(@intFromEnum(Value.first_constant) + @intFromEnum(self)); + return @enumFromInt(Value.first_constant + @intFromEnum(self)); } pub fn typeOf(self: Constant, builder: *Builder) Type { @@ -6139,7 +6739,7 @@ pub const Constant = enum(u32) { .@"asm alignstack inteldialect unwind", .@"asm sideeffect alignstack inteldialect unwind", => |tag| { - const extra = data.builder.constantExtraData(Asm, item.data); + const extra = data.builder.constantExtraData(Assembly, item.data); try writer.print("{s} {\"}, {\"}", .{ @tagName(tag), extra.assembly.fmt(data.builder), @@ -6166,18 +6766,20 @@ pub const Constant = enum(u32) { pub const Value = enum(u32) { none = std.math.maxInt(u31), + false = first_constant + @intFromEnum(Constant.false), + true = first_constant + @intFromEnum(Constant.true), _, - const first_constant: Value = @enumFromInt(1 << 31); + const first_constant = 1 << 31; pub fn unwrap(self: Value) union(enum) { instruction: Function.Instruction.Index, constant: Constant, } { - return if (@intFromEnum(self) < @intFromEnum(first_constant)) + return if (@intFromEnum(self) < first_constant) .{ .instruction = @enumFromInt(@intFromEnum(self)) } else - .{ .constant = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_constant)) }; + .{ .constant = @enumFromInt(@intFromEnum(self) - first_constant) }; } pub fn typeOfWip(self: Value, wip: *const WipFunction) Type { @@ -6349,8 +6951,11 @@ pub fn init(options: Options) InitError!Builder { inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits| assert(self.intTypeAssumeCapacity(bits) == @field(Type, std.fmt.comptimePrint("i{d}", .{bits}))); - inline for (.{0}) |addr_space| - assert(self.ptrTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr); + inline for (.{ 0, 4 }) |addr_space_index| { + const addr_space: AddrSpace = @enumFromInt(addr_space_index); + assert(self.ptrTypeAssumeCapacity(addr_space) == + @field(Type, std.fmt.comptimePrint("ptr{ }", .{addr_space}))); + } } { @@ -6883,17 +7488,136 @@ pub fn getGlobal(self: *const Builder, name: String) ?Global.Index { return @enumFromInt(self.globals.getIndex(name) orelse return null); } +pub fn addFunction(self: *Builder, ty: Type, name: String) Allocator.Error!Function.Index { + assert(!name.isAnon()); + try self.ensureUnusedTypeCapacity(1, NoExtra, 0); + try self.ensureUnusedGlobalCapacity(name); + try self.functions.ensureUnusedCapacity(self.gpa, 1); + return self.addFunctionAssumeCapacity(ty, name); +} + +pub fn addFunctionAssumeCapacity(self: *Builder, ty: Type, name: String) Function.Index { + assert(ty.isFunction(self)); + if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity( + self.llvm.module.?.addFunction(name.slice(self).?, ty.toLlvm(self)), + ); + const function_index: Function.Index = @enumFromInt(self.functions.items.len); + self.functions.appendAssumeCapacity(.{ .global = 
self.addGlobalAssumeCapacity(name, .{ + .type = ty, + .kind = .{ .function = function_index }, + }) }); + return function_index; +} + +pub fn getIntrinsic( + self: *Builder, + id: Intrinsic, + overload: []const Type, +) Allocator.Error!Function.Index { + const ExpectedContents = extern union { + name: [expected_intrinsic_name_len]u8, + attrs: extern struct { + params: [expected_args_len]Type, + fn_attrs: [FunctionAttributes.params_index + expected_args_len]Attributes, + attrs: [expected_attrs_len]Attribute.Index, + fields: [expected_fields_len]Type, + }, + }; + var stack align(@max(@alignOf(std.heap.StackFallbackAllocator(0)), @alignOf(ExpectedContents))) = + std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const allocator = stack.get(); + + const name = name: { + var buffer = std.ArrayList(u8).init(allocator); + defer buffer.deinit(); + + try buffer.writer().print("llvm.{s}", .{@tagName(id)}); + for (overload) |ty| try buffer.writer().print(".{m}", .{ty.fmt(self)}); + break :name try self.string(buffer.items); + }; + if (self.getGlobal(name)) |global| return global.ptrConst(self).kind.function; + + const signature = Intrinsic.signatures.get(id); + const param_types = try allocator.alloc(Type, signature.params.len); + defer allocator.free(param_types); + const function_attributes = + try allocator.alloc(Attributes, FunctionAttributes.return_index + signature.params.len); + defer allocator.free(function_attributes); + + var attributes: struct { + builder: *Builder, + list: std.ArrayList(Attribute.Index), + + fn deinit(state: *@This()) void { + state.list.deinit(); + state.* = undefined; + } + + fn get(state: *@This(), attributes: []const Attribute) Allocator.Error!Attributes { + try state.list.resize(attributes.len); + for (state.list.items, attributes) |*item, attribute| + item.* = try state.builder.attr(attribute); + return state.builder.attrs(state.list.items); + } + } = .{ .builder = self, .list = std.ArrayList(Attribute.Index).init(allocator) }; + defer attributes.deinit(); + + var overload_index: usize = 0; + function_attributes[FunctionAttributes.function_index] = try attributes.get(signature.attrs); + for ( + param_types, + function_attributes[FunctionAttributes.return_index..], + signature.params, + ) |*param_type, *param_attributes, signature_param| { + switch (signature_param.kind) { + .type => |ty| param_type.* = ty, + .overloaded => { + param_type.* = overload[overload_index]; + overload_index += 1; + }, + .overloaded_tuple => |len| { + const fields = try allocator.alloc(Type, len); + defer allocator.free(fields); + for (fields, overload[overload_index..][0..len]) |*field, ty| field.* = ty; + param_type.* = try self.structType(.normal, fields); + overload_index += len; + }, + .matches, .matches_tuple, .matches_with_overflow => {}, + } + param_attributes.* = try attributes.get(signature_param.attrs); + } + assert(overload_index == overload.len); + for (param_types, signature.params) |*param_type, signature_param| switch (signature_param.kind) { + .type, .overloaded, .overloaded_tuple => {}, + .matches => |param_index| param_type.* = param_types[param_index], + .matches_tuple => |tuple| param_type.* = + param_types[tuple.param].structFields(self)[tuple.field], + .matches_with_overflow => |param_index| { + const ty = param_types[param_index]; + param_type.* = try self.structType(.normal, &.{ ty, try ty.changeScalar(.i1, self) }); + }, + }; + + const function_index = + try self.addFunction(try self.fnType(param_types[0], param_types[1..], .normal), name); + 
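// Sketch of resolving an intrinsic declaration directly (callIntrinsic above
// funnels into this function); `builder: *Builder` is an assumed local:

    const ctlz_fn = try builder.getIntrinsic(.ctlz, &.{.i32});

// The overload slice fills the single `.overloaded` parameter of the .ctlz
// signature, so the interned name is "llvm.ctlz.i32"; a later call with the
// same overload takes the getGlobal early return and reuses this declaration
// instead of adding a second function.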
function_index.ptr(self).attributes = try self.fnAttrs(function_attributes); + return function_index; +} + pub fn intConst(self: *Builder, ty: Type, value: anytype) Allocator.Error!Constant { + const int_value = switch (@typeInfo(@TypeOf(value))) { + .Int, .ComptimeInt => value, + .Enum => @intFromEnum(value), + else => @compileError("intConst expected an integral value, got " ++ @typeName(@TypeOf(value))), + }; var limbs: [ - switch (@typeInfo(@TypeOf(value))) { + switch (@typeInfo(@TypeOf(int_value))) { .Int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits), - .ComptimeInt => std.math.big.int.calcLimbLen(value), - else => @compileError( - "intConst expected an integral value, got " ++ @typeName(@TypeOf(value)), - ), + .ComptimeInt => std.math.big.int.calcLimbLen(int_value), + else => unreachable, } ]std.math.big.Limb = undefined; - return self.bigIntConst(ty, std.math.big.int.Mutable.init(&limbs, value).toConst()); + return self.bigIntConst(ty, std.math.big.int.Mutable.init(&limbs, int_value).toConst()); } pub fn intValue(self: *Builder, ty: Type, value: anytype) Allocator.Error!Value { @@ -7304,18 +8028,18 @@ pub fn binValue(self: *Builder, tag: Constant.Tag, lhs: Constant, rhs: Constant) pub fn asmConst( self: *Builder, ty: Type, - info: Constant.Asm.Info, + info: Constant.Assembly.Info, assembly: String, constraints: String, ) Allocator.Error!Constant { - try self.ensureUnusedConstantCapacity(1, Constant.Asm, 0); + try self.ensureUnusedConstantCapacity(1, Constant.Assembly, 0); return self.asmConstAssumeCapacity(ty, info, assembly, constraints); } pub fn asmValue( self: *Builder, ty: Type, - info: Constant.Asm.Info, + info: Constant.Assembly.Info, assembly: String, constraints: String, ) Allocator.Error!Value { @@ -7413,7 +8137,7 @@ pub fn printUnbuffered( if (variable.global.getReplacement(self) != .none) continue; const global = variable.global.ptrConst(self); try writer.print( - \\{} ={}{}{}{}{}{}{}{} {s} {%}{ }{, } + \\{} ={}{}{}{}{}{}{ }{} {s} {%}{ }{, } \\ , .{ variable.global.fmt(self), @@ -7472,7 +8196,9 @@ pub fn printUnbuffered( function.attributes.param(arg, self).fmt(self), }); if (function.instructions.len > 0) - try writer.print(" {}", .{function.arg(@intCast(arg)).fmt(function_index, self)}); + try writer.print(" {}", .{function.arg(@intCast(arg)).fmt(function_index, self)}) + else + try writer.print(" %{d}", .{arg}); } switch (global.type.functionKind(self)) { .normal => {}, @@ -7481,11 +8207,11 @@ pub fn printUnbuffered( try writer.writeAll("..."); }, } - try writer.print("){}{}", .{ global.unnamed_addr, global.addr_space }); + try writer.print("){}{ }", .{ global.unnamed_addr, global.addr_space }); if (function_attributes != .none) try writer.print(" #{d}", .{ (try attribute_groups.getOrPutValue(self.gpa, function_attributes, {})).index, }); - try writer.print("{}", .{function.alignment}); + try writer.print("{ }", .{function.alignment}); if (function.instructions.len > 0) { var block_incoming_len: u32 = undefined; try writer.writeAll(" {\n"); @@ -7735,33 +8461,6 @@ pub fn printUnbuffered( val.fmt(function_index, self), }); }, - .@"llvm.ceil.", - .@"llvm.cos.", - .@"llvm.exp.", - .@"llvm.exp2.", - .@"llvm.fabs.", - .@"llvm.floor.", - .@"llvm.log.", - .@"llvm.log10.", - .@"llvm.log2.", - .@"llvm.round.", - .@"llvm.sin.", - .@"llvm.sqrt.", - .@"llvm.trunc.", - .@"llvm.bitreverse.", - .@"llvm.bswap.", - .@"llvm.ctpop.", - => |tag| { - const val: Value = @enumFromInt(instruction.data); - const ty = val.typeOf(function_index, self); - try writer.print(" %{} = 
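// With the new .Enum branch above, intConst also accepts enum values directly
// by converting them through @intFromEnum. A hedged sketch (`builder` and the
// enum type `Tag` are placeholders, not part of this change):

    const tag_const = try builder.intConst(.i8, Tag.some_field);

// which yields the same constant as passing @intFromEnum(Tag.some_field).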
call {%} @{s}{m}({%})\n", .{ - instruction_index.name(&function).fmt(self), - ty.fmt(self), - @tagName(tag), - ty.fmt(self), - val.fmt(function_index, self), - }); - }, .getelementptr, .@"getelementptr inbounds", => |tag| { @@ -7809,41 +8508,6 @@ pub fn printUnbuffered( for (indices) |index| try writer.print(", {d}", .{index}); try writer.writeByte('\n'); }, - .@"llvm.maxnum.", - .@"llvm.minnum.", - .@"llvm.ctlz.", - .@"llvm.cttz.", - .@"llvm.sadd.sat.", - .@"llvm.smax.", - .@"llvm.smin.", - .@"llvm.smul.fix.sat.", - .@"llvm.sshl.sat.", - .@"llvm.ssub.sat.", - .@"llvm.uadd.sat.", - .@"llvm.umax.", - .@"llvm.umin.", - .@"llvm.umul.fix.sat.", - .@"llvm.ushl.sat.", - .@"llvm.usub.sat.", - => |tag| { - const extra = - function.extraData(Function.Instruction.Binary, instruction.data); - const ty = instruction_index.typeOf(function_index, self); - try writer.print(" %{} = call {%} @{s}{m}({%}, {%}{s})\n", .{ - instruction_index.name(&function).fmt(self), - ty.fmt(self), - @tagName(tag), - ty.fmt(self), - extra.lhs.fmt(function_index, self), - extra.rhs.fmt(function_index, self), - switch (tag) { - .@"llvm.smul.fix.sat.", - .@"llvm.umul.fix.sat.", - => ", i32 0", - else => "", - }, - }); - }, .load, .@"load atomic", .@"load atomic volatile", @@ -7984,20 +8648,6 @@ pub fn printUnbuffered( extra.type.fmt(self), }); }, - .@"llvm.fma." => |tag| { - const extra = - function.extraData(Function.Instruction.FusedMultiplyAdd, instruction.data); - const ty = instruction_index.typeOf(function_index, self); - try writer.print(" %{} = call {%} @{s}{m}({%}, {%}, {%})\n", .{ - instruction_index.name(&function).fmt(self), - ty.fmt(self), - @tagName(tag), - ty.fmt(self), - extra.a.fmt(function_index, self), - extra.b.fmt(function_index, self), - extra.c.fmt(function_index, self), - }); - }, } } try writer.writeByte('}'); @@ -9623,13 +10273,13 @@ fn binConstAssumeCapacity( fn asmConstAssumeCapacity( self: *Builder, ty: Type, - info: Constant.Asm.Info, + info: Constant.Assembly.Info, assembly: String, constraints: String, ) Constant { assert(ty.functionKind(self) == .normal); - const Key = struct { tag: Constant.Tag, extra: Constant.Asm }; + const Key = struct { tag: Constant.Tag, extra: Constant.Assembly }; const Adapter = struct { builder: *const Builder, pub fn hash(_: @This(), key: Key) u32 { @@ -9641,7 +10291,7 @@ fn asmConstAssumeCapacity( pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false; const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; - const rhs_extra = ctx.builder.constantExtraData(Constant.Asm, rhs_data); + const rhs_extra = ctx.builder.constantExtraData(Constant.Assembly, rhs_data); return std.meta.eql(lhs_key.extra, rhs_extra); } }; diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index fd111ac94f..9220c30a09 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -345,9 +345,6 @@ pub const Value = opaque { pub const addSretAttr = ZigLLVMAddSretAttr; extern fn ZigLLVMAddSretAttr(fn_ref: *Value, type_val: *Type) void; - pub const setCallSret = ZigLLVMSetCallSret; - extern fn ZigLLVMSetCallSret(Call: *Value, return_type: *Type) void; - pub const getParam = LLVMGetParam; extern fn LLVMGetParam(Fn: *Value, Index: c_uint) *Value; @@ -488,9 +485,6 @@ pub const Module = opaque { pub const getNamedFunction = LLVMGetNamedFunction; extern fn LLVMGetNamedFunction(*Module, Name: [*:0]const u8) ?*Value; - pub const getIntrinsicDeclaration 
= LLVMGetIntrinsicDeclaration; - extern fn LLVMGetIntrinsicDeclaration(Mod: *Module, ID: c_uint, ParamTypes: ?[*]const *Type, ParamCount: usize) *Value; - pub const printToString = LLVMPrintModuleToString; extern fn LLVMPrintModuleToString(*Module) [*:0]const u8; @@ -664,18 +658,6 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *Value; - pub const buildCallOld = ZigLLVMBuildCall; - extern fn ZigLLVMBuildCall( - *Builder, - *Type, - Fn: *Value, - Args: [*]const *Value, - NumArgs: c_uint, - CC: CallConv, - attr: CallAttr, - Name: [*:0]const u8, - ) *Value; - pub const buildRetVoid = LLVMBuildRetVoid; extern fn LLVMBuildRetVoid(*Builder) *Value; @@ -712,12 +694,6 @@ pub const Builder = opaque { pub const buildNUWAdd = LLVMBuildNUWAdd; extern fn LLVMBuildNUWAdd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildSAddSat = ZigLLVMBuildSAddSat; - extern fn ZigLLVMBuildSAddSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - - pub const buildUAddSat = ZigLLVMBuildUAddSat; - extern fn ZigLLVMBuildUAddSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildFSub = LLVMBuildFSub; extern fn LLVMBuildFSub(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; @@ -733,12 +709,6 @@ pub const Builder = opaque { pub const buildNUWSub = LLVMBuildNUWSub; extern fn LLVMBuildNUWSub(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildSSubSat = ZigLLVMBuildSSubSat; - extern fn ZigLLVMBuildSSubSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - - pub const buildUSubSat = ZigLLVMBuildUSubSat; - extern fn ZigLLVMBuildUSubSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildFMul = LLVMBuildFMul; extern fn LLVMBuildFMul(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; @@ -751,12 +721,6 @@ pub const Builder = opaque { pub const buildNUWMul = LLVMBuildNUWMul; extern fn LLVMBuildNUWMul(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildSMulFixSat = ZigLLVMBuildSMulFixSat; - extern fn ZigLLVMBuildSMulFixSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - - pub const buildUMulFixSat = ZigLLVMBuildUMulFixSat; - extern fn ZigLLVMBuildUMulFixSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildUDiv = LLVMBuildUDiv; extern fn LLVMBuildUDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; @@ -799,21 +763,12 @@ pub const Builder = opaque { pub const buildNSWShl = ZigLLVMBuildNSWShl; extern fn ZigLLVMBuildNSWShl(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildSShlSat = ZigLLVMBuildSShlSat; - extern fn ZigLLVMBuildSShlSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - - pub const buildUShlSat = ZigLLVMBuildUShlSat; - extern fn ZigLLVMBuildUShlSat(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildOr = LLVMBuildOr; extern fn LLVMBuildOr(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; pub const buildXor = LLVMBuildXor; extern fn LLVMBuildXor(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; - pub const buildIntCast2 = LLVMBuildIntCast2; - extern fn LLVMBuildIntCast2(*Builder, Val: *Value, DestTy: *Type, IsSigned: Bool, Name: [*:0]const u8) *Value; - pub const buildBitCast = LLVMBuildBitCast; extern fn LLVMBuildBitCast(*Builder, Val: *Value, DestTy: *Type, Name: [*:0]const u8) *Value; @@ -1020,81 +975,6 @@ pub const Builder = 
opaque { is_volatile: bool, ) *Value; - pub const buildMaxNum = ZigLLVMBuildMaxNum; - extern fn ZigLLVMBuildMaxNum(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildMinNum = ZigLLVMBuildMinNum; - extern fn ZigLLVMBuildMinNum(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildCeil = ZigLLVMBuildCeil; - extern fn ZigLLVMBuildCeil(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildCos = ZigLLVMBuildCos; - extern fn ZigLLVMBuildCos(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildExp = ZigLLVMBuildExp; - extern fn ZigLLVMBuildExp(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildExp2 = ZigLLVMBuildExp2; - extern fn ZigLLVMBuildExp2(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildFAbs = ZigLLVMBuildFAbs; - extern fn ZigLLVMBuildFAbs(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildFloor = ZigLLVMBuildFloor; - extern fn ZigLLVMBuildFloor(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildLog = ZigLLVMBuildLog; - extern fn ZigLLVMBuildLog(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildLog10 = ZigLLVMBuildLog10; - extern fn ZigLLVMBuildLog10(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildLog2 = ZigLLVMBuildLog2; - extern fn ZigLLVMBuildLog2(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildRound = ZigLLVMBuildRound; - extern fn ZigLLVMBuildRound(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildSin = ZigLLVMBuildSin; - extern fn ZigLLVMBuildSin(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildSqrt = ZigLLVMBuildSqrt; - extern fn ZigLLVMBuildSqrt(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildFTrunc = ZigLLVMBuildFTrunc; - extern fn ZigLLVMBuildFTrunc(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildBitReverse = ZigLLVMBuildBitReverse; - extern fn ZigLLVMBuildBitReverse(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildBSwap = ZigLLVMBuildBSwap; - extern fn ZigLLVMBuildBSwap(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildCTPop = ZigLLVMBuildCTPop; - extern fn ZigLLVMBuildCTPop(builder: *Builder, V: *Value, name: [*:0]const u8) *Value; - - pub const buildCTLZ = ZigLLVMBuildCTLZ; - extern fn ZigLLVMBuildCTLZ(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildCTTZ = ZigLLVMBuildCTTZ; - extern fn ZigLLVMBuildCTTZ(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildFMA = ZigLLVMBuildFMA; - extern fn ZigLLVMBuildFMA(builder: *Builder, a: *Value, b: *Value, c: *Value, name: [*:0]const u8) *Value; - - pub const buildUMax = ZigLLVMBuildUMax; - extern fn ZigLLVMBuildUMax(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildUMin = ZigLLVMBuildUMin; - extern fn ZigLLVMBuildUMin(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildSMax = ZigLLVMBuildSMax; - extern fn ZigLLVMBuildSMax(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - - pub const buildSMin = ZigLLVMBuildSMin; - extern fn ZigLLVMBuildSMin(builder: *Builder, LHS: *Value, RHS: *Value, name: [*:0]const u8) *Value; - pub const buildExactUDiv = 
LLVMBuildExactUDiv; extern fn LLVMBuildExactUDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; @@ -1563,9 +1443,6 @@ extern fn ZigLLVMWriteImportLibrary( kill_at: bool, ) bool; -pub const setCallElemTypeAttr = ZigLLVMSetCallElemTypeAttr; -extern fn ZigLLVMSetCallElemTypeAttr(Call: *Value, arg_index: usize, return_type: *Type) void; - pub const Linkage = enum(c_uint) { External, AvailableExternally, @@ -1784,9 +1661,6 @@ pub const DIGlobalVariable = opaque { pub const DIGlobalVariableExpression = opaque { pub const getVariable = ZigLLVMGlobalGetVariable; extern fn ZigLLVMGlobalGetVariable(global_variable: *DIGlobalVariableExpression) *DIGlobalVariable; - - pub const getExpression = ZigLLVMGlobalGetExpression; - extern fn ZigLLVMGlobalGetExpression(global_variable: *DIGlobalVariableExpression) *DIGlobalExpression; }; pub const DIType = opaque { pub const toScope = ZigLLVMTypeToScope; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index e6568e97a8..5b599d8e0e 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -78,30 +78,6 @@ using namespace llvm; -void ZigLLVMInitializeLoopStrengthReducePass(LLVMPassRegistryRef R) { - initializeLoopStrengthReducePass(*unwrap(R)); -} - -void ZigLLVMInitializeLowerIntrinsicsPass(LLVMPassRegistryRef R) { - initializeLowerIntrinsicsPass(*unwrap(R)); -} - -char *ZigLLVMGetHostCPUName(void) { - return strdup((const char *)sys::getHostCPUName().bytes_begin()); -} - -char *ZigLLVMGetNativeFeatures(void) { - SubtargetFeatures features; - - StringMap host_features; - if (sys::getHostCPUFeatures(host_features)) { - for (auto &F : host_features) - features.AddFeature(F.first(), F.second); - } - - return strdup((const char *)StringRef(features.getString()).bytes_begin()); -} - #ifndef NDEBUG static const bool assertions_on = true; #else @@ -179,14 +155,6 @@ LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, const char *Tri return reinterpret_cast(TM); } -unsigned ZigLLVMDataLayoutGetStackAlignment(LLVMTargetDataRef TD) { - return unwrap(TD)->getStackAlignment().value(); -} - -unsigned ZigLLVMDataLayoutGetProgramAddressSpace(LLVMTargetDataRef TD) { - return unwrap(TD)->getProgramAddressSpace(); -} - namespace { // LLVM's time profiler can provide a hierarchy view of the time spent // in each component. 
It generates JSON report in Chrome's "Trace Event" @@ -410,12 +378,7 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM return false; } -ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) { - return wrap(Type::getTokenTy(*unwrap(context_ref))); -} - - -ZIG_EXTERN_C void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit) { +void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit) { static OptBisect opt_bisect; opt_bisect.setLimit(limit); unwrap(context_ref)->setOptPassGate(opt_bisect); @@ -426,35 +389,23 @@ LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, return wrap(func); } -LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn, - LLVMValueRef *Args, unsigned NumArgs, ZigLLVM_CallingConv CC, ZigLLVM_CallAttr attr, - const char *Name) -{ - FunctionType *FTy = unwrap(Ty); - CallInst *call_inst = unwrap(B)->CreateCall(FTy, unwrap(Fn), - ArrayRef(unwrap(Args), NumArgs), Name); - call_inst->setCallingConv(static_cast(CC)); - switch (attr) { - case ZigLLVM_CallAttrAuto: - break; - case ZigLLVM_CallAttrNeverTail: - call_inst->setTailCallKind(CallInst::TCK_NoTail); +void ZigLLVMSetTailCallKind(LLVMValueRef Call, enum ZigLLVMTailCallKind TailCallKind) { + CallInst::TailCallKind TCK; + switch (TailCallKind) { + case ZigLLVMTailCallKindNone: + TCK = CallInst::TCK_None; break; - case ZigLLVM_CallAttrNeverInline: - call_inst->addFnAttr(Attribute::NoInline); + case ZigLLVMTailCallKindTail: + TCK = CallInst::TCK_Tail; break; - case ZigLLVM_CallAttrAlwaysTail: - call_inst->setTailCallKind(CallInst::TCK_MustTail); + case ZigLLVMTailCallKindMustTail: + TCK = CallInst::TCK_MustTail; break; - case ZigLLVM_CallAttrAlwaysInline: - call_inst->addFnAttr(Attribute::AlwaysInline); + case ZigLLVMTailCallKindNoTail: + TCK = CallInst::TCK_NoTail; break; } - return wrap(call_inst); -} - -ZIG_EXTERN_C void ZigLLVMSetTailCallKind(LLVMValueRef Call, CallInst::TailCallKind TailCallKind) { - unwrap(Call)->setTailCallKind(TailCallKind); + unwrap(Call)->setTailCallKind(TCK); } void ZigLLVMAddAttributeAtIndex(LLVMValueRef Val, unsigned Idx, LLVMAttributeRef A) { @@ -481,188 +432,6 @@ LLVMValueRef ZigLLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, LLVMValueRef return wrap(call_inst); } -LLVMValueRef ZigLLVMBuildCeil(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::ceil, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildCos(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::cos, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildExp(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::exp, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildExp2(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::exp2, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildFAbs(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::fabs, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildFloor(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = 
unwrap(B)->CreateUnaryIntrinsic(Intrinsic::floor, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildLog(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::log, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildLog10(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::log10, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildLog2(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::log2, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildRound(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::round, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSin(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::sin, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSqrt(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::sqrt, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildFTrunc(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::trunc, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildBitReverse(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::bitreverse, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildBSwap(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::bswap, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildCTPop(LLVMBuilderRef B, LLVMValueRef V, const char *name) { - CallInst *call_inst = unwrap(B)->CreateUnaryIntrinsic(Intrinsic::ctpop, unwrap(V), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildCTLZ(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::ctlz, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildCTTZ(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::cttz, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildFMA(LLVMBuilderRef builder, LLVMValueRef A, LLVMValueRef B, LLVMValueRef C, const char *name) { - llvm::Type* types[1] = { - unwrap(A)->getType(), - }; - llvm::Value* values[3] = {unwrap(A), unwrap(B), unwrap(C)}; - - CallInst *call_inst = unwrap(builder)->CreateIntrinsic(Intrinsic::fma, types, values, nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildMaxNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateMaxNum(unwrap(LHS), unwrap(RHS), name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateMinNum(unwrap(LHS), 
unwrap(RHS), name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildUMax(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::umax, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildUMin(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::umin, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSMax(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::smax, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSMin(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::smin, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSAddSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::sadd_sat, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildUAddSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::uadd_sat, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSSubSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::ssub_sat, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildUSubSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::usub_sat, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - llvm::Type* types[1] = { - unwrap(LHS)->getType(), - }; - // pass scale = 0 as third argument - llvm::Value* values[3] = {unwrap(LHS), unwrap(RHS), unwrap(B)->getInt32(0)}; - - CallInst *call_inst = unwrap(B)->CreateIntrinsic(Intrinsic::smul_fix_sat, types, values, nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildUMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - llvm::Type* types[1] = { - unwrap(LHS)->getType(), - }; - // pass scale = 0 as third argument - llvm::Value* values[3] = {unwrap(LHS), unwrap(RHS), unwrap(B)->getInt32(0)}; - - CallInst *call_inst = unwrap(B)->CreateIntrinsic(Intrinsic::umul_fix_sat, types, values, nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildSShlSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::sshl_sat, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - -LLVMValueRef ZigLLVMBuildUShlSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name) { - CallInst *call_inst = unwrap(B)->CreateBinaryIntrinsic(Intrinsic::ushl_sat, unwrap(LHS), unwrap(RHS), nullptr, name); - return wrap(call_inst); -} - void ZigLLVMFnSetSubprogram(LLVMValueRef fn, ZigLLVMDISubprogram *subprogram) { assert( 
isa(unwrap(fn)) ); Function *unwrapped_function = reinterpret_cast(unwrap(fn)); @@ -1206,14 +975,6 @@ void ZigLLVMAddSretAttr(LLVMValueRef fn_ref, LLVMTypeRef type_val) { func->addParamAttrs(0, attr_builder); } -void ZigLLVMAddFunctionElemTypeAttr(LLVMValueRef fn_ref, size_t arg_index, LLVMTypeRef elem_ty) { - Function *func = unwrap(fn_ref); - AttrBuilder attr_builder(func->getContext()); - Type *llvm_type = unwrap(elem_ty); - attr_builder.addTypeAttr(Attribute::ElementType, llvm_type); - func->addParamAttrs(arg_index, attr_builder); -} - void ZigLLVMAddFunctionAttr(LLVMValueRef fn_ref, const char *attr_name, const char *attr_value) { Function *func = unwrap(fn_ref); func->addFnAttr(attr_name, attr_value); @@ -1223,40 +984,6 @@ void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv) { cl::ParseCommandLineOptions(argc, argv); } -const char *ZigLLVMGetArchTypeName(ZigLLVM_ArchType arch) { - return (const char*)Triple::getArchTypeName((Triple::ArchType)arch).bytes_begin(); -} - -const char *ZigLLVMGetVendorTypeName(ZigLLVM_VendorType vendor) { - return (const char*)Triple::getVendorTypeName((Triple::VendorType)vendor).bytes_begin(); -} - -const char *ZigLLVMGetOSTypeName(ZigLLVM_OSType os) { - const char* name = (const char*)Triple::getOSTypeName((Triple::OSType)os).bytes_begin(); - if (strcmp(name, "macosx") == 0) return "macos"; - return name; -} - -const char *ZigLLVMGetEnvironmentTypeName(ZigLLVM_EnvironmentType env_type) { - return (const char*)Triple::getEnvironmentTypeName((Triple::EnvironmentType)env_type).bytes_begin(); -} - -void ZigLLVMGetNativeTarget(ZigLLVM_ArchType *arch_type, - ZigLLVM_VendorType *vendor_type, ZigLLVM_OSType *os_type, ZigLLVM_EnvironmentType *environ_type, - ZigLLVM_ObjectFormatType *oformat) -{ - char *native_triple = LLVMGetDefaultTargetTriple(); - Triple triple(Triple::normalize(native_triple)); - - *arch_type = (ZigLLVM_ArchType)triple.getArch(); - *vendor_type = (ZigLLVM_VendorType)triple.getVendor(); - *os_type = (ZigLLVM_OSType)triple.getOS(); - *environ_type = (ZigLLVM_EnvironmentType)triple.getEnvironment(); - *oformat = (ZigLLVM_ObjectFormatType)triple.getObjectFormat(); - - free(native_triple); -} - void ZigLLVMAddModuleDebugInfoFlag(LLVMModuleRef module, bool produce_dwarf64) { unwrap(module)->addModuleFlag(Module::Warning, "Debug Info Version", DEBUG_METADATA_VERSION); unwrap(module)->addModuleFlag(Module::Warning, "Dwarf Version", 4); @@ -1314,50 +1041,6 @@ LLVMValueRef ZigLLVMBuildAllocaInAddressSpace(LLVMBuilderRef builder, LLVMTypeRe return wrap(unwrap(builder)->CreateAlloca(unwrap(Ty), AddressSpace, nullptr, Name)); } -void ZigLLVMSetTailCall(LLVMValueRef Call) { - unwrap(Call)->setTailCallKind(CallInst::TCK_MustTail); -} - -void ZigLLVMSetCallSret(LLVMValueRef Call, LLVMTypeRef return_type) { - CallInst *call_inst = unwrap(Call); - Type *llvm_type = unwrap(return_type); - call_inst->addParamAttr(AttributeList::ReturnIndex, - Attribute::getWithStructRetType(call_inst->getContext(), llvm_type)); -} - -void ZigLLVMSetCallElemTypeAttr(LLVMValueRef Call, size_t arg_index, LLVMTypeRef return_type) { - CallInst *call_inst = unwrap(Call); - Type *llvm_type = unwrap(return_type); - call_inst->addParamAttr(arg_index, - Attribute::get(call_inst->getContext(), Attribute::ElementType, llvm_type)); -} - -void ZigLLVMFunctionSetPrefixData(LLVMValueRef function, LLVMValueRef data) { - unwrap(function)->setPrefixData(unwrap(data)); -} - -void ZigLLVMFunctionSetCallingConv(LLVMValueRef function, ZigLLVM_CallingConv cc) { - 
unwrap(function)->setCallingConv(static_cast(cc)); -} - -class MyOStream: public raw_ostream { - public: - MyOStream(void (*_append_diagnostic)(void *, const char *, size_t), void *_context) : - raw_ostream(true), append_diagnostic(_append_diagnostic), context(_context), pos(0) { - - } - void write_impl(const char *ptr, size_t len) override { - append_diagnostic(context, ptr, len); - pos += len; - } - uint64_t current_pos() const override { - return pos; - } - void (*append_diagnostic)(void *, const char *, size_t); - void *context; - size_t pos; -}; - bool ZigLLVMWriteImportLibrary(const char *def_path, const ZigLLVM_ArchType arch, const char *output_lib_path, bool kill_at) { @@ -1549,10 +1232,6 @@ ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpress return reinterpret_cast(reinterpret_cast(global_variable_expression)->getVariable()); } -ZigLLVMDIGlobalExpression* ZigLLVMGlobalGetExpression(ZigLLVMDIGlobalVariableExpression *global_variable_expression) { - return reinterpret_cast(reinterpret_cast(global_variable_expression)->getExpression()); -} - void ZigLLVMAttachMetaData(LLVMValueRef Val, ZigLLVMDIGlobalVariableExpression *global_variable_expression) { unwrap(Val)->addDebugInfo(reinterpret_cast(global_variable_expression)); } diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 3f5fefb85b..258e846032 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -43,13 +43,6 @@ struct ZigLLVMInsertionPoint; struct ZigLLVMDINode; struct ZigLLVMMDString; -ZIG_EXTERN_C void ZigLLVMInitializeLoopStrengthReducePass(LLVMPassRegistryRef R); -ZIG_EXTERN_C void ZigLLVMInitializeLowerIntrinsicsPass(LLVMPassRegistryRef R); - -/// Caller must free memory with LLVMDisposeMessage -ZIG_EXTERN_C char *ZigLLVMGetHostCPUName(void); -ZIG_EXTERN_C char *ZigLLVMGetNativeFeatures(void); - ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref, char **error_message, bool is_debug, bool is_small, bool time_report, bool tsan, bool lto, @@ -67,13 +60,20 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co const char *CPU, const char *Features, LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc, LLVMCodeModel CodeModel, bool function_sections, enum ZigLLVMABIType float_abi, const char *abi_name); -ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref); - ZIG_EXTERN_C void ZigLLVMSetOptBisectLimit(LLVMContextRef context_ref, int limit); ZIG_EXTERN_C LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy, unsigned AddressSpace); +enum ZigLLVMTailCallKind { + ZigLLVMTailCallKindNone, + ZigLLVMTailCallKindTail, + ZigLLVMTailCallKindMustTail, + ZigLLVMTailCallKindNoTail, +}; + +ZIG_EXTERN_C void ZigLLVMSetTailCallKind(LLVMValueRef Call, enum ZigLLVMTailCallKind TailCallKind); + enum ZigLLVM_CallingConv { ZigLLVM_C = 0, ZigLLVM_Fast = 8, @@ -129,10 +129,6 @@ enum ZigLLVM_CallAttr { ZigLLVM_CallAttrAlwaysTail, ZigLLVM_CallAttrAlwaysInline, }; -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMTypeRef function_type, - LLVMValueRef Fn, LLVMValueRef *Args, unsigned NumArgs, enum ZigLLVM_CallingConv CC, - enum ZigLLVM_CallAttr attr, const char *Name); - ZIG_EXTERN_C void ZigLLVMAddAttributeAtIndex(LLVMValueRef Val, unsigned Idx, LLVMAttributeRef A); ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign, @@ -141,47 +137,6 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, 
LLVMValueRef Dst, ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, LLVMValueRef Val, LLVMValueRef Size, unsigned Align, bool isVolatile); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCeil(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCos(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildExp(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildExp2(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFAbs(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFloor(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildLog(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildLog10(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildLog2(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildRound(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSin(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSqrt(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFTrunc(LLVMBuilderRef builder, LLVMValueRef V, const char* name); - -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildBitReverse(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildBSwap(LLVMBuilderRef builder, LLVMValueRef V, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCTPop(LLVMBuilderRef builder, LLVMValueRef V, const char* name); - -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCTLZ(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildCTTZ(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); - -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFMA(LLVMBuilderRef builder, LLVMValueRef A, LLVMValueRef B, LLVMValueRef C, const char* name); - -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMaxNum(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMinNum(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); - -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUMax(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUMin(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSMax(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSMin(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUAddSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSAddSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUSubSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSSubSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, 
LLVMValueRef RHS, const char *name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUMulFixSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS, const char *name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildUShlSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildSShlSat(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char* name); -ZIG_EXTERN_C LLVMValueRef LLVMBuildVectorSplat(LLVMBuilderRef B, unsigned elem_count, LLVMValueRef V, const char *Name); - - ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, const char *name); ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNUWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS, @@ -345,22 +300,15 @@ ZIG_EXTERN_C LLVMValueRef ZigLLVMInsertDbgValueIntrinsicAtEnd(struct ZigLLVMDIBu struct ZigLLVMDILocation *debug_loc, LLVMBasicBlockRef basic_block_ref); ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state); -ZIG_EXTERN_C void ZigLLVMSetTailCall(LLVMValueRef Call); -ZIG_EXTERN_C void ZigLLVMSetCallSret(LLVMValueRef Call, LLVMTypeRef return_type); -ZIG_EXTERN_C void ZigLLVMSetCallElemTypeAttr(LLVMValueRef Call, size_t arg_index, LLVMTypeRef return_type); -ZIG_EXTERN_C void ZigLLVMFunctionSetPrefixData(LLVMValueRef fn, LLVMValueRef data); -ZIG_EXTERN_C void ZigLLVMFunctionSetCallingConv(LLVMValueRef function, enum ZigLLVM_CallingConv cc); ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value); ZIG_EXTERN_C void ZigLLVMAddByValAttr(LLVMValueRef fn_ref, unsigned ArgNo, LLVMTypeRef type_val); ZIG_EXTERN_C void ZigLLVMAddSretAttr(LLVMValueRef fn_ref, LLVMTypeRef type_val); -ZIG_EXTERN_C void ZigLLVMAddFunctionElemTypeAttr(LLVMValueRef fn_ref, size_t arg_index, LLVMTypeRef elem_ty); ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn); ZIG_EXTERN_C void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv); ZIG_EXTERN_C ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpression *global_variable_expression); -ZIG_EXTERN_C ZigLLVMDIGlobalExpression* ZigLLVMGlobalGetExpression(ZigLLVMDIGlobalVariableExpression *global_variable_expression); ZIG_EXTERN_C void ZigLLVMAttachMetaData(LLVMValueRef Val, ZigLLVMDIGlobalVariableExpression *global_variable_expression); @@ -610,11 +558,6 @@ ZIG_EXTERN_C void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim); #define ZigLLVM_DIFlags_LittleEndian (1U << 28) #define ZigLLVM_DIFlags_AllCallsDescribed (1U << 29) -ZIG_EXTERN_C const char *ZigLLVMGetArchTypeName(enum ZigLLVM_ArchType arch); -ZIG_EXTERN_C const char *ZigLLVMGetVendorTypeName(enum ZigLLVM_VendorType vendor); -ZIG_EXTERN_C const char *ZigLLVMGetOSTypeName(enum ZigLLVM_OSType os); -ZIG_EXTERN_C const char *ZigLLVMGetEnvironmentTypeName(enum ZigLLVM_EnvironmentType abi); - ZIG_EXTERN_C bool ZigLLDLinkCOFF(int argc, const char **argv, bool can_exit_early, bool disable_output); ZIG_EXTERN_C bool ZigLLDLinkELF(int argc, const char **argv, bool can_exit_early, bool disable_output); ZIG_EXTERN_C bool ZigLLDLinkWasm(int argc, const char **argv, bool can_exit_early, bool disable_output); @@ -625,11 +568,4 @@ ZIG_EXTERN_C bool ZigLLVMWriteArchive(const char *archive_name, const char **fil ZIG_EXTERN_C bool ZigLLVMWriteImportLibrary(const char *def_path, const enum ZigLLVM_ArchType arch, const char *output_lib_path, bool kill_at); -ZIG_EXTERN_C void ZigLLVMGetNativeTarget(enum ZigLLVM_ArchType 
*arch_type, - enum ZigLLVM_VendorType *vendor_type, enum ZigLLVM_OSType *os_type, enum ZigLLVM_EnvironmentType *environ_type, - enum ZigLLVM_ObjectFormatType *oformat); - -ZIG_EXTERN_C unsigned ZigLLVMDataLayoutGetStackAlignment(LLVMTargetDataRef TD); -ZIG_EXTERN_C unsigned ZigLLVMDataLayoutGetProgramAddressSpace(LLVMTargetDataRef TD); - #endif -- cgit v1.2.3 From 5b79f08ee82a48d84146706faf2229e7c298340d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 7 Aug 2023 03:36:11 -0400 Subject: llvm: finish converting attributes to use Builder and the C LLVM API --- src/codegen/llvm.zig | 239 ++++++------------------------------------ src/codegen/llvm/Builder.zig | 109 +++++++++++++++++-- src/codegen/llvm/bindings.zig | 13 +-- src/zig_llvm.cpp | 46 -------- src/zig_llvm.h | 14 --- 5 files changed, 133 insertions(+), 288 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 48e45400cd..ce892b2d03 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1314,28 +1314,21 @@ pub const Object = struct { if (func.analysis(ip).is_noinline) { try attributes.addFnAttr(.@"noinline", &o.builder); - o.addFnAttr(llvm_func, "noinline"); } else { _ = try attributes.removeFnAttr(.@"noinline"); - Object.removeFnAttr(llvm_func, "noinline"); } if (func.analysis(ip).stack_alignment.toByteUnitsOptional()) |alignment| { try attributes.addFnAttr(.{ .alignstack = Builder.Alignment.fromByteUnits(alignment) }, &o.builder); try attributes.addFnAttr(.@"noinline", &o.builder); - o.addFnAttrInt(llvm_func, "alignstack", alignment); - o.addFnAttr(llvm_func, "noinline"); } else { _ = try attributes.removeFnAttr(.alignstack); - Object.removeFnAttr(llvm_func, "alignstack"); } if (func.analysis(ip).is_cold) { try attributes.addFnAttr(.cold, &o.builder); - o.addFnAttr(llvm_func, "cold"); } else { _ = try attributes.removeFnAttr(.cold); - Object.removeFnAttr(llvm_func, "cold"); } // TODO: disable this if safety is off for the function scope @@ -1346,10 +1339,6 @@ pub const Object = struct { .kind = try o.builder.string("stack-protector-buffer-size"), .value = try o.builder.fmt("{d}", .{ssp_buf_size}), } }, &o.builder); - var buf: [12]u8 = undefined; - const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable; - o.addFnAttr(llvm_func, "sspstrong"); - o.addFnAttrString(llvm_func, "stack-protector-buffer-size", arg); } // TODO: disable this if safety is off for the function scope @@ -1358,13 +1347,11 @@ pub const Object = struct { .kind = try o.builder.string("probe-stack"), .value = try o.builder.string("__zig_probe_stack"), } }, &o.builder); - o.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack"); } else if (target.os.tag == .uefi) { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("no-stack-arg-probe"), .value = .empty, } }, &o.builder); - o.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); } if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| { @@ -1389,14 +1376,8 @@ pub const Object = struct { } else .none; if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) { - .signed => { - try attributes.addRetAttr(.signext, &o.builder); - o.addAttr(llvm_func, 0, "signext"); - }, - .unsigned => { - try attributes.addRetAttr(.zeroext, &o.builder); - o.addAttr(llvm_func, 0, "zeroext"); - }, + .signed => try attributes.addRetAttr(.signext, &o.builder), + .unsigned => try attributes.addRetAttr(.zeroext, &o.builder), }; const err_return_tracing = fn_info.return_type.toType().isError(mod) and @@ 
-1437,7 +1418,7 @@ pub const Object = struct { } else { args.appendAssumeCapacity(param); - try o.addByValParamAttrsOld(&attributes, llvm_func, param_ty, param_index, fn_info, llvm_arg_i); + try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, llvm_arg_i); } llvm_arg_i += 1; }, @@ -1447,7 +1428,7 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); - try o.addByRefParamAttrsOld(&attributes, llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); + try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); llvm_arg_i += 1; if (isByRef(param_ty, mod)) { @@ -1463,7 +1444,6 @@ pub const Object = struct { const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder); - o.addArgAttr(llvm_func, llvm_arg_i, "noundef"); llvm_arg_i += 1; if (isByRef(param_ty, mod)) { @@ -1500,23 +1480,19 @@ pub const Object = struct { if (math.cast(u5, it.zig_index - 1)) |i| { if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder); - o.addArgAttr(llvm_func, llvm_arg_i, "noalias"); } } if (param_ty.zigTypeTag(mod) != .Optional) { try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); - o.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); } if (ptr_info.flags.is_const) { try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder); - o.addArgAttr(llvm_func, llvm_arg_i, "readonly"); } const elem_align = Builder.Alignment.fromByteUnits( ptr_info.flags.alignment.toByteUnitsOptional() orelse @max(ptr_info.child.toType().abiAlignment(mod), 1), ); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); - o.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align.toByteUnits() orelse 0); const ptr_param = wip.arg(llvm_arg_i); llvm_arg_i += 1; const len_param = wip.arg(llvm_arg_i); @@ -1590,7 +1566,7 @@ pub const Object = struct { } } - function.ptr(&o.builder).attributes = try attributes.finish(&o.builder); + function.setAttributes(try attributes.finish(&o.builder), &o.builder); var di_file: ?*llvm.DIFile = null; var di_scope: ?*llvm.DIScope = null; @@ -2951,15 +2927,11 @@ pub const Object = struct { .kind = try o.builder.string("wasm-import-name"), .value = try o.builder.string(ip.stringToSlice(decl.name)), } }, &o.builder); - o.addFnAttrString(llvm_fn, "wasm-import-name", ip.stringToSlice(decl.name)); if (ip.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) { - try attributes.addFnAttr(.{ .string = .{ - .kind = try o.builder.string("wasm-import-module"), - .value = try o.builder.string(lib_name), - } }, &o.builder); - o.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); - } + if (!std.mem.eql(u8, lib_name, "c")) try attributes.addFnAttr(.{ .string = .{ + .kind = try o.builder.string("wasm-import-module"), + .value = try o.builder.string(lib_name), + } }, &o.builder); } } } @@ -2969,12 +2941,9 @@ pub const Object = struct { // Sret pointers must not be address 0 try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull"); // Sret pointers must not be address 0 - o.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); const raw_llvm_ret_ty = try o.lowerType(fn_info.return_type.toType()); try attributes.addParamAttr(llvm_arg_i, 
.{ .sret = raw_llvm_ret_ty }, &o.builder); - llvm_fn.addSretAttr(raw_llvm_ret_ty.toLlvm(&o.builder)); llvm_arg_i += 1; } @@ -2984,7 +2953,6 @@ pub const Object = struct { if (err_return_tracing) { try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull"); llvm_arg_i += 1; } @@ -2995,7 +2963,6 @@ pub const Object = struct { }, .Naked => { try attributes.addFnAttr(.naked, &o.builder); - o.addFnAttr(llvm_fn, "naked"); }, .Async => { function.call_conv = .fastcc; @@ -3014,11 +2981,10 @@ pub const Object = struct { } // Function attributes that are independent of analysis results of the function body. - try o.addCommonFnAttributes(&attributes, llvm_fn); + try o.addCommonFnAttributes(&attributes); if (fn_info.return_type == .noreturn_type) { try attributes.addFnAttr(.noreturn, &o.builder); - o.addFnAttr(llvm_fn, "noreturn"); } // Add parameter attributes. We handle only the case of extern functions (no body) @@ -3031,7 +2997,7 @@ pub const Object = struct { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); if (!isByRef(param_ty, mod)) { - try o.addByValParamAttrsOld(&attributes, llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); + try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { @@ -3039,11 +3005,10 @@ pub const Object = struct { const param_llvm_ty = try o.lowerType(param_ty.toType()); const alignment = Builder.Alignment.fromByteUnits(param_ty.toType().abiAlignment(mod)); - try o.addByRefParamAttrsOld(&attributes, llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); + try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder); - o.addArgAttr(llvm_fn, it.llvm_index - 1, "noundef"); }, // No attributes needed for these. 
.no_bits, @@ -3060,43 +3025,36 @@ pub const Object = struct { }; } - function.attributes = try attributes.finish(&o.builder); - try o.builder.llvm.globals.append(o.gpa, llvm_fn); gop.value_ptr.* = try o.builder.addGlobal(fqn, global); try o.builder.functions.append(o.gpa, function); + global.kind.function.setAttributes(try attributes.finish(&o.builder), &o.builder); return global.kind.function; } fn addCommonFnAttributes( o: *Object, attributes: *Builder.FunctionAttributes.Wip, - llvm_fn: *llvm.Value, ) Allocator.Error!void { const comp = o.module.comp; if (!comp.bin_file.options.red_zone) { try attributes.addFnAttr(.noredzone, &o.builder); - o.addFnAttr(llvm_fn, "noredzone"); } if (comp.bin_file.options.omit_frame_pointer) { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("frame-pointer"), .value = try o.builder.string("none"), } }, &o.builder); - o.addFnAttrString(llvm_fn, "frame-pointer", "none"); } else { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("frame-pointer"), .value = try o.builder.string("all"), } }, &o.builder); - o.addFnAttrString(llvm_fn, "frame-pointer", "all"); } try attributes.addFnAttr(.nounwind, &o.builder); - o.addFnAttr(llvm_fn, "nounwind"); if (comp.unwind_tables) { try attributes.addFnAttr(.{ .uwtable = Builder.Attribute.UwTable.default }, &o.builder); - o.addFnAttrInt(llvm_fn, "uwtable", 2); } if (comp.bin_file.options.skip_linker_dependencies or comp.bin_file.options.no_builtin) @@ -3107,38 +3065,31 @@ pub const Object = struct { // body of memcpy with a call to memcpy, which would then cause a stack // overflow instead of performing memcpy. try attributes.addFnAttr(.nobuiltin, &o.builder); - o.addFnAttr(llvm_fn, "nobuiltin"); } if (comp.bin_file.options.optimize_mode == .ReleaseSmall) { try attributes.addFnAttr(.minsize, &o.builder); try attributes.addFnAttr(.optsize, &o.builder); - o.addFnAttr(llvm_fn, "minsize"); - o.addFnAttr(llvm_fn, "optsize"); } if (comp.bin_file.options.tsan) { try attributes.addFnAttr(.sanitize_thread, &o.builder); - o.addFnAttr(llvm_fn, "sanitize_thread"); } if (comp.getTarget().cpu.model.llvm_name) |s| { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("target-cpu"), .value = try o.builder.string(s), } }, &o.builder); - llvm_fn.addFunctionAttr("target-cpu", s); } if (comp.bin_file.options.llvm_cpu_features) |s| { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("target-features"), .value = try o.builder.string(std.mem.span(s)), } }, &o.builder); - llvm_fn.addFunctionAttr("target-features", s); } if (comp.getTarget().cpu.arch.isBpf()) { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("no-builtins"), .value = .empty, } }, &o.builder); - llvm_fn.addFunctionAttr("no-builtins", ""); } } @@ -4483,69 +4434,6 @@ pub const Object = struct { return o.builder.castConst(.inttoptr, try o.builder.intConst(llvm_usize, int), llvm_ptr_ty); } - fn addAttr(o: *Object, val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void { - return o.addAttrInt(val, index, name, 0); - } - - fn addArgAttr(o: *Object, fn_val: *llvm.Value, param_index: u32, attr_name: []const u8) void { - return o.addAttr(fn_val, param_index + 1, attr_name); - } - - fn addArgAttrInt(o: *Object, fn_val: *llvm.Value, param_index: u32, attr_name: []const u8, int: u64) void { - return o.addAttrInt(fn_val, param_index + 1, attr_name, int); - } - - fn removeAttr(val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void { - const kind_id = 
llvm.getEnumAttributeKindForName(name.ptr, name.len); - assert(kind_id != 0); - val.removeEnumAttributeAtIndex(index, kind_id); - } - - fn addAttrInt( - o: *Object, - val: *llvm.Value, - index: llvm.AttributeIndex, - name: []const u8, - int: u64, - ) void { - const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len); - assert(kind_id != 0); - const llvm_attr = o.builder.llvm.context.createEnumAttribute(kind_id, int); - val.addAttributeAtIndex(index, llvm_attr); - } - - fn addAttrString( - o: *Object, - val: *llvm.Value, - index: llvm.AttributeIndex, - name: []const u8, - value: []const u8, - ) void { - const llvm_attr = o.builder.llvm.context.createStringAttribute( - name.ptr, - @intCast(name.len), - value.ptr, - @intCast(value.len), - ); - val.addAttributeAtIndex(index, llvm_attr); - } - - fn addFnAttr(o: *Object, val: *llvm.Value, name: []const u8) void { - o.addAttr(val, std.math.maxInt(llvm.AttributeIndex), name); - } - - fn addFnAttrString(o: *Object, val: *llvm.Value, name: []const u8, value: []const u8) void { - o.addAttrString(val, std.math.maxInt(llvm.AttributeIndex), name, value); - } - - fn removeFnAttr(fn_val: *llvm.Value, name: []const u8) void { - removeAttr(fn_val, std.math.maxInt(llvm.AttributeIndex), name); - } - - fn addFnAttrInt(o: *Object, fn_val: *llvm.Value, name: []const u8, int: u64) void { - return o.addAttrInt(fn_val, std.math.maxInt(llvm.AttributeIndex), name, int); - } - /// If the operand type of an atomic operation is not byte sized we need to /// widen it before using it and then truncate the result. /// RMW exchange of floating-point values is bitcasted to same-sized integer @@ -4608,80 +4496,13 @@ pub const Object = struct { attributes: *Builder.FunctionAttributes.Wip, llvm_arg_i: u32, alignment: Builder.Alignment, - byval_attr: bool, + byval: bool, param_llvm_ty: Builder.Type, ) Allocator.Error!void { try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder); try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = alignment }, &o.builder); - if (byval_attr) { - try attributes.addParamAttr(llvm_arg_i, .{ .byval = param_llvm_ty }, &o.builder); - } - } - - fn addByValParamAttrsOld( - o: *Object, - attributes: *Builder.FunctionAttributes.Wip, - llvm_fn: *llvm.Value, - param_ty: Type, - param_index: u32, - fn_info: InternPool.Key.FuncType, - llvm_arg_i: u32, - ) Allocator.Error!void { - const mod = o.module; - if (param_ty.isPtrAtRuntime(mod)) { - const ptr_info = param_ty.ptrInfo(mod); - if (math.cast(u5, param_index)) |i| { - if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { - try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); - } - } - if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.flags.is_allowzero) { - try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull"); - } - if (ptr_info.flags.is_const) { - try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "readonly"); - } - const elem_align = Builder.Alignment.fromByteUnits( - ptr_info.flags.alignment.toByteUnitsOptional() orelse - @max(ptr_info.child.toType().abiAlignment(mod), 1), - ); - try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder); - o.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align.toByteUnits() orelse 0); - } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) { - .signed => { - try 
attributes.addParamAttr(llvm_arg_i, .signext, &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "signext"); - }, - .unsigned => { - try attributes.addParamAttr(llvm_arg_i, .zeroext, &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"); - }, - }; - } - - fn addByRefParamAttrsOld( - o: *Object, - attributes: *Builder.FunctionAttributes.Wip, - llvm_fn: *llvm.Value, - llvm_arg_i: u32, - alignment: Builder.Alignment, - byval_attr: bool, - param_llvm_ty: Builder.Type, - ) Allocator.Error!void { - try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); - try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder); - try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = alignment }, &o.builder); - o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull"); - o.addArgAttr(llvm_fn, llvm_arg_i, "readonly"); - o.addArgAttrInt(llvm_fn, llvm_arg_i, "align", alignment.toByteUnits() orelse 0); - if (byval_attr) { - try attributes.addParamAttr(llvm_arg_i, .{ .byval = param_llvm_ty }, &o.builder); - llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty.toLlvm(&o.builder)); - } + if (byval) try attributes.addParamAttr(llvm_arg_i, .{ .byval = param_llvm_ty }, &o.builder); } }; @@ -9503,16 +9324,16 @@ pub const FuncGen = struct { var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); - - function_index.toLlvm(&o.builder).setLinkage(.Internal); - function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); - try o.addCommonFnAttributes(&attributes, function_index.toLlvm(&o.builder)); + try o.addCommonFnAttributes(&attributes); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; function_index.ptr(&o.builder).call_conv = .fastcc; - function_index.ptr(&o.builder).attributes = try attributes.finish(&o.builder); gop.value_ptr.* = function_index; + function_index.toLlvm(&o.builder).setLinkage(.Internal); + function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); + var wip = try Builder.WipFunction.init(&o.builder, function_index); defer wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; @@ -9577,16 +9398,16 @@ pub const FuncGen = struct { var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); - - function_index.toLlvm(&o.builder).setLinkage(.Internal); - function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); - try o.addCommonFnAttributes(&attributes, function_index.toLlvm(&o.builder)); + try o.addCommonFnAttributes(&attributes); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; function_index.ptr(&o.builder).call_conv = .fastcc; - function_index.ptr(&o.builder).attributes = try attributes.finish(&o.builder); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; + function_index.toLlvm(&o.builder).setLinkage(.Internal); + function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); + var wip = try Builder.WipFunction.init(&o.builder, function_index); defer wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; @@ -9659,14 +9480,14 @@ pub const FuncGen = struct { var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); - - function_index.toLlvm(&o.builder).setLinkage(.Internal); - function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); - try o.addCommonFnAttributes(&attributes, function_index.toLlvm(&o.builder)); + try 
o.addCommonFnAttributes(&attributes); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; function_index.ptr(&o.builder).call_conv = .fastcc; - function_index.ptr(&o.builder).attributes = try attributes.finish(&o.builder); + + function_index.toLlvm(&o.builder).setLinkage(.Internal); + function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); return function_index; } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 28545fe95e..c7e0937d41 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1349,21 +1349,29 @@ pub const Attribute = union(Kind) { //sanitize_memtag, sanitize_address_dyninit, - string = std.math.maxInt(u31) - 1, - none = std.math.maxInt(u31), + string = std.math.maxInt(u31), + none = std.math.maxInt(u32), _, pub const len = @typeInfo(Kind).Enum.fields.len - 2; pub fn fromString(str: String) Kind { assert(!str.isAnon()); - return @enumFromInt(@intFromEnum(str)); + const kind: Kind = @enumFromInt(@intFromEnum(str)); + assert(kind != .none); + return kind; } fn toString(self: Kind) ?String { + assert(self != .none); const str: String = @enumFromInt(@intFromEnum(self)); return if (str.isAnon()) null else str; } + + fn toLlvm(self: Kind, builder: *const Builder) *c_uint { + assert(builder.useLibLlvm()); + return &builder.llvm.attribute_kind_ids.?[@intFromEnum(self)]; + } }; pub const FpClass = packed struct(u32) { @@ -3147,6 +3155,86 @@ pub const Function = struct { return self.toConst(builder).toValue(); } + pub fn setAttributes( + self: Index, + new_function_attributes: FunctionAttributes, + builder: *Builder, + ) void { + if (builder.useLibLlvm()) { + const llvm_function = self.toLlvm(builder); + const old_function_attributes = self.ptrConst(builder).attributes; + for (0..@max( + old_function_attributes.slice(builder).len, + new_function_attributes.slice(builder).len, + )) |function_attribute_index| { + const llvm_attribute_index = + @as(llvm.AttributeIndex, @intCast(function_attribute_index)) -% 1; + const old_attributes_slice = + old_function_attributes.get(function_attribute_index, builder).slice(builder); + const new_attributes_slice = + new_function_attributes.get(function_attribute_index, builder).slice(builder); + var old_attribute_index: usize = 0; + var new_attribute_index: usize = 0; + while (true) { + const old_attribute_kind = if (old_attribute_index < old_attributes_slice.len) + old_attributes_slice[old_attribute_index].getKind(builder) + else + .none; + const new_attribute_kind = if (new_attribute_index < new_attributes_slice.len) + new_attributes_slice[new_attribute_index].getKind(builder) + else + .none; + switch (std.math.order( + @intFromEnum(old_attribute_kind), + @intFromEnum(new_attribute_kind), + )) { + .lt => { + // Removed + if (old_attribute_kind.toString()) |name| { + const slice = name.slice(builder).?; + llvm_function.removeStringAttributeAtIndex( + llvm_attribute_index, + slice.ptr, + @intCast(slice.len), + ); + } else { + const llvm_kind_id = old_attribute_kind.toLlvm(builder).*; + assert(llvm_kind_id != 0); + llvm_function.removeEnumAttributeAtIndex( + llvm_attribute_index, + llvm_kind_id, + ); + } + old_attribute_index += 1; + continue; + }, + .eq => { + // Iteration finished + if (old_attribute_kind == .none) break; + // No change + if (old_attributes_slice[old_attribute_index] == + new_attributes_slice[new_attribute_index]) + { + old_attribute_index += 1; + new_attribute_index += 
1; + continue; + } + old_attribute_index += 1; + }, + .gt => {}, + } + // New or changed + llvm_function.addAttributeAtIndex( + llvm_attribute_index, + new_attributes_slice[new_attribute_index].toLlvm(builder), + ); + new_attribute_index += 1; + } + } + } + self.ptr(builder).attributes = new_function_attributes; + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } @@ -5048,9 +5136,8 @@ pub const WipFunction = struct { .tail, .tail_fast => .Tail, }); for (0.., function_attributes.slice(self.builder)) |index, attributes| { - const attribute_index = @as(llvm.AttributeIndex, @intCast(index)) -% 1; for (attributes.slice(self.builder)) |attribute| llvm_instruction.addCallSiteAttribute( - attribute_index, + @as(llvm.AttributeIndex, @intCast(index)) -% 1, attribute.toLlvm(self.builder), ); } @@ -7368,16 +7455,16 @@ pub fn attr(self: *Builder, attribute: Attribute) Allocator.Error!Attribute.Inde gop.value_ptr.* = {}; if (self.useLibLlvm()) self.llvm.attributes.appendAssumeCapacity(switch (attribute) { else => llvm_attr: { - const kind_id = &self.llvm.attribute_kind_ids.?[@intFromEnum(attribute)]; - if (kind_id.* == 0) { + const llvm_kind_id = attribute.getKind().toLlvm(self); + if (llvm_kind_id.* == 0) { const name = @tagName(attribute); - kind_id.* = llvm.getEnumAttributeKindForName(name.ptr, name.len); - assert(kind_id.* != 0); + llvm_kind_id.* = llvm.getEnumAttributeKindForName(name.ptr, name.len); + assert(llvm_kind_id.* != 0); } break :llvm_attr switch (attribute) { else => switch (attribute) { inline else => |value| self.llvm.context.createEnumAttribute( - kind_id.*, + llvm_kind_id.*, switch (@TypeOf(value)) { void => 0, u32 => value, @@ -7411,7 +7498,7 @@ pub fn attr(self: *Builder, attribute: Attribute) Allocator.Error!Attribute.Inde .inalloca, .sret, .elementtype, - => |ty| self.llvm.context.createTypeAttribute(kind_id.*, ty.toLlvm(self)), + => |ty| self.llvm.context.createTypeAttribute(llvm_kind_id.*, ty.toLlvm(self)), .string, .none => unreachable, }; }, diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 9220c30a09..6e7027c341 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -115,12 +115,15 @@ pub const Context = opaque { }; pub const Value = opaque { - pub const addAttributeAtIndex = ZigLLVMAddAttributeAtIndex; - extern fn ZigLLVMAddAttributeAtIndex(*Value, Idx: AttributeIndex, A: *Attribute) void; + pub const addAttributeAtIndex = LLVMAddAttributeAtIndex; + extern fn LLVMAddAttributeAtIndex(F: *Value, Idx: AttributeIndex, A: *Attribute) void; pub const removeEnumAttributeAtIndex = LLVMRemoveEnumAttributeAtIndex; extern fn LLVMRemoveEnumAttributeAtIndex(F: *Value, Idx: AttributeIndex, KindID: c_uint) void; + pub const removeStringAttributeAtIndex = LLVMRemoveStringAttributeAtIndex; + extern fn LLVMRemoveStringAttributeAtIndex(F: *Value, Idx: AttributeIndex, K: [*]const u8, KLen: c_uint) void; + pub const getFirstBasicBlock = LLVMGetFirstBasicBlock; extern fn LLVMGetFirstBasicBlock(Fn: *Value) ?*BasicBlock; @@ -342,9 +345,6 @@ pub const Value = opaque { pub const deleteFunction = LLVMDeleteFunction; extern fn LLVMDeleteFunction(Fn: *Value) void; - pub const addSretAttr = ZigLLVMAddSretAttr; - extern fn ZigLLVMAddSretAttr(fn_ref: *Value, type_val: *Type) void; - pub const getParam = LLVMGetParam; extern fn LLVMGetParam(Fn: *Value, Index: c_uint) *Value; @@ -369,9 +369,6 @@ pub const Value = opaque { pub const getAlignment = LLVMGetAlignment; extern fn 
LLVMGetAlignment(V: *Value) c_uint; - pub const addFunctionAttr = ZigLLVMAddFunctionAttr; - extern fn ZigLLVMAddFunctionAttr(Fn: *Value, attr_name: [*:0]const u8, attr_value: [*:0]const u8) void; - pub const addByValAttr = ZigLLVMAddByValAttr; extern fn ZigLLVMAddByValAttr(Fn: *Value, ArgNo: c_uint, type: *Type) void; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 5b599d8e0e..5d29bbb595 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -408,14 +408,6 @@ void ZigLLVMSetTailCallKind(LLVMValueRef Call, enum ZigLLVMTailCallKind TailCall unwrap(Call)->setTailCallKind(TCK); } -void ZigLLVMAddAttributeAtIndex(LLVMValueRef Val, unsigned Idx, LLVMAttributeRef A) { - if (isa(unwrap(Val))) { - unwrap(Val)->addAttributeAtIndex(Idx, unwrap(A)); - } else { - unwrap(Val)->addAttributeAtIndex(Idx, unwrap(A)); - } -} - LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign, LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile) { @@ -950,36 +942,6 @@ void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state) { } } -void ZigLLVMAddByValAttr(LLVMValueRef Val, unsigned ArgNo, LLVMTypeRef type_val) { - if (isa(unwrap(Val))) { - Function *func = unwrap(Val); - AttrBuilder attr_builder(func->getContext()); - Type *llvm_type = unwrap(type_val); - attr_builder.addByValAttr(llvm_type); - func->addParamAttrs(ArgNo, attr_builder); - } else { - CallInst *call = unwrap(Val); - AttrBuilder attr_builder(call->getContext()); - Type *llvm_type = unwrap(type_val); - attr_builder.addByValAttr(llvm_type); - // NOTE: +1 here since index 0 refers to the return value - call->addAttributeAtIndex(ArgNo + 1, attr_builder.getAttribute(Attribute::ByVal)); - } -} - -void ZigLLVMAddSretAttr(LLVMValueRef fn_ref, LLVMTypeRef type_val) { - Function *func = unwrap(fn_ref); - AttrBuilder attr_builder(func->getContext()); - Type *llvm_type = unwrap(type_val); - attr_builder.addStructRetAttr(llvm_type); - func->addParamAttrs(0, attr_builder); -} - -void ZigLLVMAddFunctionAttr(LLVMValueRef fn_ref, const char *attr_name, const char *attr_value) { - Function *func = unwrap(fn_ref); - func->addFnAttr(attr_name, attr_value); -} - void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv) { cl::ParseCommandLineOptions(argc, argv); } @@ -1172,14 +1134,6 @@ bool ZigLLDLinkWasm(int argc, const char **argv, bool can_exit_early, bool disab return lld::wasm::link(args, llvm::outs(), llvm::errs(), can_exit_early, disable_output); } -inline LLVMAttributeRef wrap(Attribute Attr) { - return reinterpret_cast(Attr.getRawPointer()); -} - -inline Attribute unwrap(LLVMAttributeRef Attr) { - return Attribute::fromRawPointer(Attr); -} - LLVMValueRef ZigLLVMBuildAndReduce(LLVMBuilderRef B, LLVMValueRef Val) { return wrap(unwrap(B)->CreateAndReduce(unwrap(Val))); } diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 258e846032..97fc7a627f 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -122,15 +122,6 @@ enum ZigLLVM_CallingConv { ZigLLVM_MaxID = 1023, }; -enum ZigLLVM_CallAttr { - ZigLLVM_CallAttrAuto, - ZigLLVM_CallAttrNeverTail, - ZigLLVM_CallAttrNeverInline, - ZigLLVM_CallAttrAlwaysTail, - ZigLLVM_CallAttrAlwaysInline, -}; -ZIG_EXTERN_C void ZigLLVMAddAttributeAtIndex(LLVMValueRef Val, unsigned Idx, LLVMAttributeRef A); - ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign, LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile); @@ -301,11 +292,6 @@ ZIG_EXTERN_C LLVMValueRef 
ZigLLVMInsertDbgValueIntrinsicAtEnd(struct ZigLLVMDIBu ZIG_EXTERN_C void ZigLLVMSetFastMath(LLVMBuilderRef builder_wrapped, bool on_state); -ZIG_EXTERN_C void ZigLLVMAddFunctionAttr(LLVMValueRef fn, const char *attr_name, const char *attr_value); -ZIG_EXTERN_C void ZigLLVMAddByValAttr(LLVMValueRef fn_ref, unsigned ArgNo, LLVMTypeRef type_val); -ZIG_EXTERN_C void ZigLLVMAddSretAttr(LLVMValueRef fn_ref, LLVMTypeRef type_val); -ZIG_EXTERN_C void ZigLLVMAddFunctionAttrCold(LLVMValueRef fn); - ZIG_EXTERN_C void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv); ZIG_EXTERN_C ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpression *global_variable_expression); -- cgit v1.2.3 From e1efd4d3c22cad89ed89ea152c92b1c3260ddb8a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 7 Aug 2023 04:23:02 -0400 Subject: Builder: simplify intrinsic table This format removes back-references allowing it to be parsed in a single pass. It also reduces the number of kinds, simplifying the code. --- src/codegen/llvm/Builder.zig | 195 ++++++++++++++++++++++++++++++------------- 1 file changed, 139 insertions(+), 56 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index c7e0937d41..8c54742a86 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -2462,7 +2462,8 @@ pub const Intrinsic = enum { @"wasm.memory.grow", const Signature = struct { - params: []const Parameter = &.{}, + ret_len: u8, + params: []const Parameter, attrs: []const Attribute = &.{}, const Parameter = struct { @@ -2471,33 +2472,37 @@ pub const Intrinsic = enum { const Kind = union(enum) { type: Type, + change_scalar: struct { + index: u8, + scalar: Type, + }, overloaded, - overloaded_tuple: u8, matches: u8, - matches_tuple: packed struct { param: u4, field: u4 }, - matches_with_overflow: u8, }; }; }; - const signatures = std.enums.EnumArray(Intrinsic, Signature).initDefault(.{}, .{ + const signatures = std.enums.EnumArray(Intrinsic, Signature).initDefault(.{ + .ret_len = 0, + .params = &.{}, + }, .{ .va_start = .{ + .ret_len = 0, .params = &.{ - .{ .kind = .{ .type = .void } }, .{ .kind = .{ .type = .ptr } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, }, .va_end = .{ + .ret_len = 0, .params = &.{ - .{ .kind = .{ .type = .void } }, .{ .kind = .{ .type = .ptr } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, }, .va_copy = .{ + .ret_len = 0, .params = &.{ - .{ .kind = .{ .type = .void } }, .{ .kind = .{ .type = .ptr } }, .{ .kind = .{ .type = .ptr } }, }, @@ -2505,6 +2510,7 @@ pub const Intrinsic = enum { }, .returnaddress = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .ptr } }, .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, @@ -2512,18 +2518,21 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .addressofreturnaddress = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .sponentry = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .frameaddress = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, @@ -2531,8 +2540,8 @@ pub const 
Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .prefetch = .{ + .ret_len = 0, .params = &.{ - .{ .kind = .{ .type = .void } }, .{ .kind = .overloaded, .attrs = &.{ .nocapture, .readonly } }, .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, @@ -2541,6 +2550,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.readwrite) } }, }, .@"thread.pointer" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .ptr } }, }, @@ -2548,6 +2558,7 @@ pub const Intrinsic = enum { }, .abs = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2556,6 +2567,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .smax = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2564,6 +2576,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .smin = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2572,6 +2585,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .umax = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2580,6 +2594,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .umin = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2588,6 +2603,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .sqrt = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2595,6 +2611,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .powi = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2603,6 +2620,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .sin = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2610,6 +2628,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .cos = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2617,6 +2636,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .pow = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2625,6 +2645,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .exp = .{ + .ret_len = 1, .params 
= &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2632,6 +2653,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .exp2 = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2639,6 +2661,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .ldexp = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2647,13 +2670,16 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .frexp = .{ + .ret_len = 2, .params = &.{ - .{ .kind = .{ .overloaded_tuple = 2 } }, - .{ .kind = .{ .matches_tuple = .{ .param = 0, .field = 0 } } }, + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .log = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2661,6 +2687,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .log10 = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2668,6 +2695,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .log2 = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2675,6 +2703,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .fma = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2684,6 +2713,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .fabs = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2691,6 +2721,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .minnum = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2699,6 +2730,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .maxnum = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2707,6 +2739,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .minimum = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2715,6 +2748,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .maximum = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, 
@@ -2723,6 +2757,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .copysign = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2731,6 +2766,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .floor = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2738,6 +2774,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .ceil = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2745,6 +2782,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .trunc = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2752,6 +2790,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .rint = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2759,6 +2798,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .nearbyint = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2766,6 +2806,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .round = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2773,6 +2814,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .roundeven = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2780,6 +2822,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .lround = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .overloaded }, @@ -2787,6 +2830,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .llround = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .overloaded }, @@ -2794,6 +2838,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .lrint = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .overloaded }, @@ -2801,6 +2846,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .llrint = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .overloaded }, @@ -2809,6 +2855,7 @@ pub const Intrinsic = enum { }, .bitreverse = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2816,6 
+2863,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .bswap = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2823,6 +2871,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .ctpop = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2830,6 +2879,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .ctlz = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2838,6 +2888,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .cttz = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2846,6 +2897,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .fshl = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2855,6 +2907,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .fshr = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2865,55 +2918,68 @@ pub const Intrinsic = enum { }, .@"sadd.with.overflow" = .{ + .ret_len = 2, .params = &.{ - .{ .kind = .{ .matches_with_overflow = 1 } }, .{ .kind = .overloaded }, - .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"uadd.with.overflow" = .{ + .ret_len = 2, .params = &.{ - .{ .kind = .{ .matches_with_overflow = 1 } }, .{ .kind = .overloaded }, - .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"ssub.with.overflow" = .{ + .ret_len = 2, .params = &.{ - .{ .kind = .{ .matches_with_overflow = 1 } }, .{ .kind = .overloaded }, - .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"usub.with.overflow" = .{ + .ret_len = 2, .params = &.{ - .{ .kind = .{ .matches_with_overflow = 1 } }, .{ .kind = .overloaded }, - .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"smul.with.overflow" = .{ + .ret_len = 2, .params = &.{ - .{ 
.kind = .{ .matches_with_overflow = 1 } }, .{ .kind = .overloaded }, - .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"umul.with.overflow" = .{ + .ret_len = 2, .params = &.{ - .{ .kind = .{ .matches_with_overflow = 1 } }, .{ .kind = .overloaded }, - .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"sadd.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2922,6 +2988,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"uadd.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2930,6 +2997,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"ssub.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2938,6 +3006,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"usub.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2946,6 +3015,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"sshl.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2954,6 +3024,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"ushl.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2963,6 +3034,7 @@ pub const Intrinsic = enum { }, .@"smul.fix" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2972,6 +3044,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"umul.fix" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2981,6 +3054,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"smul.fix.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2990,6 +3064,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"umul.fix.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -2999,6 +3074,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = 
Attribute.Memory.all(.none) } }, }, .@"sdiv.fix" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -3008,6 +3084,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"udiv.fix" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -3017,6 +3094,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"sdiv.fix.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -3026,6 +3104,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"udiv.fix.sat" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, @@ -3036,62 +3115,67 @@ pub const Intrinsic = enum { }, .trap = .{ - .params = &.{ - .{ .kind = .{ .type = .void } }, - }, + .ret_len = 0, + .params = &.{}, .attrs = &.{ .cold, .noreturn, .nounwind, .{ .memory = .{ .inaccessiblemem = .write } } }, }, .debugtrap = .{ - .params = &.{ - .{ .kind = .{ .type = .void } }, - }, + .ret_len = 0, + .params = &.{}, .attrs = &.{.nounwind}, }, .ubsantrap = .{ + .ret_len = 0, .params = &.{ - .{ .kind = .{ .type = .void } }, .{ .kind = .{ .type = .i8 }, .attrs = &.{.immarg} }, }, .attrs = &.{ .cold, .noreturn, .nounwind }, }, .@"amdgcn.workitem.id.x" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .i32 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"amdgcn.workitem.id.y" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .i32 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"amdgcn.workitem.id.z" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .i32 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"amdgcn.workgroup.id.x" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .i32 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"amdgcn.workgroup.id.y" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .i32 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"amdgcn.workgroup.id.z" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = .i32 } }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"amdgcn.dispatch.ptr" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .{ .type = Type.ptr_amdgpu_constant }, @@ -3102,6 +3186,7 @@ pub const Intrinsic = enum { }, .@"wasm.memory.size" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .type = .i32 } }, @@ -3109,6 +3194,7 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, .@"wasm.memory.grow" = .{ + .ret_len = 1, .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .type = .i32 } }, @@ -7627,8 +7713,10 @@ pub fn getIntrinsic( const signature = 
Intrinsic.signatures.get(id); const param_types = try allocator.alloc(Type, signature.params.len); defer allocator.free(param_types); - const function_attributes = - try allocator.alloc(Attributes, FunctionAttributes.return_index + signature.params.len); + const function_attributes = try allocator.alloc( + Attributes, + FunctionAttributes.params_index + (signature.params.len - signature.ret_len), + ); defer allocator.free(function_attributes); var attributes: struct { @@ -7651,42 +7739,37 @@ pub fn getIntrinsic( var overload_index: usize = 0; function_attributes[FunctionAttributes.function_index] = try attributes.get(signature.attrs); - for ( - param_types, - function_attributes[FunctionAttributes.return_index..], - signature.params, - ) |*param_type, *param_attributes, signature_param| { + for (0.., param_types, signature.params) |param_index, *param_type, signature_param| { switch (signature_param.kind) { .type => |ty| param_type.* = ty, + .change_scalar => |info| { + assert(info.index < param_index); + param_type.* = try param_types[info.index].changeScalar(info.scalar, self); + }, .overloaded => { param_type.* = overload[overload_index]; overload_index += 1; }, - .overloaded_tuple => |len| { - const fields = try allocator.alloc(Type, len); - defer allocator.free(fields); - for (fields, overload[overload_index..][0..len]) |*field, ty| field.* = ty; - param_type.* = try self.structType(.normal, fields); - overload_index += len; + .matches => |index| { + assert(index < param_index); + param_type.* = param_types[index]; }, - .matches, .matches_tuple, .matches_with_overflow => {}, } - param_attributes.* = try attributes.get(signature_param.attrs); + function_attributes[ + if (param_index < signature.ret_len) + FunctionAttributes.return_index + else + FunctionAttributes.params_index + (param_index - signature.ret_len) + ] = try attributes.get(signature_param.attrs); } assert(overload_index == overload.len); - for (param_types, signature.params) |*param_type, signature_param| switch (signature_param.kind) { - .type, .overloaded, .overloaded_tuple => {}, - .matches => |param_index| param_type.* = param_types[param_index], - .matches_tuple => |tuple| param_type.* = - param_types[tuple.param].structFields(self)[tuple.field], - .matches_with_overflow => |param_index| { - const ty = param_types[param_index]; - param_type.* = try self.structType(.normal, &.{ ty, try ty.changeScalar(.i1, self) }); - }, - }; const function_index = - try self.addFunction(try self.fnType(param_types[0], param_types[1..], .normal), name); + try self.addFunction(try self.fnType(switch (signature.ret_len) { + 0 => .void, + 1 => param_types[0], + else => try self.structType(.normal, param_types[0..signature.ret_len]), + }, param_types[signature.ret_len..], .normal), name); function_index.ptr(self).attributes = try self.fnAttrs(function_attributes); return function_index; } -- cgit v1.2.3 From 6577f52614e21dc43ccc2eb9ab80eeaa4fde9cd4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 7 Aug 2023 05:24:14 -0400 Subject: llvm: convert vector reduction intrinsics Scratch that thing I said about one pass. 
:) --- src/codegen/llvm.zig | 113 +++++++++++--------------- src/codegen/llvm/Builder.zig | 179 ++++++++++++++++++++++++++++++++++++++---- src/codegen/llvm/bindings.zig | 33 -------- src/zig_llvm.cpp | 44 ----------- src/zig_llvm.h | 12 --- 5 files changed, 208 insertions(+), 173 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ce892b2d03..30164548be 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -7464,18 +7464,16 @@ pub const FuncGen = struct { const llvm_inst_ty = try o.lowerType(inst_ty); const results = try fg.wip.callIntrinsic(intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, ""); - const overflow_bit = try fg.wip.extractValue(results, &.{1}, ""); - const scalar_overflow_bit = if (llvm_inst_ty.isVector(&o.builder)) - (try fg.wip.unimplemented(.i1, "")).finish( - fg.builder.buildOrReduce(overflow_bit.toLlvm(&fg.wip)), - &fg.wip, - ) + const overflow_bits = try fg.wip.extractValue(results, &.{1}, ""); + const overflow_bits_ty = overflow_bits.typeOfWip(&fg.wip); + const overflow_bit = if (overflow_bits_ty.isVector(&o.builder)) + try fg.wip.callIntrinsic(.@"vector.reduce.or", &.{overflow_bits_ty}, &.{overflow_bits}, "") else - overflow_bit; + overflow_bits; const fail_block = try fg.wip.block(1, "OverflowFail"); const ok_block = try fg.wip.block(1, "OverflowOk"); - _ = try fg.wip.brCond(scalar_overflow_bit, fail_block, ok_block); + _ = try fg.wip.brCond(overflow_bit, fail_block, ok_block); fg.wip.cursor = .{ .block = fail_block }; try fg.buildSimplePanic(.integer_overflow); @@ -9643,72 +9641,53 @@ pub const FuncGen = struct { const reduce = self.air.instructions.items(.data)[inst].reduce; const operand = try self.resolveInst(reduce.operand); const operand_ty = self.typeOf(reduce.operand); + const llvm_operand_ty = try o.lowerType(operand_ty); const scalar_ty = self.typeOfIndex(inst); const llvm_scalar_ty = try o.lowerType(scalar_ty); switch (reduce.operation) { - .And => return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildAndReduce(operand.toLlvm(&self.wip)), &self.wip), - .Or => return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildOrReduce(operand.toLlvm(&self.wip)), &self.wip), - .Xor => return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildXorReduce(operand.toLlvm(&self.wip)), &self.wip), - .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( - self.builder.buildIntMinReduce( - operand.toLlvm(&self.wip), - scalar_ty.isSignedInt(mod), - ), - &self.wip, - ), - .Float => if (intrinsicsAllowed(scalar_ty, target)) { - return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildFPMinReduce(operand.toLlvm(&self.wip)), &self.wip); - }, + .And, .Or, .Xor => return self.wip.callIntrinsic(switch (reduce.operation) { + .And => .@"vector.reduce.and", + .Or => .@"vector.reduce.or", + .Xor => .@"vector.reduce.xor", else => unreachable, - }, - .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( - self.builder.buildIntMaxReduce( - operand.toLlvm(&self.wip), - scalar_ty.isSignedInt(mod), - ), - &self.wip, - ), - .Float => if (intrinsicsAllowed(scalar_ty, target)) { - return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildFPMaxReduce(operand.toLlvm(&self.wip)), &self.wip); - }, - else => unreachable, - }, - .Add => switch (scalar_ty.zigTypeTag(mod)) { - .Int => 
return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildAddReduce(operand.toLlvm(&self.wip)), &self.wip), - .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const neutral_value = try o.builder.fpConst(llvm_scalar_ty, -0.0); - return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( - self.builder.buildFPAddReduce( - neutral_value.toLlvm(&o.builder), - operand.toLlvm(&self.wip), - ), - &self.wip, - ); - }, + }, &.{llvm_operand_ty}, &.{operand}, ""), + .Min, .Max => switch (scalar_ty.zigTypeTag(mod)) { + .Int => return self.wip.callIntrinsic(switch (reduce.operation) { + .Min => if (scalar_ty.isSignedInt(mod)) + .@"vector.reduce.smin" + else + .@"vector.reduce.umin", + .Max => if (scalar_ty.isSignedInt(mod)) + .@"vector.reduce.smax" + else + .@"vector.reduce.umax", + else => unreachable, + }, &.{llvm_operand_ty}, &.{operand}, ""), + .Float => if (intrinsicsAllowed(scalar_ty, target)) + return self.wip.callIntrinsic(switch (reduce.operation) { + .Min => .@"vector.reduce.fmin", + .Max => .@"vector.reduce.fmax", + else => unreachable, + }, &.{llvm_operand_ty}, &.{operand}, ""), else => unreachable, }, - .Mul => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")) - .finish(self.builder.buildMulReduce(operand.toLlvm(&self.wip)), &self.wip), - .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const neutral_value = try o.builder.fpConst(llvm_scalar_ty, 1.0); - return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( - self.builder.buildFPMulReduce( - neutral_value.toLlvm(&o.builder), - operand.toLlvm(&self.wip), - ), - &self.wip, - ); - }, + .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { + .Int => return self.wip.callIntrinsic(switch (reduce.operation) { + .Add => .@"vector.reduce.add", + .Mul => .@"vector.reduce.mul", + else => unreachable, + }, &.{llvm_operand_ty}, &.{operand}, ""), + .Float => if (intrinsicsAllowed(scalar_ty, target)) + return self.wip.callIntrinsic(switch (reduce.operation) { + .Add => .@"vector.reduce.fadd", + .Mul => .@"vector.reduce.fmul", + else => unreachable, + }, &.{llvm_operand_ty}, &.{ switch (reduce.operation) { + .Add => try o.builder.fpValue(llvm_scalar_ty, -0.0), + .Mul => try o.builder.fpValue(llvm_scalar_ty, 1.0), + else => unreachable, + }, operand }, ""), else => unreachable, }, } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 8c54742a86..0296fbd147 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -2472,12 +2472,13 @@ pub const Intrinsic = enum { const Kind = union(enum) { type: Type, - change_scalar: struct { + overloaded, + matches: u8, + matches_scalar: u8, + matches_changed_scalar: struct { index: u8, scalar: Type, }, - overloaded, - matches: u8, }; }; }; @@ -2921,7 +2922,7 @@ pub const Intrinsic = enum { .ret_len = 2, .params = &.{ .{ .kind = .overloaded }, - .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches_changed_scalar = .{ .index = 0, .scalar = .i1 } } }, .{ .kind = .{ .matches = 0 } }, .{ .kind = .{ .matches = 0 } }, }, @@ -2931,7 +2932,7 @@ pub const Intrinsic = enum { .ret_len = 2, .params = &.{ .{ .kind = .overloaded }, - .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches_changed_scalar = .{ .index = 0, .scalar = .i1 } } }, .{ .kind = .{ .matches = 0 } }, .{ .kind = .{ .matches = 0 } }, }, @@ -2941,7 +2942,7 @@ pub const Intrinsic = enum { .ret_len = 2, .params = &.{ .{ .kind = .overloaded }, - 
.{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches_changed_scalar = .{ .index = 0, .scalar = .i1 } } }, .{ .kind = .{ .matches = 0 } }, .{ .kind = .{ .matches = 0 } }, }, @@ -2951,7 +2952,7 @@ pub const Intrinsic = enum { .ret_len = 2, .params = &.{ .{ .kind = .overloaded }, - .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches_changed_scalar = .{ .index = 0, .scalar = .i1 } } }, .{ .kind = .{ .matches = 0 } }, .{ .kind = .{ .matches = 0 } }, }, @@ -2961,7 +2962,7 @@ pub const Intrinsic = enum { .ret_len = 2, .params = &.{ .{ .kind = .overloaded }, - .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches_changed_scalar = .{ .index = 0, .scalar = .i1 } } }, .{ .kind = .{ .matches = 0 } }, .{ .kind = .{ .matches = 0 } }, }, @@ -2971,7 +2972,7 @@ pub const Intrinsic = enum { .ret_len = 2, .params = &.{ .{ .kind = .overloaded }, - .{ .kind = .{ .change_scalar = .{ .index = 0, .scalar = .i1 } } }, + .{ .kind = .{ .matches_changed_scalar = .{ .index = 0, .scalar = .i1 } } }, .{ .kind = .{ .matches = 0 } }, .{ .kind = .{ .matches = 0 } }, }, @@ -3114,6 +3115,148 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, + .@"vector.reduce.add" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.fadd" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 2 } }, + .{ .kind = .{ .matches_scalar = 2 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.mul" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.fmul" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 2 } }, + .{ .kind = .{ .matches_scalar = 2 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.and" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.or" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.xor" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.smax" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = 
Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.smin" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.umax" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.umin" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.fmax" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.fmin" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.fmaximum" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.fminimum" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_scalar = 1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.insert" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i64 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.extract" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i64 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .trap = .{ .ret_len = 0, .params = &.{}, @@ -7742,18 +7885,11 @@ pub fn getIntrinsic( for (0.., param_types, signature.params) |param_index, *param_type, signature_param| { switch (signature_param.kind) { .type => |ty| param_type.* = ty, - .change_scalar => |info| { - assert(info.index < param_index); - param_type.* = try param_types[info.index].changeScalar(info.scalar, self); - }, .overloaded => { param_type.* = overload[overload_index]; overload_index += 1; }, - .matches => |index| { - assert(index < param_index); - param_type.* = param_types[index]; - }, + .matches, .matches_scalar, .matches_changed_scalar => {}, } function_attributes[ if (param_index < signature.ret_len) @@ -7763,6 +7899,15 @@ pub fn getIntrinsic( ] = try attributes.get(signature_param.attrs); } assert(overload_index == overload.len); + for (param_types, signature.params) |*param_type, signature_param| { + param_type.* = switch (signature_param.kind) { + .type, .overloaded => continue, + .matches => 
|param_index| param_types[param_index], + .matches_scalar => |param_index| param_types[param_index].scalarType(self), + .matches_changed_scalar => |info| try param_types[info.index] + .changeScalar(info.scalar, self), + }; + } const function_index = try self.addFunction(try self.fnType(switch (signature.ret_len) { diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 6e7027c341..d60ccb85bb 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -993,39 +993,6 @@ pub const Builder = opaque { pub const buildShuffleVector = LLVMBuildShuffleVector; extern fn LLVMBuildShuffleVector(*Builder, V1: *Value, V2: *Value, Mask: *Value, Name: [*:0]const u8) *Value; - pub const buildAndReduce = ZigLLVMBuildAndReduce; - extern fn ZigLLVMBuildAndReduce(B: *Builder, Val: *Value) *Value; - - pub const buildOrReduce = ZigLLVMBuildOrReduce; - extern fn ZigLLVMBuildOrReduce(B: *Builder, Val: *Value) *Value; - - pub const buildXorReduce = ZigLLVMBuildXorReduce; - extern fn ZigLLVMBuildXorReduce(B: *Builder, Val: *Value) *Value; - - pub const buildIntMaxReduce = ZigLLVMBuildIntMaxReduce; - extern fn ZigLLVMBuildIntMaxReduce(B: *Builder, Val: *Value, is_signed: bool) *Value; - - pub const buildIntMinReduce = ZigLLVMBuildIntMinReduce; - extern fn ZigLLVMBuildIntMinReduce(B: *Builder, Val: *Value, is_signed: bool) *Value; - - pub const buildFPMaxReduce = ZigLLVMBuildFPMaxReduce; - extern fn ZigLLVMBuildFPMaxReduce(B: *Builder, Val: *Value) *Value; - - pub const buildFPMinReduce = ZigLLVMBuildFPMinReduce; - extern fn ZigLLVMBuildFPMinReduce(B: *Builder, Val: *Value) *Value; - - pub const buildAddReduce = ZigLLVMBuildAddReduce; - extern fn ZigLLVMBuildAddReduce(B: *Builder, Val: *Value) *Value; - - pub const buildMulReduce = ZigLLVMBuildMulReduce; - extern fn ZigLLVMBuildMulReduce(B: *Builder, Val: *Value) *Value; - - pub const buildFPAddReduce = ZigLLVMBuildFPAddReduce; - extern fn ZigLLVMBuildFPAddReduce(B: *Builder, Acc: *Value, Val: *Value) *Value; - - pub const buildFPMulReduce = ZigLLVMBuildFPMulReduce; - extern fn ZigLLVMBuildFPMulReduce(B: *Builder, Acc: *Value, Val: *Value) *Value; - pub const setFastMath = ZigLLVMSetFastMath; extern fn ZigLLVMSetFastMath(B: *Builder, on_state: bool) void; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 5d29bbb595..cdca81c218 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -1134,50 +1134,6 @@ bool ZigLLDLinkWasm(int argc, const char **argv, bool can_exit_early, bool disab return lld::wasm::link(args, llvm::outs(), llvm::errs(), can_exit_early, disable_output); } -LLVMValueRef ZigLLVMBuildAndReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateAndReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildOrReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateOrReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildXorReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateXorReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildIntMaxReduce(LLVMBuilderRef B, LLVMValueRef Val, bool is_signed) { - return wrap(unwrap(B)->CreateIntMaxReduce(unwrap(Val), is_signed)); -} - -LLVMValueRef ZigLLVMBuildIntMinReduce(LLVMBuilderRef B, LLVMValueRef Val, bool is_signed) { - return wrap(unwrap(B)->CreateIntMinReduce(unwrap(Val), is_signed)); -} - -LLVMValueRef ZigLLVMBuildFPMaxReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateFPMaxReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildFPMinReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return 
wrap(unwrap(B)->CreateFPMinReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildAddReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateAddReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildMulReduce(LLVMBuilderRef B, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateMulReduce(unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildFPAddReduce(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateFAddReduce(unwrap(Acc), unwrap(Val))); -} - -LLVMValueRef ZigLLVMBuildFPMulReduce(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Val) { - return wrap(unwrap(B)->CreateFMulReduce(unwrap(Acc), unwrap(Val))); -} - void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim) { unwrap(new_owner)->takeName(unwrap(victim)); } diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 97fc7a627f..f8ac0bf88d 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -497,18 +497,6 @@ enum ZigLLVM_ObjectFormatType { ZigLLVM_XCOFF, }; -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildAndReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildOrReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildXorReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildIntMaxReduce(LLVMBuilderRef B, LLVMValueRef Val, bool is_signed); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildIntMinReduce(LLVMBuilderRef B, LLVMValueRef Val, bool is_signed); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFPMaxReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFPMinReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildAddReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMulReduce(LLVMBuilderRef B, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFPAddReduce(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Val); -ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildFPMulReduce(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Val); - ZIG_EXTERN_C void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim); #define ZigLLVM_DIFlags_Zero 0U -- cgit v1.2.3 From 49cc1bff086e9c521c110b35692a87f75e25c7ad Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 7 Aug 2023 07:45:36 -0400 Subject: llvm: finish converting intrinsics --- src/codegen/llvm.zig | 381 +++++++++++++++++++++--------------------- src/codegen/llvm/Builder.zig | 346 ++++++++++++++++++++++++++++++++++++-- src/codegen/llvm/bindings.zig | 21 --- src/value.zig | 4 +- src/zig_llvm.cpp | 16 -- src/zig_llvm.h | 6 - 6 files changed, 525 insertions(+), 249 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 30164548be..47cfe58904 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5466,7 +5466,7 @@ pub const FuncGen = struct { const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); - _ = try self.wip.callIntrinsic(.va_copy, &.{}, &.{ dest_list, src_list }, ""); + _ = try self.wip.callIntrinsic(.none, .va_copy, &.{}, &.{ dest_list, src_list }, ""); return if (isByRef(va_list_ty, mod)) dest_list else @@ -5477,7 +5477,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const src_list = try self.resolveInst(un_op); - _ = try self.wip.callIntrinsic(.va_end, &.{}, &.{src_list}, ""); + _ = try self.wip.callIntrinsic(.none, .va_end, &.{}, &.{src_list}, ""); return .none; } @@ -5490,7 +5490,7 @@ pub 
const FuncGen = struct { const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); - _ = try self.wip.callIntrinsic(.va_start, &.{}, &.{dest_list}, ""); + _ = try self.wip.callIntrinsic(.none, .va_start, &.{}, &.{dest_list}, ""); return if (isByRef(va_list_ty, mod)) dest_list else @@ -5600,8 +5600,8 @@ pub const FuncGen = struct { const both_pl_block_end = self.wip.cursor.block; self.wip.cursor = .{ .block = end_block }; - const llvm_i1_0 = try o.builder.intValue(.i1, 0); - const llvm_i1_1 = try o.builder.intValue(.i1, 1); + const llvm_i1_0 = Builder.Value.false; + const llvm_i1_1 = Builder.Value.true; const incoming_values: [3]Builder.Value = .{ switch (op) { .eq => llvm_i1_1, @@ -5822,7 +5822,7 @@ pub const FuncGen = struct { if (can_elide_load) return payload_ptr; - return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); + return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal); } const load_ty = err_union_llvm_ty.structFields(&o.builder)[offset]; return fg.wip.load(.normal, load_ty, payload_ptr, payload_alignment, ""); @@ -6121,7 +6121,7 @@ pub const FuncGen = struct { return ptr; const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); - return self.loadByRef(ptr, elem_ty, elem_alignment, false); + return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } return self.load(ptr, slice_ty); @@ -6161,7 +6161,7 @@ pub const FuncGen = struct { try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); if (canElideLoad(self, body_tail)) return elem_ptr; const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); - return self.loadByRef(elem_ptr, elem_ty, elem_alignment, false); + return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal); } else { const elem_llvm_ty = try o.lowerType(elem_ty); if (Air.refToIndex(bin_op.lhs)) |lhs_index| { @@ -6221,7 +6221,7 @@ pub const FuncGen = struct { if (isByRef(elem_ty, mod)) { if (self.canElideLoad(body_tail)) return ptr; const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); - return self.loadByRef(ptr, elem_ty, elem_alignment, false); + return self.loadByRef(ptr, elem_ty, elem_alignment, .normal); } return self.load(ptr, ptr_ty); @@ -6351,7 +6351,7 @@ pub const FuncGen = struct { assert(llvm_field.alignment != 0); const field_alignment = Builder.Alignment.fromByteUnits(llvm_field.alignment); - return self.loadByRef(field_ptr, field_ty, field_alignment, false); + return self.loadByRef(field_ptr, field_ty, field_alignment, .normal); } else { return self.load(field_ptr, field_ptr_ty); } @@ -6366,7 +6366,7 @@ pub const FuncGen = struct { const payload_alignment = Builder.Alignment.fromByteUnits(layout.payload_align); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; - return self.loadByRef(field_ptr, field_ty, payload_alignment, false); + return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal); } else { return self.wip.load(.normal, llvm_field_ty, field_ptr, payload_alignment, ""); } @@ -7150,7 +7150,7 @@ pub const FuncGen = struct { const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); if (isByRef(payload_ty, mod)) { if (self.canElideLoad(body_tail)) return payload_ptr; - return self.loadByRef(payload_ptr, payload_ty, payload_alignment, false); + return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal); } const 
payload_llvm_ty = err_union_llvm_ty.structFields(&o.builder)[offset]; return self.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, ""); @@ -7346,7 +7346,7 @@ pub const FuncGen = struct { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - return self.wip.callIntrinsic(.@"wasm.memory.size", &.{.i32}, &.{ + return self.wip.callIntrinsic(.none, .@"wasm.memory.size", &.{.i32}, &.{ try o.builder.intValue(.i32, index), }, ""); } @@ -7355,7 +7355,7 @@ pub const FuncGen = struct { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - return self.wip.callIntrinsic(.@"wasm.memory.grow", &.{.i32}, &.{ + return self.wip.callIntrinsic(.none, .@"wasm.memory.grow", &.{.i32}, &.{ try o.builder.intValue(.i32, index), try self.resolveInst(pl_op.operand), }, ""); } @@ -7371,13 +7371,11 @@ pub const FuncGen = struct { const index = try self.resolveInst(extra.lhs); const operand = try self.resolveInst(extra.rhs); - const kind: Builder.MemoryAccessKind = switch (vector_ptr_ty.isVolatilePtr(mod)) { - false => .normal, - true => .@"volatile", - }; + const access_kind: Builder.MemoryAccessKind = + if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); const alignment = Builder.Alignment.fromByteUnits(vector_ptr_ty.ptrAlignment(mod)); - const loaded = try self.wip.load(kind, elem_llvm_ty, vector_ptr, alignment, ""); + const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, ""); const new_vector = try self.wip.insertElement(loaded, operand, index, ""); _ = try self.store(vector_ptr, vector_ptr_ty, new_vector, .none); @@ -7395,6 +7393,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, inst_ty, 2, .{ lhs, rhs }); return self.wip.callIntrinsic( + .none, if (scalar_ty.isSignedInt(mod)) .smin else .umin, &.{try o.lowerType(inst_ty)}, &.{ lhs, rhs }, @@ -7413,6 +7412,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, inst_ty, 2, .{ lhs, rhs }); return self.wip.callIntrinsic( + .none, if (scalar_ty.isSignedInt(mod)) .smax else .umax, &.{try o.lowerType(inst_ty)}, &.{ lhs, rhs }, @@ -7462,12 +7462,19 @@ pub const FuncGen = struct { const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; const llvm_inst_ty = try o.lowerType(inst_ty); - const results = try fg.wip.callIntrinsic(intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, ""); + const results = + try fg.wip.callIntrinsic(.none, intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, ""); const overflow_bits = try fg.wip.extractValue(results, &.{1}, ""); const overflow_bits_ty = overflow_bits.typeOfWip(&fg.wip); const overflow_bit = if (overflow_bits_ty.isVector(&o.builder)) - try fg.wip.callIntrinsic(.@"vector.reduce.or", &.{overflow_bits_ty}, &.{overflow_bits}, "") + try fg.wip.callIntrinsic( + .none, + .@"vector.reduce.or", + &.{overflow_bits_ty}, + &.{overflow_bits}, + "", + ) else overflow_bits; @@ -7501,6 +7508,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); return self.wip.callIntrinsic( + .none, if (scalar_ty.isSignedInt(mod)) .@"sadd.sat" else .@"uadd.sat", &.{try o.lowerType(inst_ty)}, &.{ lhs, rhs }, @@ -7542,6 +7550,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); return 
self.wip.callIntrinsic( + .none, if (scalar_ty.isSignedInt(mod)) .@"ssub.sat" else .@"usub.sat", &.{try o.lowerType(inst_ty)}, &.{ lhs, rhs }, @@ -7583,6 +7592,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); return self.wip.callIntrinsic( + .none, if (scalar_ty.isSignedInt(mod)) .@"smul.fix.sat" else .@"umul.fix.sat", &.{try o.lowerType(inst_ty)}, &.{ lhs, rhs, try o.builder.intValue(.i32, 0) }, @@ -7793,7 +7803,8 @@ pub const FuncGen = struct { const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; const llvm_inst_ty = try o.lowerType(inst_ty); const llvm_lhs_ty = try o.lowerType(lhs_ty); - const results = try self.wip.callIntrinsic(intrinsic, &.{llvm_lhs_ty}, &.{ lhs, rhs }, ""); + const results = + try self.wip.callIntrinsic(.none, intrinsic, &.{llvm_lhs_ty}, &.{ lhs, rhs }, ""); const result_val = try self.wip.extractValue(results, &.{0}, ""); const overflow_bit = try self.wip.extractValue(results, &.{1}, ""); @@ -7998,27 +8009,49 @@ pub const FuncGen = struct { if (op != .tan and intrinsicsAllowed(scalar_ty, target)) switch (op) { // Some operations are dedicated LLVM instructions, not available as intrinsics .neg => return self.wip.un(.fneg, params[0], ""), - .add => return self.wip.bin(.fadd, params[0], params[1], ""), - .sub => return self.wip.bin(.fsub, params[0], params[1], ""), - .mul => return self.wip.bin(.fmul, params[0], params[1], ""), - .div => return self.wip.bin(.fdiv, params[0], params[1], ""), - .fmod => return self.wip.bin(.frem, params[0], params[1], ""), - .fmax => return self.wip.callIntrinsic(.maxnum, &.{llvm_ty}, ¶ms, ""), - .fmin => return self.wip.callIntrinsic(.minnum, &.{llvm_ty}, ¶ms, ""), - .ceil => return self.wip.callIntrinsic(.ceil, &.{llvm_ty}, ¶ms, ""), - .cos => return self.wip.callIntrinsic(.cos, &.{llvm_ty}, ¶ms, ""), - .exp => return self.wip.callIntrinsic(.exp, &.{llvm_ty}, ¶ms, ""), - .exp2 => return self.wip.callIntrinsic(.exp2, &.{llvm_ty}, ¶ms, ""), - .fabs => return self.wip.callIntrinsic(.fabs, &.{llvm_ty}, ¶ms, ""), - .floor => return self.wip.callIntrinsic(.floor, &.{llvm_ty}, ¶ms, ""), - .log => return self.wip.callIntrinsic(.log, &.{llvm_ty}, ¶ms, ""), - .log10 => return self.wip.callIntrinsic(.log10, &.{llvm_ty}, ¶ms, ""), - .log2 => return self.wip.callIntrinsic(.log2, &.{llvm_ty}, ¶ms, ""), - .round => return self.wip.callIntrinsic(.round, &.{llvm_ty}, ¶ms, ""), - .sin => return self.wip.callIntrinsic(.sin, &.{llvm_ty}, ¶ms, ""), - .sqrt => return self.wip.callIntrinsic(.sqrt, &.{llvm_ty}, ¶ms, ""), - .trunc => return self.wip.callIntrinsic(.trunc, &.{llvm_ty}, ¶ms, ""), - .fma => return self.wip.callIntrinsic(.fma, &.{llvm_ty}, ¶ms, ""), + .add, .sub, .mul, .div, .fmod => return self.wip.bin(switch (op) { + .add => .fadd, + .sub => .fsub, + .mul => .fmul, + .div => .fdiv, + .fmod => .frem, + else => unreachable, + }, params[0], params[1], ""), + .fmax, + .fmin, + .ceil, + .cos, + .exp, + .exp2, + .fabs, + .floor, + .log, + .log10, + .log2, + .round, + .sin, + .sqrt, + .trunc, + .fma, + => return self.wip.callIntrinsic(.none, switch (op) { + .fmax => .maxnum, + .fmin => .minnum, + .ceil => .ceil, + .cos => .cos, + .exp => .exp, + .exp2 => .exp2, + .fabs => .fabs, + .floor => .floor, + .log => .log, + .log10 => .log10, + .log2 => .log2, + .round => .round, + .sin => .sin, + .sqrt => .sqrt, + .trunc => .trunc, + .fma => .fma, + else => unreachable, + }, &.{llvm_ty}, ¶ms, ""), .tan => unreachable, }; @@ -8215,6 +8248,7 @@ pub const FuncGen = 
struct { const llvm_lhs_ty = try o.lowerType(lhs_ty); const llvm_lhs_scalar_ty = llvm_lhs_ty.scalarType(&o.builder); const result = try self.wip.callIntrinsic( + .none, if (lhs_scalar_ty.isSignedInt(mod)) .@"sshl.sat" else .@"ushl.sat", &.{llvm_lhs_ty}, &.{ lhs, casted_rhs }, @@ -8588,21 +8622,14 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. - const fill_byte = if (safety) - try o.builder.intConst(.i8, 0xaa) - else - try o.builder.undefConst(.i8); - const operand_size = operand_ty.abiSize(mod); - const usize_ty = try o.lowerType(Type.usize); - const len = try o.builder.intValue(usize_ty, operand_size); - const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( - dest_ptr.toLlvm(&self.wip), - fill_byte.toLlvm(&o.builder), - len.toLlvm(&self.wip), - @intCast(dest_ptr_align.toByteUnits() orelse 0), - ptr_ty.isVolatilePtr(mod), - ), &self.wip); + const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod)); + _ = try self.wip.callMemSet( + dest_ptr, + Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)), + if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8), + len, + if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, + ); if (safety and mod.comp.bin_file.options.valgrind) { try self.valgrindMarkUndef(dest_ptr, len); } @@ -8655,14 +8682,14 @@ pub const FuncGen = struct { fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - _ = try self.wip.callIntrinsic(.trap, &.{}, &.{}, ""); + _ = try self.wip.callIntrinsic(.none, .trap, &.{}, &.{}, ""); _ = try self.wip.@"unreachable"(); return .none; } fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - _ = try self.wip.callIntrinsic(.debugtrap, &.{}, &.{}, ""); + _ = try self.wip.callIntrinsic(.none, .debugtrap, &.{}, &.{}, ""); return .none; } @@ -8674,7 +8701,7 @@ pub const FuncGen = struct { // https://github.com/ziglang/zig/issues/11946 return o.builder.intValue(llvm_usize, 0); } - const result = try self.wip.callIntrinsic(.returnaddress, &.{}, &.{ + const result = try self.wip.callIntrinsic(.none, .returnaddress, &.{}, &.{ try o.builder.intValue(.i32, 0), }, ""); return self.wip.cast(.ptrtoint, result, llvm_usize, ""); @@ -8683,7 +8710,7 @@ pub const FuncGen = struct { fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; - const result = try self.wip.callIntrinsic(.frameaddress, &.{.ptr}, &.{ + const result = try self.wip.callIntrinsic(.none, .frameaddress, &.{.ptr}, &.{ try o.builder.intValue(.i32, 0), }, ""); return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), ""); @@ -8835,16 +8862,14 @@ pub const FuncGen = struct { const ptr_alignment = Builder.Alignment.fromByteUnits( info.flags.alignment.toByteUnitsOptional() orelse info.child.toType().abiAlignment(mod), ); - const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { - false => .normal, - true => .@"volatile", - }; + const access_kind: Builder.MemoryAccessKind = + if (info.flags.is_volatile) .@"volatile" else .normal; const elem_llvm_ty = try o.lowerType(elem_ty); if (llvm_abi_ty != .none) { // operand needs widening and truncating const loaded = try self.wip.loadAtomic( - ptr_kind, + 
access_kind, llvm_abi_ty, ptr, self.sync_scope, @@ -8855,7 +8880,7 @@ pub const FuncGen = struct { return self.wip.cast(.trunc, loaded, elem_llvm_ty, ""); } return self.wip.loadAtomic( - ptr_kind, + access_kind, elem_llvm_ty, ptr, self.sync_scope, @@ -8902,7 +8927,8 @@ pub const FuncGen = struct { const elem_ty = self.typeOf(bin_op.rhs); const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty); - const is_volatile = ptr_ty.isVolatilePtr(mod); + const access_kind: Builder.MemoryAccessKind = + if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; // Any WebAssembly runtime will trap when the destination pointer is out-of-bounds, regardless // of the length. This means we need to emit a check where we skip the memset when the length @@ -8923,17 +8949,10 @@ pub const FuncGen = struct { try o.builder.undefValue(.i8); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { - try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, access_kind); } else { - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( - dest_ptr.toLlvm(&self.wip), - fill_byte.toLlvm(&self.wip), - len.toLlvm(&self.wip), - @intCast(dest_ptr_align.toByteUnits() orelse 0), - is_volatile, - ), &self.wip); + _ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind); } - if (safety and mod.comp.bin_file.options.valgrind) { try self.valgrindMarkUndef(dest_ptr, len); } @@ -8945,19 +8964,12 @@ pub const FuncGen = struct { // repeating byte pattern of 0 bytes. In such case, the memset // intrinsic can be used. if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| { - const fill_byte = try self.resolveValue(.{ .ty = Type.u8, .val = byte_val }); + const fill_byte = try o.builder.intValue(.i8, byte_val); const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); - if (intrinsic_len0_traps) { - try self.safeWasmMemset(dest_ptr, fill_byte.toValue(), len, dest_ptr_align, is_volatile); + try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, access_kind); } else { - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( - dest_ptr.toLlvm(&self.wip), - fill_byte.toLlvm(&o.builder), - len.toLlvm(&self.wip), - @intCast(dest_ptr_align.toByteUnits() orelse 0), - is_volatile, - ), &self.wip); + _ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind); } return .none; } @@ -8972,15 +8984,9 @@ pub const FuncGen = struct { const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { - try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, access_kind); } else { - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( - dest_ptr.toLlvm(&self.wip), - fill_byte.toLlvm(&self.wip), - len.toLlvm(&self.wip), - @intCast(dest_ptr_align.toByteUnits() orelse 0), - is_volatile, - ), &self.wip); + _ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind); } return .none; } @@ -9006,10 +9012,10 @@ pub const FuncGen = struct { const body_block = try self.wip.block(1, "InlineMemsetBody"); const end_block = try self.wip.block(1, "InlineMemsetEnd"); - const usize_ty = try o.lowerType(Type.usize); + const llvm_usize_ty = try o.lowerType(Type.usize); const len = 
switch (ptr_ty.ptrSize(mod)) { .Slice => try self.wip.extractValue(dest_slice, &.{1}, ""), - .One => try o.builder.intValue(usize_ty, ptr_ty.childType(mod).arrayLen(mod)), + .One => try o.builder.intValue(llvm_usize_ty, ptr_ty.childType(mod).arrayLen(mod)), .Many, .C => unreachable, }; const elem_llvm_ty = try o.lowerType(elem_ty); @@ -9022,25 +9028,22 @@ pub const FuncGen = struct { _ = try self.wip.brCond(end, body_block, end_block); self.wip.cursor = .{ .block = body_block }; - const elem_abi_alignment = elem_ty.abiAlignment(mod); - const it_ptr_alignment = Builder.Alignment.fromByteUnits( - @min(elem_abi_alignment, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)), + const elem_abi_align = elem_ty.abiAlignment(mod); + const it_ptr_align = Builder.Alignment.fromByteUnits( + @min(elem_abi_align, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)), ); if (isByRef(elem_ty, mod)) { - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( - it_ptr.toValue().toLlvm(&self.wip), - @intCast(it_ptr_alignment.toByteUnits() orelse 0), - value.toLlvm(&self.wip), - elem_abi_alignment, - (try o.builder.intConst(usize_ty, elem_abi_size)).toLlvm(&o.builder), - is_volatile, - ), &self.wip); - } else _ = try self.wip.store(switch (is_volatile) { - false => .normal, - true => .@"volatile", - }, value, it_ptr.toValue(), it_ptr_alignment); + _ = try self.wip.callMemCpy( + it_ptr.toValue(), + it_ptr_align, + value, + Builder.Alignment.fromByteUnits(elem_abi_align), + try o.builder.intValue(llvm_usize_ty, elem_abi_size), + access_kind, + ); + } else _ = try self.wip.store(access_kind, value, it_ptr.toValue(), it_ptr_align); const next_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, it_ptr.toValue(), &.{ - try o.builder.intValue(usize_ty, 1), + try o.builder.intValue(llvm_usize_ty, 1), }, ""); _ = try self.wip.br(loop_block); @@ -9055,7 +9058,7 @@ pub const FuncGen = struct { fill_byte: Builder.Value, len: Builder.Value, dest_ptr_align: Builder.Alignment, - is_volatile: bool, + access_kind: Builder.MemoryAccessKind, ) !void { const o = self.dg.object; const llvm_usize_ty = try o.lowerType(Type.usize); @@ -9064,13 +9067,7 @@ pub const FuncGen = struct { const end_block = try self.wip.block(2, "MemsetTrapEnd"); _ = try self.wip.brCond(cond, memset_block, end_block); self.wip.cursor = .{ .block = memset_block }; - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( - dest_ptr.toLlvm(&self.wip), - fill_byte.toLlvm(&self.wip), - len.toLlvm(&self.wip), - @intCast(dest_ptr_align.toByteUnits() orelse 0), - is_volatile, - ), &self.wip); + _ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind); _ = try self.wip.br(end_block); self.wip.cursor = .{ .block = end_block }; } @@ -9086,7 +9083,8 @@ pub const FuncGen = struct { const src_ptr = try self.sliceOrArrayPtr(src_slice, src_ptr_ty); const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = try self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); - const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); + const access_kind: Builder.MemoryAccessKind = if (src_ptr_ty.isVolatilePtr(mod) or + dest_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; // When bulk-memory is enabled, this will be lowered to WebAssembly's memory.copy instruction. // This instruction will trap on an invalid address, regardless of the length. 
@@ -9103,27 +9101,27 @@ pub const FuncGen = struct { const end_block = try self.wip.block(2, "MemcpyTrapEnd"); _ = try self.wip.brCond(cond, memcpy_block, end_block); self.wip.cursor = .{ .block = memcpy_block }; - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( - dest_ptr.toLlvm(&self.wip), - dest_ptr_ty.ptrAlignment(mod), - src_ptr.toLlvm(&self.wip), - src_ptr_ty.ptrAlignment(mod), - len.toLlvm(&self.wip), - is_volatile, - ), &self.wip); + _ = try self.wip.callMemCpy( + dest_ptr, + Builder.Alignment.fromByteUnits(dest_ptr_ty.ptrAlignment(mod)), + src_ptr, + Builder.Alignment.fromByteUnits(src_ptr_ty.ptrAlignment(mod)), + len, + access_kind, + ); _ = try self.wip.br(end_block); self.wip.cursor = .{ .block = end_block }; return .none; } - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( - dest_ptr.toLlvm(&self.wip), - dest_ptr_ty.ptrAlignment(mod), - src_ptr.toLlvm(&self.wip), - src_ptr_ty.ptrAlignment(mod), - len.toLlvm(&self.wip), - is_volatile, - ), &self.wip); + _ = try self.wip.callMemCpy( + dest_ptr, + Builder.Alignment.fromByteUnits(dest_ptr_ty.ptrAlignment(mod)), + src_ptr, + Builder.Alignment.fromByteUnits(src_ptr_ty.ptrAlignment(mod)), + len, + access_kind, + ); return .none; } @@ -9196,8 +9194,8 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const result = - try self.wip.callIntrinsic( + const result = try self.wip.callIntrinsic( + .none, intrinsic, &.{try o.lowerType(operand_ty)}, &.{ operand, .false }, @@ -9214,6 +9212,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const result = try self.wip.callIntrinsic( + .none, intrinsic, &.{try o.lowerType(operand_ty)}, &.{operand}, @@ -9251,7 +9250,7 @@ pub const FuncGen = struct { bits = bits + 8; } - const result = try self.wip.callIntrinsic(.bswap, &.{llvm_operand_ty}, &.{operand}, ""); + const result = try self.wip.callIntrinsic(.none, .bswap, &.{llvm_operand_ty}, &.{operand}, ""); return self.wip.conv(.unsigned, result, try o.lowerType(inst_ty), ""); } @@ -9646,14 +9645,14 @@ pub const FuncGen = struct { const llvm_scalar_ty = try o.lowerType(scalar_ty); switch (reduce.operation) { - .And, .Or, .Xor => return self.wip.callIntrinsic(switch (reduce.operation) { + .And, .Or, .Xor => return self.wip.callIntrinsic(.none, switch (reduce.operation) { .And => .@"vector.reduce.and", .Or => .@"vector.reduce.or", .Xor => .@"vector.reduce.xor", else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Min, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.wip.callIntrinsic(switch (reduce.operation) { + .Int => return self.wip.callIntrinsic(.none, switch (reduce.operation) { .Min => if (scalar_ty.isSignedInt(mod)) .@"vector.reduce.smin" else @@ -9665,7 +9664,7 @@ pub const FuncGen = struct { else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Float => if (intrinsicsAllowed(scalar_ty, target)) - return self.wip.callIntrinsic(switch (reduce.operation) { + return self.wip.callIntrinsic(.none, switch (reduce.operation) { .Min => .@"vector.reduce.fmin", .Max => .@"vector.reduce.fmax", else => unreachable, @@ -9673,13 +9672,13 @@ pub const FuncGen = struct { else => unreachable, }, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.wip.callIntrinsic(switch (reduce.operation) { + .Int => return self.wip.callIntrinsic(.none, switch (reduce.operation) { .Add => .@"vector.reduce.add", .Mul => .@"vector.reduce.mul", 
else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Float => if (intrinsicsAllowed(scalar_ty, target)) - return self.wip.callIntrinsic(switch (reduce.operation) { + return self.wip.callIntrinsic(.none, switch (reduce.operation) { .Add => .@"vector.reduce.fadd", .Mul => .@"vector.reduce.fmul", else => unreachable, @@ -10032,7 +10031,7 @@ pub const FuncGen = struct { .data => {}, } - _ = try self.wip.callIntrinsic(.prefetch, &.{.ptr}, &.{ + _ = try self.wip.callIntrinsic(.none, .prefetch, &.{.ptr}, &.{ try self.resolveInst(prefetch.ptr), try o.builder.intValue(.i32, prefetch.rw), try o.builder.intValue(.i32, prefetch.locality), @@ -10056,14 +10055,12 @@ pub const FuncGen = struct { default: u32, comptime basename: []const u8, ) !Builder.Value { - const o = self.dg.object; - const intrinsic = switch (dimension) { + return self.wip.callIntrinsic(.none, switch (dimension) { 0 => @field(Builder.Intrinsic, basename ++ ".x"), 1 => @field(Builder.Intrinsic, basename ++ ".y"), 2 => @field(Builder.Intrinsic, basename ++ ".z"), - else => return o.builder.intValue(.i32, default), - }; - return self.wip.callIntrinsic(intrinsic, &.{}, &.{}, ""); + else => return self.dg.object.builder.intValue(.i32, default), + }, &.{}, &.{}, ""); } fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -10087,7 +10084,7 @@ pub const FuncGen = struct { // Fetch the dispatch pointer, which points to this structure: // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913 - const dispatch_ptr = try self.wip.callIntrinsic(.@"amdgcn.dispatch.ptr", &.{}, &.{}, ""); + const dispatch_ptr = try self.wip.callIntrinsic(.none, .@"amdgcn.dispatch.ptr", &.{}, &.{}, ""); // Load the work_group_* member from the struct as u16. // Just treat the dispatch pointer as an array of u16 to keep things simple. 
@@ -10188,7 +10185,7 @@ pub const FuncGen = struct { if (can_elide_load) return payload_ptr; - return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); + return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal); } const payload_llvm_ty = try o.lowerType(payload_ty); return fg.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, ""); @@ -10297,7 +10294,7 @@ pub const FuncGen = struct { ptr: Builder.Value, pointee_type: Type, ptr_alignment: Builder.Alignment, - is_volatile: bool, + access_kind: Builder.MemoryAccessKind, ) !Builder.Value { const o = fg.dg.object; const mod = o.module; @@ -10306,16 +10303,15 @@ pub const FuncGen = struct { @max(ptr_alignment.toByteUnits() orelse 0, pointee_type.abiAlignment(mod)), ); const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align); - const usize_ty = try o.lowerType(Type.usize); const size_bytes = pointee_type.abiSize(mod); - _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildMemCpy( - result_ptr.toLlvm(&fg.wip), - @intCast(result_align.toByteUnits() orelse 0), - ptr.toLlvm(&fg.wip), - @intCast(ptr_alignment.toByteUnits() orelse 0), - (try o.builder.intConst(usize_ty, size_bytes)).toLlvm(&o.builder), - is_volatile, - ), &fg.wip); + _ = try fg.wip.callMemCpy( + result_ptr, + result_align, + ptr, + ptr_alignment, + try o.builder.intValue(try o.lowerType(Type.usize), size_bytes), + access_kind, + ); return result_ptr; } @@ -10332,10 +10328,8 @@ pub const FuncGen = struct { const ptr_alignment = Builder.Alignment.fromByteUnits( info.flags.alignment.toByteUnitsOptional() orelse elem_ty.abiAlignment(mod), ); - const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { - false => .normal, - true => .@"volatile", - }; + const access_kind: Builder.MemoryAccessKind = + if (info.flags.is_volatile) .@"volatile" else .normal; assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { @@ -10343,19 +10337,20 @@ pub const FuncGen = struct { const vec_elem_ty = try o.lowerType(elem_ty); const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, ""); + const loaded_vector = try self.wip.load(access_kind, vec_ty, ptr, ptr_alignment, ""); return self.wip.extractElement(loaded_vector, index_u32, ""); } if (info.packed_offset.host_size == 0) { if (isByRef(elem_ty, mod)) { - return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile); + return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind); } - return self.wip.load(ptr_kind, try o.lowerType(elem_ty), ptr, ptr_alignment, ""); + return self.wip.load(access_kind, try o.lowerType(elem_ty), ptr, ptr_alignment, ""); } const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); - const containing_int = try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, ""); + const containing_int = + try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); const elem_bits = ptr_ty.childType(mod).bitSize(mod); const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset); @@ -10402,10 +10397,8 @@ pub const FuncGen = struct { return; } const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); - const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { - false => .normal, - true => .@"volatile", - }; + const access_kind: Builder.MemoryAccessKind = + if 
(info.flags.is_volatile) .@"volatile" else .normal; assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { @@ -10413,12 +10406,12 @@ pub const FuncGen = struct { const vec_elem_ty = try o.lowerType(elem_ty); const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, ""); + const loaded_vector = try self.wip.load(access_kind, vec_ty, ptr, ptr_alignment, ""); const modified_vector = try self.wip.insertElement(loaded_vector, elem, index_u32, ""); assert(ordering == .none); - _ = try self.wip.store(ptr_kind, modified_vector, ptr, ptr_alignment); + _ = try self.wip.store(access_kind, modified_vector, ptr, ptr_alignment); return; } @@ -10426,7 +10419,7 @@ pub const FuncGen = struct { const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); assert(ordering == .none); const containing_int = - try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, ""); + try self.wip.load(access_kind, containing_int_ty, ptr, ptr_alignment, ""); const elem_bits = ptr_ty.childType(mod).bitSize(mod); const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit @@ -10450,23 +10443,29 @@ pub const FuncGen = struct { const ored_value = try self.wip.bin(.@"or", shifted_value, anded_containing_int, ""); assert(ordering == .none); - _ = try self.wip.store(ptr_kind, ored_value, ptr, ptr_alignment); + _ = try self.wip.store(access_kind, ored_value, ptr, ptr_alignment); return; } if (!isByRef(elem_ty, mod)) { - _ = try self.wip.storeAtomic(ptr_kind, elem, ptr, self.sync_scope, ordering, ptr_alignment); + _ = try self.wip.storeAtomic( + access_kind, + elem, + ptr, + self.sync_scope, + ordering, + ptr_alignment, + ); return; } assert(ordering == .none); - const size_bytes = elem_ty.abiSize(mod); - _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( - ptr.toLlvm(&self.wip), - @intCast(ptr_alignment.toByteUnits() orelse 0), - elem.toLlvm(&self.wip), - elem_ty.abiAlignment(mod), - (try o.builder.intConst(try o.lowerType(Type.usize), size_bytes)).toLlvm(&o.builder), - info.flags.is_volatile, - ), &self.wip); + _ = try self.wip.callMemCpy( + ptr, + ptr_alignment, + elem, + Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)), + try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)), + access_kind, + ); } fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void { diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 0296fbd147..5495ea22e8 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -2397,7 +2397,7 @@ pub const Intrinsic = enum { @"udiv.fix.sat", // Specialised Arithmetic - canonicalisze, + canonicalize, fmuladd, // Vector Reduction @@ -2416,14 +2416,15 @@ pub const Intrinsic = enum { @"vector.reduce.fmin", @"vector.reduce.fmaximum", @"vector.reduce.fminimum", - @"vector.reduce.insert", - @"vector.reduce.extract", @"vector.insert", @"vector.extract", + // Floating-Point Test + @"is.fpclass", + // General - @"llvm.var.annotation", - @"llvm.ptr.annotation", + @"var.annotation", + @"ptr.annotation", annotation, @"codeview.annotation", trap, @@ -2442,7 +2443,7 @@ pub const Intrinsic = enum { @"arithmetic.fence", donothing, @"load.relative", - @"llvm.sideeffect", + sideeffect, @"is.constant", ptrmask, 
@"threadlocal.address", @@ -2483,10 +2484,7 @@ pub const Intrinsic = enum { }; }; - const signatures = std.enums.EnumArray(Intrinsic, Signature).initDefault(.{ - .ret_len = 0, - .params = &.{}, - }, .{ + const signatures = std.enums.EnumArray(Intrinsic, Signature).init(.{ .va_start = .{ .ret_len = 0, .params = &.{ @@ -2603,6 +2601,56 @@ pub const Intrinsic = enum { }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, + .memcpy = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .overloaded, .attrs = &.{ .@"noalias", .nocapture, .writeonly } }, + .{ .kind = .overloaded, .attrs = &.{ .@"noalias", .nocapture, .readonly } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nounwind, .willreturn, .{ .memory = .{ .argmem = .readwrite } } }, + }, + .@"memcpy.inline" = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .overloaded, .attrs = &.{ .@"noalias", .nocapture, .writeonly } }, + .{ .kind = .overloaded, .attrs = &.{ .@"noalias", .nocapture, .readonly } }, + .{ .kind = .overloaded, .attrs = &.{.immarg} }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nounwind, .willreturn, .{ .memory = .{ .argmem = .readwrite } } }, + }, + .memmove = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .overloaded, .attrs = &.{ .nocapture, .writeonly } }, + .{ .kind = .overloaded, .attrs = &.{ .nocapture, .readonly } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nounwind, .willreturn, .{ .memory = .{ .argmem = .readwrite } } }, + }, + .memset = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .overloaded, .attrs = &.{ .nocapture, .writeonly } }, + .{ .kind = .{ .type = .i8 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nounwind, .willreturn, .{ .memory = .{ .argmem = .write } } }, + }, + .@"memset.inline" = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .overloaded, .attrs = &.{ .nocapture, .writeonly } }, + .{ .kind = .{ .type = .i8 } }, + .{ .kind = .overloaded, .attrs = &.{.immarg} }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nounwind, .willreturn, .{ .memory = .{ .argmem = .write } } }, + }, .sqrt = .{ .ret_len = 1, .params = &.{ @@ -2884,7 +2932,7 @@ pub const Intrinsic = enum { .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, - .{ .kind = .{ .type = .i1 } }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, @@ -2893,7 +2941,7 @@ pub const Intrinsic = enum { .params = &.{ .{ .kind = .overloaded }, .{ .kind = .{ .matches = 0 } }, - .{ .kind = .{ .type = .i1 } }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, }, .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, @@ -3115,6 +3163,25 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, + .canonicalize = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = 
Attribute.Memory.all(.none) } }, + }, + .fmuladd = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"vector.reduce.add" = .{ .ret_len = 1, .params = &.{ @@ -3257,6 +3324,57 @@ pub const Intrinsic = enum { .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, }, + .@"is.fpclass" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .matches_changed_scalar = .{ .index = 1, .scalar = .i1 } } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i32 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + + .@"var.annotation" = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 1 } }, + .{ .kind = .{ .type = .i32 } }, + .{ .kind = .{ .matches = 1 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .inaccessiblemem = .readwrite } } }, + }, + .@"ptr.annotation" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 2 } }, + .{ .kind = .{ .type = .i32 } }, + .{ .kind = .{ .matches = 2 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .inaccessiblemem = .readwrite } } }, + }, + .annotation = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 2 } }, + .{ .kind = .{ .type = .i32 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .inaccessiblemem = .readwrite } } }, + }, + .@"codeview.annotation" = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .{ .type = .metadata } }, + }, + .attrs = &.{ .nocallback, .noduplicate, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .inaccessiblemem = .readwrite } } }, + }, .trap = .{ .ret_len = 0, .params = &.{}, @@ -3274,6 +3392,156 @@ pub const Intrinsic = enum { }, .attrs = &.{ .cold, .noreturn, .nounwind }, }, + .stackprotector = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .ptr } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, + }, + .stackguard = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn }, + }, + .objectsize = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .overloaded }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + .{ .kind = .{ .type = .i1 }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .expect = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"expect.with.probability" = .{ + .ret_len = 1, + .params = &.{ + .{ 
.kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .{ .type = .double }, .attrs = &.{.immarg} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .assume = .{ + .ret_len = 0, + .params = &.{ + .{ .kind = .{ .type = .i1 }, .attrs = &.{.noundef} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .inaccessiblemem = .write } } }, + }, + .@"ssa.copy" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 }, .attrs = &.{.returned} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"type.test" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .type = .i1 } }, + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .metadata } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"type.checked.load" = .{ + .ret_len = 2, + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .i1 } }, + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .i32 } }, + .{ .kind = .{ .type = .metadata } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"type.checked.load.relative" = .{ + .ret_len = 2, + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .i1 } }, + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .i32 } }, + .{ .kind = .{ .type = .metadata } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"arithmetic.fence" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .donothing = .{ + .ret_len = 0, + .params = &.{}, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"load.relative" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .{ .type = .ptr } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .argmem = .read } } }, + }, + .sideeffect = .{ + .ret_len = 0, + .params = &.{}, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = .{ .inaccessiblemem = .readwrite } } }, + }, + .@"is.constant" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .{ .type = .i1 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .convergent, .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .ptrmask = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + .{ .kind = .{ .matches = 0 } }, + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .@"threadlocal.address" = .{ + .ret_len = 1, + .params = &.{ + .{ .kind = .overloaded, .attrs = &.{.nonnull} }, + .{ .kind = .{ .matches = 0 }, .attrs = &.{.nonnull} }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .speculatable, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, + .vscale = .{ + 
.ret_len = 1, + .params = &.{ + .{ .kind = .overloaded }, + }, + .attrs = &.{ .nocallback, .nofree, .nosync, .nounwind, .willreturn, .{ .memory = Attribute.Memory.all(.none) } }, + }, .@"amdgcn.workitem.id.x" = .{ .ret_len = 1, @@ -5391,6 +5659,7 @@ pub const WipFunction = struct { pub fn callIntrinsic( self: *WipFunction, + function_attributes: FunctionAttributes, id: Intrinsic, overload: []const Type, args: []const Value, @@ -5400,7 +5669,7 @@ pub const WipFunction = struct { return self.call( .normal, CallConv.default, - .none, + function_attributes, intrinsic.typeOf(self.builder), intrinsic.toValue(self.builder), args, @@ -5408,6 +5677,57 @@ pub const WipFunction = struct { ); } + pub fn callMemCpy( + self: *WipFunction, + dst: Value, + dst_align: Alignment, + src: Value, + src_align: Alignment, + len: Value, + kind: MemoryAccessKind, + ) Allocator.Error!Instruction.Index { + var dst_attrs = [_]Attribute.Index{try self.builder.attr(.{ .@"align" = dst_align })}; + var src_attrs = [_]Attribute.Index{try self.builder.attr(.{ .@"align" = src_align })}; + const value = try self.callIntrinsic( + try self.builder.fnAttrs(&.{ + .none, + .none, + try self.builder.attrs(&dst_attrs), + try self.builder.attrs(&src_attrs), + }), + .memcpy, + &.{ dst.typeOfWip(self), src.typeOfWip(self), len.typeOfWip(self) }, + &.{ dst, src, len, switch (kind) { + .normal => Value.false, + .@"volatile" => Value.true, + } }, + undefined, + ); + return value.unwrap().instruction; + } + + pub fn callMemSet( + self: *WipFunction, + dst: Value, + dst_align: Alignment, + val: Value, + len: Value, + kind: MemoryAccessKind, + ) Allocator.Error!Instruction.Index { + var dst_attrs = [_]Attribute.Index{try self.builder.attr(.{ .@"align" = dst_align })}; + const value = try self.callIntrinsic( + try self.builder.fnAttrs(&.{ .none, .none, try self.builder.attrs(&dst_attrs) }), + .memset, + &.{ dst.typeOfWip(self), len.typeOfWip(self) }, + &.{ dst, val, len, switch (kind) { + .normal => Value.false, + .@"volatile" => Value.true, + } }, + undefined, + ); + return value.unwrap().instruction; + } + pub fn vaArg(self: *WipFunction, list: Value, ty: Type, name: []const u8) Allocator.Error!Value { try self.ensureUnusedExtraCapacity(1, Instruction.VaArg, 0); const instruction = try self.addInst(name, .{ diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index d60ccb85bb..d5a1445176 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -951,27 +951,6 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *Value; - pub const buildMemSet = ZigLLVMBuildMemSet; - extern fn ZigLLVMBuildMemSet( - B: *Builder, - Ptr: *Value, - Val: *Value, - Len: *Value, - Align: c_uint, - is_volatile: bool, - ) *Value; - - pub const buildMemCpy = ZigLLVMBuildMemCpy; - extern fn ZigLLVMBuildMemCpy( - B: *Builder, - Dst: *Value, - DstAlign: c_uint, - Src: *Value, - SrcAlign: c_uint, - Size: *Value, - is_volatile: bool, - ) *Value; - pub const buildExactUDiv = LLVMBuildExactUDiv; extern fn LLVMBuildExactUDiv(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; diff --git a/src/value.zig b/src/value.zig index 6488f98e43..1a2e85bb1e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -3831,7 +3831,7 @@ pub const Value = struct { /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. 
-    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?Value {
+    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?u8 {
         const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null;
         assert(abi_size >= 1);
         const byte_buffer = try mod.gpa.alloc(u8, abi_size);
@@ -3852,7 +3852,7 @@ pub const Value = struct {
         for (byte_buffer[1..]) |byte| {
             if (byte != first_byte) return null;
         }
-        return try mod.intValue(Type.u8, first_byte);
+        return first_byte;
     }
 
     pub fn isGenericPoison(val: Value) bool {
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index cdca81c218..5ae8944c67 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -408,22 +408,6 @@ void ZigLLVMSetTailCallKind(LLVMValueRef Call, enum ZigLLVMTailCallKind TailCall
     unwrap(Call)->setTailCallKind(TCK);
 }
 
-LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
-        LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile)
-{
-    CallInst *call_inst = unwrap(B)->CreateMemCpy(unwrap(Dst),
-        MaybeAlign(DstAlign), unwrap(Src), MaybeAlign(SrcAlign), unwrap(Size), isVolatile);
-    return wrap(call_inst);
-}
-
-LLVMValueRef ZigLLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, LLVMValueRef Val, LLVMValueRef Size,
-        unsigned Align, bool isVolatile)
-{
-    CallInst *call_inst = unwrap(B)->CreateMemSet(unwrap(Ptr), unwrap(Val), unwrap(Size),
-        MaybeAlign(Align), isVolatile);
-    return wrap(call_inst);
-}
-
 void ZigLLVMFnSetSubprogram(LLVMValueRef fn, ZigLLVMDISubprogram *subprogram) {
     assert( isa(unwrap(fn)) );
     Function *unwrapped_function = reinterpret_cast(unwrap(fn));
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index f8ac0bf88d..7a0fb51b16 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -122,12 +122,6 @@ enum ZigLLVM_CallingConv {
     ZigLLVM_MaxID = 1023,
 };
 
-ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
-        LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size, bool isVolatile);
-
-ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr, LLVMValueRef Val, LLVMValueRef Size,
-        unsigned Align, bool isVolatile);
-
 ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNSWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
         const char *name);
 
 ZIG_EXTERN_C LLVMValueRef ZigLLVMBuildNUWShl(LLVMBuilderRef builder, LLVMValueRef LHS, LLVMValueRef RHS,
-- cgit v1.2.3


From a0cb03ed99d0e157389d8afad4ab4f9c8e8ea2db Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Mon, 7 Aug 2023 09:35:38 -0400
Subject: llvm: finish converting instructions

---
 src/codegen/llvm.zig          | 148 +++++++--------
 src/codegen/llvm/Builder.zig  | 424 +++++++++++++++++++++++++++++-------------
 src/codegen/llvm/bindings.zig |   5 +-
 src/zig_llvm.cpp              |   4 +-
 4 files changed, 364 insertions(+), 217 deletions(-)

(limited to 'src/codegen')

diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 47cfe58904..3684237cb2 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4911,8 +4911,8 @@ pub const FuncGen = struct {
             .array_to_slice => try self.airArrayToSlice(inst),
             .float_from_int => try self.airFloatFromInt(inst),
 
-            .cmpxchg_weak => try self.airCmpxchg(inst, true),
-            .cmpxchg_strong => try self.airCmpxchg(inst, false),
+            .cmpxchg_weak => try self.airCmpxchg(inst, .weak),
+            .cmpxchg_strong => try self.airCmpxchg(inst, .strong),
             .fence => try self.airFence(inst),
             .atomic_rmw => try self.airAtomicRmw(inst),
             .atomic_load => try self.airAtomicLoad(inst),
@@ -8723,15 +8723,20 @@ pub const FuncGen = struct {
         return .none;
     }
 
-    fn
airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !Builder.Value { + fn airCmpxchg( + self: *FuncGen, + inst: Air.Inst.Index, + kind: Builder.Function.Instruction.CmpXchg.Kind, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr = try self.resolveInst(extra.ptr); + const ptr_ty = self.typeOf(extra.ptr); var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); - const operand_ty = self.typeOf(extra.ptr).childType(mod); + const operand_ty = ptr_ty.childType(mod); const llvm_operand_ty = try o.lowerType(operand_ty); const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); if (llvm_abi_ty != .none) { @@ -8742,22 +8747,18 @@ pub const FuncGen = struct { new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, ""); } - const llvm_result_ty = try o.builder.structType(.normal, &.{ - if (llvm_abi_ty != .none) llvm_abi_ty else llvm_operand_ty, - .i1, - }); - const result = (try self.wip.unimplemented(llvm_result_ty, "")).finish( - self.builder.buildAtomicCmpXchg( - ptr.toLlvm(&self.wip), - expected_value.toLlvm(&self.wip), - new_value.toLlvm(&self.wip), - @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.successOrder()))), - @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.failureOrder()))), - llvm.Bool.fromBool(self.sync_scope == .singlethread), - ), - &self.wip, + const result = try self.wip.cmpxchg( + kind, + if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, + ptr, + expected_value, + new_value, + self.sync_scope, + toLlvmAtomicOrdering(extra.successOrder()), + toLlvmAtomicOrdering(extra.failureOrder()), + Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)), + "", ); - result.toLlvm(&self.wip).setWeak(llvm.Bool.fromBool(is_weak)); const optional_ty = self.typeOfIndex(inst); @@ -8789,63 +8790,54 @@ pub const FuncGen = struct { const is_float = operand_ty.isRuntimeFloat(); const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); - const single_threaded = llvm.Bool.fromBool(self.sync_scope == .singlethread); - const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .xchg); const llvm_operand_ty = try o.lowerType(operand_ty); + + const access_kind: Builder.MemoryAccessKind = + if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal; + const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + if (llvm_abi_ty != .none) { // operand needs widening and truncating or bitcasting. 
- const casted_operand = try self.wip.cast( - if (is_float) .bitcast else if (is_signed_int) .sext else .zext, - @enumFromInt(@intFromEnum(operand)), - llvm_abi_ty, - "", - ); - - const uncasted_result = (try self.wip.unimplemented(llvm_abi_ty, "")).finish( - self.builder.buildAtomicRmw( - op, - ptr.toLlvm(&self.wip), - casted_operand.toLlvm(&self.wip), - @enumFromInt(@intFromEnum(ordering)), - single_threaded, + return self.wip.cast(if (is_float) .bitcast else .trunc, try self.wip.atomicrmw( + access_kind, + op, + ptr, + try self.wip.cast( + if (is_float) .bitcast else if (is_signed_int) .sext else .zext, + operand, + llvm_abi_ty, + "", ), - &self.wip, - ); - - if (is_float) { - return self.wip.cast(.bitcast, uncasted_result, llvm_operand_ty, ""); - } else { - return self.wip.cast(.trunc, uncasted_result, llvm_operand_ty, ""); - } + self.sync_scope, + ordering, + ptr_alignment, + "", + ), llvm_operand_ty, ""); } - if (!llvm_operand_ty.isPointer(&o.builder)) { - return (try self.wip.unimplemented(llvm_operand_ty, "")).finish( - self.builder.buildAtomicRmw( - op, - ptr.toLlvm(&self.wip), - operand.toLlvm(&self.wip), - @enumFromInt(@intFromEnum(ordering)), - single_threaded, - ), - &self.wip, - ); - } + if (!llvm_operand_ty.isPointer(&o.builder)) return self.wip.atomicrmw( + access_kind, + op, + ptr, + operand, + self.sync_scope, + ordering, + ptr_alignment, + "", + ); // It's a pointer but we need to treat it as an int. - const llvm_usize = try o.lowerType(Type.usize); - const casted_operand = try self.wip.cast(.ptrtoint, operand, llvm_usize, ""); - const uncasted_result = (try self.wip.unimplemented(llvm_usize, "")).finish( - self.builder.buildAtomicRmw( - op, - ptr.toLlvm(&self.wip), - casted_operand.toLlvm(&self.wip), - @enumFromInt(@intFromEnum(ordering)), - single_threaded, - ), - &self.wip, - ); - return self.wip.cast(.inttoptr, uncasted_result, llvm_operand_ty, ""); + return self.wip.cast(.inttoptr, try self.wip.atomicrmw( + access_kind, + op, + ptr, + try self.wip.cast(.ptrtoint, operand, try o.lowerType(Type.usize), ""), + self.sync_scope, + ordering, + ptr_alignment, + "", + ), llvm_operand_ty, ""); } fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -10581,17 +10573,17 @@ fn toLlvmAtomicRmwBinOp( op: std.builtin.AtomicRmwOp, is_signed: bool, is_float: bool, -) llvm.AtomicRMWBinOp { +) Builder.Function.Instruction.AtomicRmw.Operation { return switch (op) { - .Xchg => .Xchg, - .Add => if (is_float) .FAdd else return .Add, - .Sub => if (is_float) .FSub else return .Sub, - .And => .And, - .Nand => .Nand, - .Or => .Or, - .Xor => .Xor, - .Max => if (is_float) .FMax else if (is_signed) .Max else return .UMax, - .Min => if (is_float) .FMin else if (is_signed) .Min else return .UMin, + .Xchg => .xchg, + .Add => if (is_float) .fadd else return .add, + .Sub => if (is_float) .fsub else return .sub, + .And => .@"and", + .Nand => .nand, + .Or => .@"or", + .Xor => .xor, + .Max => if (is_float) .fmax else if (is_signed) .max else return .umax, + .Min => if (is_float) .fmin else if (is_signed) .min else return .umin, }; } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 5495ea22e8..396580a664 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -2177,7 +2177,7 @@ pub const Global = struct { if (!builder.useLibLlvm()) return; const index = @intFromEnum(self.unwrap(builder)); const name_slice = self.name(builder).slice(builder) orelse ""; - builder.llvm.globals.items[index].setValueName2(name_slice.ptr, 
name_slice.len); + builder.llvm.globals.items[index].setValueName(name_slice.ptr, name_slice.len); } fn replaceAssumeCapacity(self: Index, other: Index, builder: *Builder) void { @@ -3759,12 +3759,15 @@ pub const Function = struct { arg, ashr, @"ashr exact", + atomicrmw, bitcast, block, br, br_cond, call, @"call fast", + cmpxchg, + @"cmpxchg weak", extractelement, extractvalue, fadd, @@ -3833,8 +3836,6 @@ pub const Function = struct { inttoptr, load, @"load atomic", - @"load atomic volatile", - @"load volatile", lshr, @"lshr exact", mul, @@ -3865,8 +3866,6 @@ pub const Function = struct { srem, store, @"store atomic", - @"store atomic volatile", - @"store volatile", sub, @"sub nsw", @"sub nuw", @@ -3879,7 +3878,6 @@ pub const Function = struct { @"udiv exact", urem, uitofp, - unimplemented, @"unreachable", va_arg, xor, @@ -3920,8 +3918,6 @@ pub const Function = struct { .@"ret void", .store, .@"store atomic", - .@"store atomic volatile", - .@"store volatile", .@"switch", .@"unreachable", => false, @@ -3933,7 +3929,6 @@ pub const Function = struct { .@"notail call fast", .@"tail call", .@"tail call fast", - .unimplemented, => self.typeOfWip(wip) != .void, else => true, }; @@ -4003,6 +3998,7 @@ pub const Function = struct { ), .arg => wip.function.typeOf(wip.builder) .functionParameters(wip.builder)[instruction.data], + .atomicrmw => wip.extraData(AtomicRmw, instruction.data).val.typeOfWip(wip), .block => .label, .br, .br_cond, @@ -4011,8 +4007,6 @@ pub const Function = struct { .@"ret void", .store, .@"store atomic", - .@"store atomic volatile", - .@"store volatile", .@"switch", .@"unreachable", => .none, @@ -4025,6 +4019,12 @@ pub const Function = struct { .@"tail call", .@"tail call fast", => wip.extraData(Call, instruction.data).ty.functionReturn(wip.builder), + .cmpxchg, + .@"cmpxchg weak", + => wip.builder.structTypeAssumeCapacity(.normal, &.{ + wip.extraData(CmpXchg, instruction.data).cmp.typeOfWip(wip), + .i1, + }) catch unreachable, .extractelement => wip.extraData(ExtractElement, instruction.data) .val.typeOfWip(wip).childType(wip.builder), .extractvalue => { @@ -4096,8 +4096,6 @@ pub const Function = struct { .insertvalue => wip.extraData(InsertValue, instruction.data).val.typeOfWip(wip), .load, .@"load atomic", - .@"load atomic volatile", - .@"load volatile", => wip.extraData(Load, instruction.data).type, .phi, .@"phi fast", @@ -4112,7 +4110,6 @@ pub const Function = struct { wip.builder, ); }, - .unimplemented => @enumFromInt(instruction.data), .va_arg => wip.extraData(VaArg, instruction.data).type, }; } @@ -4186,6 +4183,8 @@ pub const Function = struct { ), .arg => function.global.typeOf(builder) .functionParameters(builder)[instruction.data], + .atomicrmw => function.extraData(AtomicRmw, instruction.data) + .val.typeOf(function_index, builder), .block => .label, .br, .br_cond, @@ -4194,8 +4193,6 @@ pub const Function = struct { .@"ret void", .store, .@"store atomic", - .@"store atomic volatile", - .@"store volatile", .@"switch", .@"unreachable", => .none, @@ -4208,6 +4205,13 @@ pub const Function = struct { .@"tail call", .@"tail call fast", => function.extraData(Call, instruction.data).ty.functionReturn(builder), + .cmpxchg, + .@"cmpxchg weak", + => builder.structTypeAssumeCapacity(.normal, &.{ + function.extraData(CmpXchg, instruction.data) + .cmp.typeOf(function_index, builder), + .i1, + }) catch unreachable, .extractelement => function.extraData(ExtractElement, instruction.data) .val.typeOf(function_index, builder).childType(builder), .extractvalue => { @@ -4282,8 +4286,6 
@@ pub const Function = struct { .val.typeOf(function_index, builder), .load, .@"load atomic", - .@"load atomic volatile", - .@"load volatile", => function.extraData(Load, instruction.data).type, .phi, .@"phi fast", @@ -4298,7 +4300,6 @@ pub const Function = struct { builder, ); }, - .unimplemented => @enumFromInt(instruction.data), .va_arg => function.extraData(VaArg, instruction.data).type, }; } @@ -4346,7 +4347,7 @@ pub const Function = struct { return wip.llvm.instructions.items[@intFromEnum(self)]; } - fn llvmName(self: Instruction.Index, wip: *const WipFunction) [*:0]const u8 { + fn llvmName(self: Instruction.Index, wip: *const WipFunction) [:0]const u8 { return if (wip.builder.strip) "" else @@ -4419,15 +4420,49 @@ pub const Function = struct { }; pub const Load = struct { + info: MemoryAccessInfo, type: Type, ptr: Value, - info: MemoryAccessInfo, }; pub const Store = struct { + info: MemoryAccessInfo, val: Value, ptr: Value, + }; + + pub const CmpXchg = struct { info: MemoryAccessInfo, + ptr: Value, + cmp: Value, + new: Value, + + pub const Kind = enum { strong, weak }; + }; + + pub const AtomicRmw = struct { + info: MemoryAccessInfo, + ptr: Value, + val: Value, + + pub const Operation = enum(u5) { + xchg, + add, + sub, + @"and", + nand, + @"or", + xor, + max, + min, + umax, + umin, + fadd, + fsub, + fmax, + fmin, + none = std.math.maxInt(u5), + }; }; pub const GetElementPtr = struct { @@ -5163,21 +5198,21 @@ pub const WipFunction = struct { pub fn load( self: *WipFunction, - kind: MemoryAccessKind, + access_kind: MemoryAccessKind, ty: Type, ptr: Value, alignment: Alignment, name: []const u8, ) Allocator.Error!Value { - return self.loadAtomic(kind, ty, ptr, .system, .none, alignment, name); + return self.loadAtomic(access_kind, ty, ptr, .system, .none, alignment, name); } pub fn loadAtomic( self: *WipFunction, - kind: MemoryAccessKind, + access_kind: MemoryAccessKind, ty: Type, ptr: Value, - scope: SyncScope, + sync_scope: SyncScope, ordering: AtomicOrdering, alignment: Alignment, name: []const u8, @@ -5186,22 +5221,21 @@ pub const WipFunction = struct { try self.ensureUnusedExtraCapacity(1, Instruction.Load, 0); const instruction = try self.addInst(name, .{ .tag = switch (ordering) { - .none => switch (kind) { - .normal => .load, - .@"volatile" => .@"load volatile", - }, - else => switch (kind) { - .normal => .@"load atomic", - .@"volatile" => .@"load atomic volatile", - }, + .none => .load, + else => .@"load atomic", }, .data = self.addExtraAssumeCapacity(Instruction.Load{ + .info = .{ + .access_kind = access_kind, + .sync_scope = switch (ordering) { + .none => .system, + else => sync_scope, + }, + .success_ordering = ordering, + .alignment = alignment, + }, .type = ty, .ptr = ptr, - .info = .{ .scope = switch (ordering) { - .none => .system, - else => scope, - }, .ordering = ordering, .alignment = alignment }, }), }); if (self.builder.useLibLlvm()) { @@ -5210,6 +5244,7 @@ pub const WipFunction = struct { ptr.toLlvm(self), instruction.llvmName(self), ); + if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes)); self.llvm.instructions.appendAssumeCapacity(llvm_instruction); @@ -5229,10 +5264,10 @@ pub const WipFunction = struct { pub fn storeAtomic( self: *WipFunction, - kind: MemoryAccessKind, + access_kind: MemoryAccessKind, val: Value, ptr: Value, - scope: SyncScope, + sync_scope: SyncScope, 
ordering: AtomicOrdering, alignment: Alignment, ) Allocator.Error!Instruction.Index { @@ -5240,30 +5275,26 @@ pub const WipFunction = struct { try self.ensureUnusedExtraCapacity(1, Instruction.Store, 0); const instruction = try self.addInst(null, .{ .tag = switch (ordering) { - .none => switch (kind) { - .normal => .store, - .@"volatile" => .@"store volatile", - }, - else => switch (kind) { - .normal => .@"store atomic", - .@"volatile" => .@"store atomic volatile", - }, + .none => .store, + else => .@"store atomic", }, .data = self.addExtraAssumeCapacity(Instruction.Store{ + .info = .{ + .access_kind = access_kind, + .sync_scope = switch (ordering) { + .none => .system, + else => sync_scope, + }, + .success_ordering = ordering, + .alignment = alignment, + }, .val = val, .ptr = ptr, - .info = .{ .scope = switch (ordering) { - .none => .system, - else => scope, - }, .ordering = ordering, .alignment = alignment }, }), }); if (self.builder.useLibLlvm()) { const llvm_instruction = self.llvm.builder.buildStore(val.toLlvm(self), ptr.toLlvm(self)); - switch (kind) { - .normal => {}, - .@"volatile" => llvm_instruction.setVolatile(.True), - } + if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes)); self.llvm.instructions.appendAssumeCapacity(llvm_instruction); @@ -5273,7 +5304,7 @@ pub const WipFunction = struct { pub fn fence( self: *WipFunction, - scope: SyncScope, + sync_scope: SyncScope, ordering: AtomicOrdering, ) Allocator.Error!Instruction.Index { assert(ordering != .none); @@ -5281,21 +5312,130 @@ pub const WipFunction = struct { const instruction = try self.addInst(null, .{ .tag = .fence, .data = @bitCast(MemoryAccessInfo{ - .scope = scope, - .ordering = ordering, - .alignment = undefined, + .sync_scope = sync_scope, + .success_ordering = ordering, }), }); if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( self.llvm.builder.buildFence( @enumFromInt(@intFromEnum(ordering)), - llvm.Bool.fromBool(scope == .singlethread), + llvm.Bool.fromBool(sync_scope == .singlethread), "", ), ); return instruction; } + pub fn cmpxchg( + self: *WipFunction, + kind: Instruction.CmpXchg.Kind, + access_kind: MemoryAccessKind, + ptr: Value, + cmp: Value, + new: Value, + sync_scope: SyncScope, + success_ordering: AtomicOrdering, + failure_ordering: AtomicOrdering, + alignment: Alignment, + name: []const u8, + ) Allocator.Error!Value { + assert(ptr.typeOfWip(self).isPointer(self.builder)); + const ty = cmp.typeOfWip(self); + assert(ty == new.typeOfWip(self)); + assert(success_ordering != .none); + assert(failure_ordering != .none); + + _ = try self.builder.structType(.normal, &.{ ty, .i1 }); + try self.ensureUnusedExtraCapacity(1, Instruction.CmpXchg, 0); + const instruction = try self.addInst(name, .{ + .tag = switch (kind) { + .strong => .cmpxchg, + .weak => .@"cmpxchg weak", + }, + .data = self.addExtraAssumeCapacity(Instruction.CmpXchg{ + .info = .{ + .access_kind = access_kind, + .sync_scope = sync_scope, + .success_ordering = success_ordering, + .failure_ordering = failure_ordering, + .alignment = alignment, + }, + .ptr = ptr, + .cmp = cmp, + .new = new, + }), + }); + if (self.builder.useLibLlvm()) { + const llvm_instruction = self.llvm.builder.buildAtomicCmpXchg( + ptr.toLlvm(self), + cmp.toLlvm(self), + new.toLlvm(self), + @enumFromInt(@intFromEnum(success_ordering)), + 
@enumFromInt(@intFromEnum(failure_ordering)), + llvm.Bool.fromBool(sync_scope == .singlethread), + ); + if (kind == .weak) llvm_instruction.setWeak(.True); + if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); + if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes)); + const llvm_name = instruction.llvmName(self); + if (llvm_name.len > 0) llvm_instruction.setValueName( + llvm_name.ptr, + @intCast(llvm_name.len), + ); + self.llvm.instructions.appendAssumeCapacity(llvm_instruction); + } + return instruction.toValue(); + } + + pub fn atomicrmw( + self: *WipFunction, + access_kind: MemoryAccessKind, + operation: Instruction.AtomicRmw.Operation, + ptr: Value, + val: Value, + sync_scope: SyncScope, + ordering: AtomicOrdering, + alignment: Alignment, + name: []const u8, + ) Allocator.Error!Value { + assert(ptr.typeOfWip(self).isPointer(self.builder)); + assert(ordering != .none); + + try self.ensureUnusedExtraCapacity(1, Instruction.AtomicRmw, 0); + const instruction = try self.addInst(name, .{ + .tag = .atomicrmw, + .data = self.addExtraAssumeCapacity(Instruction.AtomicRmw{ + .info = .{ + .access_kind = access_kind, + .atomic_rmw_operation = operation, + .sync_scope = sync_scope, + .success_ordering = ordering, + .alignment = alignment, + }, + .ptr = ptr, + .val = val, + }), + }); + if (self.builder.useLibLlvm()) { + const llvm_instruction = self.llvm.builder.buildAtomicRmw( + @enumFromInt(@intFromEnum(operation)), + ptr.toLlvm(self), + val.toLlvm(self), + @enumFromInt(@intFromEnum(ordering)), + llvm.Bool.fromBool(sync_scope == .singlethread), + ); + if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); + if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes)); + const llvm_name = instruction.llvmName(self); + if (llvm_name.len > 0) llvm_instruction.setValueName( + llvm_name.ptr, + @intCast(llvm_name.len), + ); + self.llvm.instructions.appendAssumeCapacity(llvm_instruction); + } + return instruction.toValue(); + } + pub fn gep( self: *WipFunction, kind: Instruction.GetElementPtr.Kind, @@ -5747,30 +5887,6 @@ pub const WipFunction = struct { return instruction.toValue(); } - pub const WipUnimplemented = struct { - instruction: Instruction.Index, - - pub fn finish(self: WipUnimplemented, val: *llvm.Value, wip: *WipFunction) Value { - assert(wip.builder.useLibLlvm()); - wip.llvm.instructions.items[@intFromEnum(self.instruction)] = val; - return self.instruction.toValue(); - } - }; - - pub fn unimplemented( - self: *WipFunction, - ty: Type, - name: []const u8, - ) Allocator.Error!WipUnimplemented { - try self.ensureUnusedExtraCapacity(1, NoExtra, 0); - const instruction = try self.addInst(name, .{ - .tag = .unimplemented, - .data = @intFromEnum(ty), - }); - if (self.builder.useLibLlvm()) _ = self.llvm.instructions.addOneAssumeCapacity(); - return .{ .instruction = instruction }; - } - pub fn finish(self: *WipFunction) Allocator.Error!void { const gpa = self.builder.gpa; const function = self.function.ptr(self.builder); @@ -6035,19 +6151,19 @@ pub const WipFunction = struct { .arg, .block, => unreachable, + .atomicrmw => { + const extra = self.extraData(Instruction.AtomicRmw, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.AtomicRmw{ + .info = extra.info, + .ptr = instructions.map(extra.ptr), + .val = instructions.map(extra.val), + }); + }, .br, .fence, .@"ret void", - .unimplemented, .@"unreachable", => {}, - .extractelement => { - const extra = self.extraData(Instruction.ExtractElement, 
instruction.data); - instruction.data = wip_extra.addExtra(Instruction.ExtractElement{ - .val = instructions.map(extra.val), - .index = instructions.map(extra.index), - }); - }, .br_cond => { const extra = self.extraData(Instruction.BrCond, instruction.data); instruction.data = wip_extra.addExtra(Instruction.BrCond{ @@ -6076,6 +6192,24 @@ pub const WipFunction = struct { }); wip_extra.appendMappedValues(args, instructions); }, + .cmpxchg, + .@"cmpxchg weak", + => { + const extra = self.extraData(Instruction.CmpXchg, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.CmpXchg{ + .info = extra.info, + .ptr = instructions.map(extra.ptr), + .cmp = instructions.map(extra.cmp), + .new = instructions.map(extra.new), + }); + }, + .extractelement => { + const extra = self.extraData(Instruction.ExtractElement, instruction.data); + instruction.data = wip_extra.addExtra(Instruction.ExtractElement{ + .val = instructions.map(extra.val), + .index = instructions.map(extra.index), + }); + }, .extractvalue => { var extra = self.extraDataTrail(Instruction.ExtractValue, instruction.data); const indices = extra.trail.next(extra.data.indices_len, u32, self); @@ -6121,8 +6255,6 @@ pub const WipFunction = struct { }, .load, .@"load atomic", - .@"load atomic volatile", - .@"load volatile", => { const extra = self.extraData(Instruction.Load, instruction.data); instruction.data = wip_extra.addExtra(Instruction.Load{ @@ -6164,8 +6296,6 @@ pub const WipFunction = struct { }, .store, .@"store atomic", - .@"store atomic volatile", - .@"store volatile", => { const extra = self.extraData(Instruction.Store, instruction.data); instruction.data = wip_extra.addExtra(Instruction.Store{ @@ -6619,6 +6749,15 @@ pub const IntegerCondition = enum(u6) { pub const MemoryAccessKind = enum(u1) { normal, @"volatile", + + pub fn format( + self: MemoryAccessKind, + comptime prefix: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + if (self != .normal) try writer.print("{s}{s}", .{ prefix, @tagName(self) }); + } }; pub const SyncScope = enum(u1) { @@ -6632,7 +6771,7 @@ pub const SyncScope = enum(u1) { writer: anytype, ) @TypeOf(writer).Error!void { if (self != .system) try writer.print( - \\{s} syncscope("{s}") + \\{s}syncscope("{s}") , .{ prefix, @tagName(self) }); } }; @@ -6652,15 +6791,18 @@ pub const AtomicOrdering = enum(u3) { _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - if (self != .none) try writer.print("{s} {s}", .{ prefix, @tagName(self) }); + if (self != .none) try writer.print("{s}{s}", .{ prefix, @tagName(self) }); } }; const MemoryAccessInfo = packed struct(u32) { - scope: SyncScope, - ordering: AtomicOrdering, - alignment: Alignment, - _: u22 = undefined, + access_kind: MemoryAccessKind = .normal, + atomic_rmw_operation: Function.Instruction.AtomicRmw.Operation = .none, + sync_scope: SyncScope, + success_ordering: AtomicOrdering, + failure_ordering: AtomicOrdering = .none, + alignment: Alignment = .default, + _: u13 = undefined, }; pub const FastMath = packed struct(u32) { @@ -7542,7 +7684,7 @@ pub fn init(options: Options) InitError!Builder { if (options.name.len > 0) self.source_filename = try self.string(options.name); self.initializeLLVMTarget(options.target.cpu.arch); if (self.useLibLlvm()) self.llvm.module = llvm.Module.createWithName( - (self.source_filename.slice(&self) orelse "").ptr, + (self.source_filename.slice(&self) orelse ""), self.llvm.context, ); @@ -8983,6 +9125,21 @@ pub fn printUnbuffered( }); }, .arg => 
unreachable, + .atomicrmw => |tag| { + const extra = + function.extraData(Function.Instruction.AtomicRmw, instruction.data); + try writer.print(" %{} = {s}{ } {s} {%}, {%}{ }{ }{, }\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.info.access_kind, + @tagName(extra.info.atomic_rmw_operation), + extra.ptr.fmt(function_index, self), + extra.val.fmt(function_index, self), + extra.info.sync_scope, + extra.info.success_ordering, + extra.info.alignment, + }); + }, .block => { block_incoming_len = instruction.data; const name = instruction_index.name(&function); @@ -9056,6 +9213,24 @@ pub fn printUnbuffered( }); try writer.writeByte('\n'); }, + .cmpxchg, + .@"cmpxchg weak", + => |tag| { + const extra = + function.extraData(Function.Instruction.CmpXchg, instruction.data); + try writer.print(" %{} = {s}{ } {%}, {%}, {%}{ }{ }{ }{, }\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.info.access_kind, + extra.ptr.fmt(function_index, self), + extra.cmp.fmt(function_index, self), + extra.new.fmt(function_index, self), + extra.info.sync_scope, + extra.info.success_ordering, + extra.info.failure_ordering, + extra.info.alignment, + }); + }, .extractelement => |tag| { const extra = function.extraData( Function.Instruction.ExtractElement, @@ -9084,7 +9259,11 @@ pub fn printUnbuffered( }, .fence => |tag| { const info: MemoryAccessInfo = @bitCast(instruction.data); - try writer.print(" {s}{}{}", .{ @tagName(tag), info.scope, info.ordering }); + try writer.print(" {s}{ }{ }", .{ + @tagName(tag), + info.sync_scope, + info.success_ordering, + }); }, .fneg, .@"fneg fast", @@ -9145,18 +9324,17 @@ pub fn printUnbuffered( }, .load, .@"load atomic", - .@"load atomic volatile", - .@"load volatile", => |tag| { const extra = function.extraData(Function.Instruction.Load, instruction.data); - try writer.print(" %{} = {s} {%}, {%}{}{}{, }\n", .{ + try writer.print(" %{} = {s}{ } {%}, {%}{ }{ }{, }\n", .{ instruction_index.name(&function).fmt(self), @tagName(tag), + extra.info.access_kind, extra.type.fmt(self), extra.ptr.fmt(function_index, self), - extra.info.scope, - extra.info.ordering, + extra.info.sync_scope, + extra.info.success_ordering, extra.info.alignment, }); }, @@ -9220,17 +9398,16 @@ pub fn printUnbuffered( }, .store, .@"store atomic", - .@"store atomic volatile", - .@"store volatile", => |tag| { const extra = function.extraData(Function.Instruction.Store, instruction.data); - try writer.print(" {s} {%}, {%}{}{}{, }\n", .{ + try writer.print(" {s}{ } {%}, {%}{ }{ }{, }\n", .{ @tagName(tag), + extra.info.access_kind, extra.val.fmt(function_index, self), extra.ptr.fmt(function_index, self), - extra.info.scope, - extra.info.ordering, + extra.info.sync_scope, + extra.info.success_ordering, extra.info.alignment, }); }, @@ -9254,25 +9431,6 @@ pub fn printUnbuffered( ); try writer.writeAll(" ]\n"); }, - .unimplemented => |tag| { - const ty: Type = @enumFromInt(instruction.data); - if (true) { - try writer.writeAll(" "); - switch (ty) { - .none, .void => {}, - else => try writer.print("%{} = ", .{ - instruction_index.name(&function).fmt(self), - }), - } - try writer.print("{s} {%}\n", .{ @tagName(tag), ty.fmt(self) }); - } else switch (ty) { - .none, .void => {}, - else => try writer.print(" %{} = load {%}, ptr undef\n", .{ - instruction_index.name(&function).fmt(self), - ty.fmt(self), - }), - } - }, .va_arg => |tag| { const extra = function.extraData(Function.Instruction.VaArg, instruction.data); diff --git a/src/codegen/llvm/bindings.zig 
b/src/codegen/llvm/bindings.zig index d5a1445176..ccfdd9407c 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -330,10 +330,7 @@ pub const Value = opaque { pub const fnSetSubprogram = ZigLLVMFnSetSubprogram; extern fn ZigLLVMFnSetSubprogram(f: *Value, subprogram: *DISubprogram) void; - pub const setValueName = LLVMSetValueName; - extern fn LLVMSetValueName(Val: *Value, Name: [*:0]const u8) void; - - pub const setValueName2 = LLVMSetValueName2; + pub const setValueName = LLVMSetValueName2; extern fn LLVMSetValueName2(Val: *Value, Name: [*]const u8, NameLen: usize) void; pub const getValueName = LLVMGetValueName; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 5ae8944c67..f2b8cf9da5 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -1123,11 +1123,11 @@ void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim) { } ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpression *global_variable_expression) { - return reinterpret_cast(reinterpret_cast(global_variable_expression)->getVariable()); + return reinterpret_cast(reinterpret_cast(global_variable_expression)->getVariable()); } void ZigLLVMAttachMetaData(LLVMValueRef Val, ZigLLVMDIGlobalVariableExpression *global_variable_expression) { - unwrap(Val)->addDebugInfo(reinterpret_cast(global_variable_expression)); + unwrap(Val)->addDebugInfo(reinterpret_cast(global_variable_expression)); } static_assert((Triple::ArchType)ZigLLVM_UnknownArch == Triple::UnknownArch, ""); -- cgit v1.2.3 From a66cd54f94cf675a67f6de594e3d59e7ca14b92f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 7 Aug 2023 15:49:25 -0400 Subject: llvm: cleanup even more unused LLVM API bindings --- src/codegen/llvm.zig | 6 +-- src/codegen/llvm/Builder.zig | 67 ++++++++++++++++++++++++++++++++ src/codegen/llvm/bindings.zig | 90 ------------------------------------------- 3 files changed, 69 insertions(+), 94 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 3684237cb2..156b3ac36e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -9775,10 +9775,8 @@ pub const FuncGen = struct { else try self.wip.cast(.bitcast, non_int_val, small_int_ty, ""); const shift_rhs = try o.builder.intValue(int_ty, running_bits); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. 
- const extended_int_val = try self.wip.conv(.unsigned, small_int_val, int_ty, ""); + const extended_int_val = + try self.wip.conv(.unsigned, small_int_val, int_ty, ""); const shifted = try self.wip.bin(.shl, extended_int_val, shift_rhs, ""); running_int = try self.wip.bin(.@"or", running_int, shifted, ""); running_bits += ty_bit_size; diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 396580a664..aa78381ee7 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -6894,6 +6894,7 @@ pub const Constant = enum(u32) { @"and", @"or", xor, + select, @"asm", @"asm sideeffect", @"asm alignstack", @@ -7005,6 +7006,12 @@ pub const Constant = enum(u32) { rhs: Constant, }; + pub const Select = extern struct { + cond: Constant, + lhs: Constant, + rhs: Constant, + }; + pub const Assembly = extern struct { type: Type, assembly: String, @@ -7136,6 +7143,7 @@ pub const Constant = enum(u32) { .@"or", .xor, => builder.constantExtraData(Binary, item.data).lhs.typeOf(builder), + .select => builder.constantExtraData(Select, item.data).lhs.typeOf(builder), .@"asm", .@"asm sideeffect", .@"asm alignstack", @@ -7500,6 +7508,15 @@ pub const Constant = enum(u32) { extra.rhs.fmt(data.builder), }); }, + .select => |tag| { + const extra = data.builder.constantExtraData(Select, item.data); + try writer.print("{s} ({%}, {%}, {%})", .{ + @tagName(tag), + extra.cond.fmt(data.builder), + extra.lhs.fmt(data.builder), + extra.rhs.fmt(data.builder), + }); + }, .@"asm", .@"asm sideeffect", .@"asm alignstack", @@ -8802,6 +8819,20 @@ pub fn binValue(self: *Builder, tag: Constant.Tag, lhs: Constant, rhs: Constant) return (try self.binConst(tag, lhs, rhs)).toValue(); } +pub fn selectConst( + self: *Builder, + cond: Constant, + lhs: Constant, + rhs: Constant, +) Allocator.Error!Constant { + try self.ensureUnusedConstantCapacity(1, Constant.Select, 0); + return self.selectConstAssumeCapacity(cond, lhs, rhs); +} + +pub fn selectValue(self: *Builder, cond: Constant, lhs: Constant, rhs: Constant) Allocator.Error!Value { + return (try self.selectConst(cond, lhs, rhs)).toValue(); +} + pub fn asmConst( self: *Builder, ty: Type, @@ -11063,6 +11094,42 @@ fn binConstAssumeCapacity( return @enumFromInt(gop.index); } +comptime { + _ = &selectValue; +} + +fn selectConstAssumeCapacity(self: *Builder, cond: Constant, lhs: Constant, rhs: Constant) Constant { + const Adapter = struct { + builder: *const Builder, + pub fn hash(_: @This(), key: Constant.Select) u32 { + return @truncate(std.hash.Wyhash.hash( + std.hash.uint32(@intFromEnum(Constant.Tag.select)), + std.mem.asBytes(&key), + )); + } + pub fn eql(ctx: @This(), lhs_key: Constant.Select, _: void, rhs_index: usize) bool { + if (ctx.builder.constant_items.items(.tag)[rhs_index] != .select) return false; + const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index]; + const rhs_extra = ctx.builder.constantExtraData(Constant.Select, rhs_data); + return std.meta.eql(lhs_key, rhs_extra); + } + }; + const data = Constant.Select{ .cond = cond, .lhs = lhs, .rhs = rhs }; + const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self }); + if (!gop.found_existing) { + gop.key_ptr.* = {}; + gop.value_ptr.* = {}; + self.constant_items.appendAssumeCapacity(.{ + .tag = .select, + .data = self.addConstantExtraAssumeCapacity(data), + }); + if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( + cond.toLlvm(self).constSelect(lhs.toLlvm(self), rhs.toLlvm(self)), + ); + } + return @enumFromInt(gop.index); +} + fn 
asmConstAssumeCapacity( self: *Builder, ty: Type, diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index ccfdd9407c..a756be784b 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -93,17 +93,6 @@ pub const Context = opaque { pub const constString = LLVMConstStringInContext; extern fn LLVMConstStringInContext(C: *Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: Bool) *Value; - pub const constStruct = LLVMConstStructInContext; - extern fn LLVMConstStructInContext( - C: *Context, - ConstantVals: [*]const *Value, - Count: c_uint, - Packed: Bool, - ) *Value; - - pub const createBasicBlock = LLVMCreateBasicBlockInContext; - extern fn LLVMCreateBasicBlockInContext(C: *Context, Name: [*:0]const u8) *BasicBlock; - pub const appendBasicBlock = LLVMAppendBasicBlockInContext; extern fn LLVMAppendBasicBlockInContext(C: *Context, Fn: *Value, Name: [*:0]const u8) *BasicBlock; @@ -127,9 +116,6 @@ pub const Value = opaque { pub const getFirstBasicBlock = LLVMGetFirstBasicBlock; extern fn LLVMGetFirstBasicBlock(Fn: *Value) ?*BasicBlock; - pub const appendExistingBasicBlock = LLVMAppendExistingBasicBlock; - extern fn LLVMAppendExistingBasicBlock(Fn: *Value, BB: *BasicBlock) void; - pub const addIncoming = LLVMAddIncoming; extern fn LLVMAddIncoming( PhiNode: *Value, @@ -138,9 +124,6 @@ pub const Value = opaque { Count: c_uint, ) void; - pub const getNextInstruction = LLVMGetNextInstruction; - extern fn LLVMGetNextInstruction(Inst: *Value) ?*Value; - pub const setGlobalConstant = LLVMSetGlobalConstant; extern fn LLVMSetGlobalConstant(GlobalVar: *Value, IsConstant: Bool) void; @@ -162,30 +145,9 @@ pub const Value = opaque { pub const deleteGlobal = LLVMDeleteGlobal; extern fn LLVMDeleteGlobal(GlobalVar: *Value) void; - pub const getNextGlobalAlias = LLVMGetNextGlobalAlias; - extern fn LLVMGetNextGlobalAlias(GA: *Value) *Value; - - pub const getAliasee = LLVMAliasGetAliasee; - extern fn LLVMAliasGetAliasee(Alias: *Value) *Value; - pub const setAliasee = LLVMAliasSetAliasee; extern fn LLVMAliasSetAliasee(Alias: *Value, Aliasee: *Value) void; - pub const constZExtOrBitCast = LLVMConstZExtOrBitCast; - extern fn LLVMConstZExtOrBitCast(ConstantVal: *Value, ToType: *Type) *Value; - - pub const constNeg = LLVMConstNeg; - extern fn LLVMConstNeg(ConstantVal: *Value) *Value; - - pub const constNSWNeg = LLVMConstNSWNeg; - extern fn LLVMConstNSWNeg(ConstantVal: *Value) *Value; - - pub const constNUWNeg = LLVMConstNUWNeg; - extern fn LLVMConstNUWNeg(ConstantVal: *Value) *Value; - - pub const constNot = LLVMConstNot; - extern fn LLVMConstNot(ConstantVal: *Value) *Value; - pub const constAdd = LLVMConstAdd; extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value; @@ -309,9 +271,6 @@ pub const Value = opaque { pub const setVolatile = LLVMSetVolatile; extern fn LLVMSetVolatile(MemoryAccessInst: *Value, IsVolatile: Bool) void; - pub const setAtomicSingleThread = LLVMSetAtomicSingleThread; - extern fn LLVMSetAtomicSingleThread(AtomicInst: *Value, SingleThread: Bool) void; - pub const setAlignment = LLVMSetAlignment; extern fn LLVMSetAlignment(V: *Value, Bytes: c_uint) void; @@ -366,9 +325,6 @@ pub const Value = opaque { pub const getAlignment = LLVMGetAlignment; extern fn LLVMGetAlignment(V: *Value) c_uint; - pub const addByValAttr = ZigLLVMAddByValAttr; - extern fn ZigLLVMAddByValAttr(Fn: *Value, ArgNo: c_uint, type: *Type) void; - pub const attachMetaData = ZigLLVMAttachMetaData; extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: 
*DIGlobalVariableExpression) void; @@ -380,9 +336,6 @@ pub const Type = opaque { pub const constNull = LLVMConstNull; extern fn LLVMConstNull(Ty: *Type) *Value; - pub const constAllOnes = LLVMConstAllOnes; - extern fn LLVMConstAllOnes(Ty: *Type) *Value; - pub const constInt = LLVMConstInt; extern fn LLVMConstInt(IntTy: *Type, N: c_ulonglong, SignExtend: Bool) *Value; @@ -476,9 +429,6 @@ pub const Module = opaque { pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace; extern fn ZigLLVMAddFunctionInAddressSpace(*Module, Name: [*:0]const u8, FunctionTy: *Type, AddressSpace: c_uint) *Value; - pub const getNamedFunction = LLVMGetNamedFunction; - extern fn LLVMGetNamedFunction(*Module, Name: [*:0]const u8) ?*Value; - pub const printToString = LLVMPrintModuleToString; extern fn LLVMPrintModuleToString(*Module) [*:0]const u8; @@ -488,18 +438,9 @@ pub const Module = opaque { pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace; extern fn LLVMAddGlobalInAddressSpace(M: *Module, Ty: *Type, Name: [*:0]const u8, AddressSpace: c_uint) *Value; - pub const getNamedGlobal = LLVMGetNamedGlobal; - extern fn LLVMGetNamedGlobal(M: *Module, Name: [*:0]const u8) ?*Value; - pub const dump = LLVMDumpModule; extern fn LLVMDumpModule(M: *Module) void; - pub const getFirstGlobalAlias = LLVMGetFirstGlobalAlias; - extern fn LLVMGetFirstGlobalAlias(M: *Module) *Value; - - pub const getLastGlobalAlias = LLVMGetLastGlobalAlias; - extern fn LLVMGetLastGlobalAlias(M: *Module) *Value; - pub const addAlias = LLVMAddAlias2; extern fn LLVMAddAlias2( M: *Module, @@ -541,9 +482,6 @@ pub const Module = opaque { extern fn LLVMWriteBitcodeToFile(M: *Module, Path: [*:0]const u8) c_int; }; -pub const lookupIntrinsicID = LLVMLookupIntrinsicID; -extern fn LLVMLookupIntrinsicID(Name: [*]const u8, NameLen: usize) c_uint; - pub const disposeMessage = LLVMDisposeMessage; extern fn LLVMDisposeMessage(Message: [*:0]const u8) void; @@ -604,12 +542,6 @@ pub const Builder = opaque { Instr: ?*Value, ) void; - pub const positionBuilderAtEnd = LLVMPositionBuilderAtEnd; - extern fn LLVMPositionBuilderAtEnd(Builder: *Builder, Block: *BasicBlock) void; - - pub const getInsertBlock = LLVMGetInsertBlock; - extern fn LLVMGetInsertBlock(Builder: *Builder) *BasicBlock; - pub const buildZExt = LLVMBuildZExt; extern fn LLVMBuildZExt( *Builder, @@ -618,14 +550,6 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *Value; - pub const buildZExtOrBitCast = LLVMBuildZExtOrBitCast; - extern fn LLVMBuildZExtOrBitCast( - *Builder, - Val: *Value, - DestTy: *Type, - Name: [*:0]const u8, - ) *Value; - pub const buildSExt = LLVMBuildSExt; extern fn LLVMBuildSExt( *Builder, @@ -634,14 +558,6 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *Value; - pub const buildSExtOrBitCast = LLVMBuildSExtOrBitCast; - extern fn LLVMBuildSExtOrBitCast( - *Builder, - Val: *Value, - DestTy: *Type, - Name: [*:0]const u8, - ) *Value; - pub const buildCall = LLVMBuildCall2; extern fn LLVMBuildCall2( *Builder, @@ -670,12 +586,6 @@ pub const Builder = opaque { pub const buildLoad = LLVMBuildLoad2; extern fn LLVMBuildLoad2(*Builder, Ty: *Type, PointerVal: *Value, Name: [*:0]const u8) *Value; - pub const buildNeg = LLVMBuildNeg; - extern fn LLVMBuildNeg(*Builder, V: *Value, Name: [*:0]const u8) *Value; - - pub const buildNot = LLVMBuildNot; - extern fn LLVMBuildNot(*Builder, V: *Value, Name: [*:0]const u8) *Value; - pub const buildFAdd = LLVMBuildFAdd; extern fn LLVMBuildFAdd(*Builder, LHS: *Value, RHS: *Value, Name: [*:0]const u8) *Value; -- cgit v1.2.3 
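The patch that ends here also teaches the self-hosted Builder to intern `select` constant expressions (Constant.Tag.select plus the selectConst/selectValue helpers shown above). A minimal usage sketch, not taken from the patch itself: it assumes the well-known integer types .i1/.i32 and the intConst helper already used elsewhere in this series; only selectConst comes from the diff above.

    const std = @import("std");
    const Builder = @import("Builder.zig");

    // Builds a constant `select` over two i32 constants. Repeated calls with
    // the same operands return the same interned Constant, because
    // selectConstAssumeCapacity looks the payload up through the adapted map.
    fn selectConstExample(builder: *Builder) std.mem.Allocator.Error!Builder.Constant {
        const cond = try builder.intConst(.i1, 1); // assumed well-known type .i1
        const if_true = try builder.intConst(.i32, 10);
        const if_false = try builder.intConst(.i32, 20);
        return builder.selectConst(cond, if_true, if_false);
    }
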
From 2499d8fb73b943640cbc7d0484377fffbee403c6 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 7 Aug 2023 23:50:40 -0400 Subject: Builder: fix enough bugs to pass the behavior tests without using any information from the LLVM API --- src/codegen/llvm.zig | 4 ++-- src/codegen/llvm/Builder.zig | 45 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 42 insertions(+), 7 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 156b3ac36e..c59f2de882 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -6243,7 +6243,7 @@ pub const FuncGen = struct { if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr; const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); - return try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod)) + return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod)) // If this is a single-item pointer to an array, we need another index in the GEP. &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } else @@ -10532,7 +10532,7 @@ pub const FuncGen = struct { else => unreachable, }; - return try fg.wip.callAsm( + return fg.wip.callAsm( .none, try o.builder.fnType(llvm_usize, &.{ llvm_usize, llvm_usize }, .normal), .{ .sideeffect = true }, diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index aa78381ee7..94786fe662 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -5723,7 +5723,16 @@ pub const WipFunction = struct { .void => null, else => name, }, .{ - .tag = .call, + .tag = switch (kind) { + .normal => .call, + .fast => .@"call fast", + .musttail => .@"musttail call", + .musttail_fast => .@"musttail call fast", + .notail => .@"notail call", + .notail_fast => .@"notail call fast", + .tail => .@"tail call", + .tail_fast => .@"tail call fast", + }, .data = self.addExtraAssumeCapacity(Instruction.Call{ .info = .{ .call_conv = call_conv }, .attributes = function_attributes, @@ -7312,9 +7321,34 @@ pub const Constant = enum(u32) { .bfloat => 16, else => unreachable, } }), - .float => try writer.print("0x{X:0>16}", .{ - @as(u64, @bitCast(@as(f64, @as(f32, @bitCast(item.data))))), - }), + .float => { + const Float = struct { + fn Repr(comptime T: type) type { + return packed struct(std.meta.Int(.unsigned, @bitSizeOf(T))) { + mantissa: std.meta.Int(.unsigned, std.math.floatMantissaBits(T)), + exponent: std.meta.Int(.unsigned, std.math.floatExponentBits(T)), + sign: u1, + }; + } + }; + const Exponent32 = std.meta.FieldType(Float.Repr(f32), .exponent); + const Exponent64 = std.meta.FieldType(Float.Repr(f64), .exponent); + const repr: Float.Repr(f32) = @bitCast(item.data); + try writer.print("0x{X:0>16}", .{@as(u64, @bitCast(Float.Repr(f64){ + .mantissa = std.math.shl( + std.meta.FieldType(Float.Repr(f64), .mantissa), + repr.mantissa, + std.math.floatMantissaBits(f64) - std.math.floatMantissaBits(f32), + ), + .exponent = switch (repr.exponent) { + std.math.minInt(Exponent32) => std.math.minInt(Exponent64), + else => @as(Exponent64, repr.exponent) + + (std.math.floatExponentMax(f64) - std.math.floatExponentMax(f32)), + std.math.maxInt(Exponent32) => std.math.maxInt(Exponent64), + }, + .sign = repr.sign, + }))}); + }, .double => { const extra = data.builder.constantExtraData(Double, item.data); try writer.print("0x{X:0>8}{X:0>8}", .{ extra.hi, extra.lo }); @@ -8361,6 +8395,7 @@ pub fn getIntrinsic( var overload_index: usize = 0; function_attributes[FunctionAttributes.function_index] = try 
attributes.get(signature.attrs); + function_attributes[FunctionAttributes.return_index] = .none; // needed for void return for (0.., param_types, signature.params) |param_index, *param_type, signature_param| { switch (signature_param.kind) { .type => |ty| param_type.* = ty, @@ -9573,7 +9608,7 @@ fn fnTypeAssumeCapacity( gop.key_ptr.* = {}; gop.value_ptr.* = {}; self.type_items.appendAssumeCapacity(.{ - .tag = .function, + .tag = tag, .data = self.addTypeExtraAssumeCapacity(Type.Function{ .ret = ret, .params_len = @intCast(params.len), -- cgit v1.2.3 From 2bdd180c6f6c76940ccfe8c8532fefec208661ea Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 8 Aug 2023 10:15:11 -0400 Subject: llvm: finish converting globals --- src/codegen/llvm.zig | 1083 +++++++++++++++++------------------------ src/codegen/llvm/Builder.zig | 630 +++++++++++++++++++----- src/codegen/llvm/bindings.zig | 45 +- src/zig_llvm.cpp | 16 + src/zig_llvm.h | 4 + 5 files changed, 987 insertions(+), 791 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c59f2de882..a443a2184e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -764,7 +764,6 @@ pub const Object = struct { builder: Builder, module: *Module, - llvm_module: *llvm.Module, di_builder: ?*llvm.DIBuilder, /// One of these mappings: /// - *Module.File => *DIFile @@ -945,9 +944,8 @@ pub const Object = struct { .gpa = gpa, .builder = builder, .module = options.module.?, - .llvm_module = builder.llvm.module.?, .di_map = .{}, - .di_builder = builder.llvm.di_builder, + .di_builder = if (builder.useLibLlvm()) builder.llvm.di_builder else null, // TODO .di_compile_unit = builder.llvm.di_compile_unit, .target_machine = target_machine, .target_data = target_data, @@ -991,9 +989,8 @@ pub const Object = struct { } fn genErrorNameTable(o: *Object) Allocator.Error!void { - // If o.error_name_table is null, there was no instruction that actually referenced the error table. - const error_name_table_ptr_global = o.error_name_table; - if (error_name_table_ptr_global == .none) return; + // If o.error_name_table is null, then it was not referenced by any instructions. 
+ if (o.error_name_table == .none) return; const mod = o.module; @@ -1003,72 +1000,42 @@ pub const Object = struct { // TODO: Address space const slice_ty = Type.slice_const_u8_sentinel_0; - const slice_alignment = slice_ty.abiAlignment(mod); const llvm_usize_ty = try o.lowerType(Type.usize); const llvm_slice_ty = try o.lowerType(slice_ty); const llvm_table_ty = try o.builder.arrayType(error_name_list.len, llvm_slice_ty); llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty); - for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { - const name = try o.builder.string(mod.intern_pool.stringToSlice(name_nts)); - const str_init = try o.builder.stringNullConst(name); - const str_ty = str_init.typeOf(&o.builder); - const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); - str_llvm_global.setInitializer(str_init.toLlvm(&o.builder)); - str_llvm_global.setLinkage(.Private); - str_llvm_global.setGlobalConstant(.True); - str_llvm_global.setUnnamedAddr(.True); - str_llvm_global.setAlignment(1); - - var str_global = Builder.Global{ - .linkage = .private, - .unnamed_addr = .unnamed_addr, - .type = str_ty, - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var str_variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - .mutability = .constant, - .init = str_init, - .alignment = comptime Builder.Alignment.fromByteUnits(1), - }; - try o.builder.llvm.globals.append(o.gpa, str_llvm_global); - const global_index = try o.builder.addGlobal(.empty, str_global); - try o.builder.variables.append(o.gpa, str_variable); + for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name| { + const name_string = try o.builder.string(mod.intern_pool.stringToSlice(name)); + const name_init = try o.builder.stringNullConst(name_string); + const name_variable_index = + try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default); + try name_variable_index.setInitializer(name_init, &o.builder); + name_variable_index.setLinkage(.private, &o.builder); + name_variable_index.setMutability(.constant, &o.builder); + name_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); + name_variable_index.setAlignment(comptime Builder.Alignment.fromByteUnits(1), &o.builder); llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{ - global_index.toConst(), - try o.builder.intConst(llvm_usize_ty, name.slice(&o.builder).?.len), + name_variable_index.toConst(&o.builder), + try o.builder.intConst(llvm_usize_ty, name_string.slice(&o.builder).?.len), }); } - const error_name_table_init = try o.builder.arrayConst(llvm_table_ty, llvm_errors); - const error_name_table_global = o.llvm_module.addGlobal(llvm_table_ty.toLlvm(&o.builder), ""); - error_name_table_global.setInitializer(error_name_table_init.toLlvm(&o.builder)); - error_name_table_global.setLinkage(.Private); - error_name_table_global.setGlobalConstant(.True); - error_name_table_global.setUnnamedAddr(.True); - error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode - - var global = Builder.Global{ - .linkage = .private, - .unnamed_addr = .unnamed_addr, - .type = llvm_table_ty, - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - .mutability = .constant, - .init = error_name_table_init, - .alignment = Builder.Alignment.fromByteUnits(slice_alignment), - }; - try o.builder.llvm.globals.append(o.gpa, error_name_table_global); - _ = try 
o.builder.addGlobal(.empty, global); - try o.builder.variables.append(o.gpa, variable); + const table_variable_index = try o.builder.addVariable(.empty, llvm_table_ty, .default); + try table_variable_index.setInitializer( + try o.builder.arrayConst(llvm_table_ty, llvm_errors), + &o.builder, + ); + table_variable_index.setLinkage(.private, &o.builder); + table_variable_index.setMutability(.constant, &o.builder); + table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); + table_variable_index.setAlignment( + Builder.Alignment.fromByteUnits(slice_ty.abiAlignment(mod)), + &o.builder, + ); - const error_name_table_ptr = error_name_table_global; - error_name_table_ptr_global.ptr(&o.builder).init = variable.global.toConst(); - error_name_table_ptr_global.toLlvm(&o.builder).setInitializer(error_name_table_ptr); + try o.error_name_table.setInitializer(table_variable_index.toConst(&o.builder), &o.builder); } fn genCmpLtErrorsLenFunction(o: *Object) !void { @@ -1181,17 +1148,7 @@ pub const Object = struct { } } - if (comp.verbose_llvm_bc) |path| { - const path_z = try comp.gpa.dupeZ(u8, path); - defer comp.gpa.free(path_z); - - const error_code = self.llvm_module.writeBitcodeToFile(path_z); - if (error_code != 0) { - log.err("dump LLVM module failed bc={s}: {d}", .{ - path, error_code, - }); - } - } + if (comp.verbose_llvm_bc) |path| _ = try self.builder.writeBitcodeToFile(path); var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); @@ -1200,20 +1157,10 @@ pub const Object = struct { const mod = comp.bin_file.options.module.?; const cache_dir = mod.zig_cache_artifact_directory; - if (std.debug.runtime_safety) { - var error_message: [*:0]const u8 = undefined; - // verifyModule always allocs the error_message even if there is no error - defer llvm.disposeMessage(error_message); - - if (self.llvm_module.verify(.ReturnStatus, &error_message).toBool()) { - std.debug.print("\n{s}\n", .{error_message}); - - if (try locPath(arena, comp.emit_llvm_ir, cache_dir)) |emit_llvm_ir_path| { - _ = self.llvm_module.printModuleToFile(emit_llvm_ir_path, &error_message); - } - - @panic("LLVM module verification failed"); - } + if (std.debug.runtime_safety and !try self.builder.verify()) { + if (try locPath(arena, comp.emit_llvm_ir, cache_dir)) |emit_llvm_ir_path| + _ = self.builder.printToFileZ(emit_llvm_ir_path); + @panic("LLVM module verification failed"); } var emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit| @@ -1233,12 +1180,17 @@ pub const Object = struct { emit_asm_msg, emit_bin_msg, emit_llvm_ir_msg, emit_llvm_bc_msg, }); + if (!self.builder.useLibLlvm()) { + log.err("emitting without libllvm not implemented", .{}); + return error.FailedToEmit; + } + // Unfortunately, LLVM shits the bed when we ask for both binary and assembly. // So we call the entire pipeline multiple times if this is requested. 
var error_message: [*:0]const u8 = undefined; if (emit_asm_path != null and emit_bin_path != null) { if (self.target_machine.emitToFile( - self.llvm_module, + self.builder.llvm.module.?, &error_message, comp.bin_file.options.optimize_mode == .Debug, comp.bin_file.options.optimize_mode == .ReleaseSmall, @@ -1262,7 +1214,7 @@ pub const Object = struct { } if (self.target_machine.emitToFile( - self.llvm_module, + self.builder.llvm.module.?, &error_message, comp.bin_file.options.optimize_mode == .Debug, comp.bin_file.options.optimize_mode == .ReleaseSmall, @@ -1305,11 +1257,9 @@ pub const Object = struct { .err_msg = null, }; - const function = try o.resolveLlvmFunction(decl_index); - const global = function.ptrConst(&o.builder).global; - const llvm_func = global.toLlvm(&o.builder); + const function_index = try o.resolveLlvmFunction(decl_index); - var attributes = try function.ptrConst(&o.builder).attributes.toWip(&o.builder); + var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder); defer attributes.deinit(&o.builder); if (func.analysis(ip).is_noinline) { @@ -1354,17 +1304,14 @@ pub const Object = struct { } }, &o.builder); } - if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| { - function.ptr(&o.builder).section = try o.builder.string(section); - llvm_func.setSection(section); - } + if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| + function_index.setSection(try o.builder.string(section), &o.builder); var deinit_wip = true; - var wip = try Builder.WipFunction.init(&o.builder, function); + var wip = try Builder.WipFunction.init(&o.builder, function_index); defer if (deinit_wip) wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; - const builder = wip.llvm.builder; var llvm_arg_i: u32 = 0; // This gets the LLVM values from the function and stores them in `dg.args`. @@ -1566,7 +1513,7 @@ pub const Object = struct { } } - function.setAttributes(try attributes.finish(&o.builder), &o.builder); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); var di_file: ?*llvm.DIFile = null; var di_scope: ?*llvm.DIScope = null; @@ -1585,7 +1532,7 @@ pub const Object = struct { const subprogram = dib.createFunction( di_file.?.toScope(), ip.stringToSlice(decl.name), - llvm_func.getValueName(), + function_index.name(&o.builder).slice(&o.builder).?, di_file.?, line_number, decl_di_ty, @@ -1598,7 +1545,7 @@ pub const Object = struct { ); try o.di_map.put(gpa, decl, subprogram.toNode()); - llvm_func.fnSetSubprogram(subprogram); + function_index.toLlvm(&o.builder).fnSetSubprogram(subprogram); di_scope = subprogram.toScope(); } @@ -1609,7 +1556,6 @@ pub const Object = struct { .liveness = liveness, .dg = &dg, .wip = wip, - .builder = builder, .ret_ptr = ret_ptr, .args = args.items, .arg_index = 0, @@ -1670,8 +1616,7 @@ pub const Object = struct { const gpa = mod.gpa; // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. 
- const global = self.decl_map.get(decl_index) orelse return; - const llvm_global = global.toLlvm(&self.builder); + const global_index = self.decl_map.get(decl_index) orelse return; const decl = mod.declPtr(decl_index); if (decl.isExtern(mod)) { const decl_name = decl_name: { @@ -1689,114 +1634,91 @@ pub const Object = struct { }; if (self.builder.getGlobal(decl_name)) |other_global| { - if (other_global.toLlvm(&self.builder) != llvm_global) { + if (other_global != global_index) { try self.extern_collisions.put(gpa, decl_index, {}); } } - try global.rename(decl_name, &self.builder); - global.ptr(&self.builder).unnamed_addr = .default; - llvm_global.setUnnamedAddr(.False); - global.ptr(&self.builder).linkage = .external; - llvm_global.setLinkage(.External); - if (mod.wantDllExports()) { - global.ptr(&self.builder).dll_storage_class = .default; - llvm_global.setDLLStorageClass(.Default); - } + try global_index.rename(decl_name, &self.builder); + global_index.setLinkage(.external, &self.builder); + global_index.setUnnamedAddr(.default, &self.builder); + if (mod.wantDllExports()) global_index.setDllStorageClass(.default, &self.builder); if (self.di_map.get(decl)) |di_node| { const decl_name_slice = decl_name.slice(&self.builder).?; if (try decl.isFunction(mod)) { const di_func: *llvm.DISubprogram = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len); + const linkage_name = llvm.MDString.get( + self.builder.llvm.context, + decl_name_slice.ptr, + decl_name_slice.len, + ); di_func.replaceLinkageName(linkage_name); } else { const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len); + const linkage_name = llvm.MDString.get( + self.builder.llvm.context, + decl_name_slice.ptr, + decl_name_slice.len, + ); di_global.replaceLinkageName(linkage_name); } } if (decl.val.getVariable(mod)) |decl_var| { - if (decl_var.is_threadlocal) { - global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = - .generaldynamic; - llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); - } else { - global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = - .default; - llvm_global.setThreadLocalMode(.NotThreadLocal); - } - if (decl_var.is_weak_linkage) { - global.ptr(&self.builder).linkage = .extern_weak; - llvm_global.setLinkage(.ExternalWeak); - } + global_index.ptrConst(&self.builder).kind.variable.setThreadLocal( + if (decl_var.is_threadlocal) .generaldynamic else .default, + &self.builder, + ); + if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &self.builder); } - global.ptr(&self.builder).updateAttributes(); } else if (exports.len != 0) { - const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exports[0].opts.name)); - try global.rename(exp_name, &self.builder); - global.ptr(&self.builder).unnamed_addr = .default; - llvm_global.setUnnamedAddr(.False); - if (mod.wantDllExports()) { - global.ptr(&self.builder).dll_storage_class = .dllexport; - llvm_global.setDLLStorageClass(.DLLExport); - } + const main_exp_name = try self.builder.string( + mod.intern_pool.stringToSlice(exports[0].opts.name), + ); + try global_index.rename(main_exp_name, &self.builder); + global_index.setUnnamedAddr(.default, &self.builder); + if (mod.wantDllExports()) global_index.setDllStorageClass(.dllexport, &self.builder); if (self.di_map.get(decl)) |di_node| { - const exp_name_slice = 
exp_name.slice(&self.builder).?; + const main_exp_name_slice = main_exp_name.slice(&self.builder).?; if (try decl.isFunction(mod)) { const di_func: *llvm.DISubprogram = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len); + const linkage_name = llvm.MDString.get( + self.builder.llvm.context, + main_exp_name_slice.ptr, + main_exp_name_slice.len, + ); di_func.replaceLinkageName(linkage_name); } else { const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); - const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len); + const linkage_name = llvm.MDString.get( + self.builder.llvm.context, + main_exp_name_slice.ptr, + main_exp_name_slice.len, + ); di_global.replaceLinkageName(linkage_name); } } - switch (exports[0].opts.linkage) { + global_index.setLinkage(switch (exports[0].opts.linkage) { .Internal => unreachable, - .Strong => { - global.ptr(&self.builder).linkage = .external; - llvm_global.setLinkage(.External); - }, - .Weak => { - global.ptr(&self.builder).linkage = .weak_odr; - llvm_global.setLinkage(.WeakODR); - }, - .LinkOnce => { - global.ptr(&self.builder).linkage = .linkonce_odr; - llvm_global.setLinkage(.LinkOnceODR); - }, - } - switch (exports[0].opts.visibility) { - .default => { - global.ptr(&self.builder).visibility = .default; - llvm_global.setVisibility(.Default); - }, - .hidden => { - global.ptr(&self.builder).visibility = .hidden; - llvm_global.setVisibility(.Hidden); - }, - .protected => { - global.ptr(&self.builder).visibility = .protected; - llvm_global.setVisibility(.Protected); - }, - } - if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| { - switch (global.ptrConst(&self.builder).kind) { - inline .variable, .function => |impl_index| impl_index.ptr(&self.builder).section = + .Strong => .external, + .Weak => .weak_odr, + .LinkOnce => .linkonce_odr, + }, &self.builder); + global_index.setVisibility(switch (exports[0].opts.visibility) { + .default => .default, + .hidden => .hidden, + .protected => .protected, + }, &self.builder); + if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| + switch (global_index.ptrConst(&self.builder).kind) { + inline .variable, .function => |impl_index| impl_index.setSection( try self.builder.string(section), + &self.builder, + ), else => unreachable, - } - llvm_global.setSection(section); - } - if (decl.val.getVariable(mod)) |decl_var| { - if (decl_var.is_threadlocal) { - global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = - .generaldynamic; - llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); - } - } - global.ptr(&self.builder).updateAttributes(); + }; + if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal) + global_index.ptrConst(&self.builder).kind + .variable.setThreadLocal(.generaldynamic, &self.builder); // If a Decl is exported more than one time (which is rare), // we add aliases for all but the first export. @@ -1805,49 +1727,47 @@ pub const Object = struct { // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. 
for (exports[1..]) |exp| { - const exp_name_z = mod.intern_pool.stringToSlice(exp.opts.name); - - if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| { - alias.setAliasee(llvm_global); - } else { - _ = self.llvm_module.addAlias( - global.ptrConst(&self.builder).type.toLlvm(&self.builder), - 0, - llvm_global, - exp_name_z, - ); + const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exp.opts.name)); + if (self.builder.getGlobal(exp_name)) |global| { + switch (global.ptrConst(&self.builder).kind) { + .alias => |alias| { + alias.setAliasee(global_index.toConst(), &self.builder); + continue; + }, + .variable, .function => {}, + else => unreachable, + } } + _ = try self.builder.addAlias( + exp_name, + global_index.typeOf(&self.builder), + .default, + global_index.toConst(), + ); } } else { - const fqn = try self.builder.string(mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod))); - try global.rename(fqn, &self.builder); - global.ptr(&self.builder).linkage = .internal; - llvm_global.setLinkage(.Internal); - if (mod.wantDllExports()) { - global.ptr(&self.builder).dll_storage_class = .default; - llvm_global.setDLLStorageClass(.Default); - } - global.ptr(&self.builder).unnamed_addr = .unnamed_addr; - llvm_global.setUnnamedAddr(.True); + const fqn = try self.builder.string( + mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)), + ); + try global_index.rename(fqn, &self.builder); + global_index.setLinkage(.internal, &self.builder); + if (mod.wantDllExports()) global_index.setDllStorageClass(.default, &self.builder); + global_index.setUnnamedAddr(.unnamed_addr, &self.builder); if (decl.val.getVariable(mod)) |decl_var| { - const single_threaded = mod.comp.bin_file.options.single_threaded; - if (decl_var.is_threadlocal and !single_threaded) { - global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = - .generaldynamic; - llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); - } else { - global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = - .default; - llvm_global.setThreadLocalMode(.NotThreadLocal); - } + global_index.ptrConst(&self.builder).kind.variable.setThreadLocal( + if (decl_var.is_threadlocal and !mod.comp.bin_file.options.single_threaded) + .generaldynamic + else + .default, + &self.builder, + ); } - global.ptr(&self.builder).updateAttributes(); } } pub fn freeDecl(self: *Object, decl_index: Module.Decl.Index) void { const global = self.decl_map.get(decl_index) orelse return; - global.toLlvm(&self.builder).deleteGlobal(); + global.delete(&self.builder); } fn getDIFile(o: *Object, gpa: Allocator, file: *const Module.File) !*llvm.DIFile { @@ -2883,8 +2803,12 @@ pub const Object = struct { /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
- fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Function.Index { + fn resolveLlvmFunction( + o: *Object, + decl_index: Module.Decl.Index, + ) Allocator.Error!Builder.Function.Index { const mod = o.module; + const ip = &mod.intern_pool; const gpa = o.gpa; const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; @@ -2896,31 +2820,20 @@ pub const Object = struct { const target = mod.getTarget(); const sret = firstParamSRet(fn_info, mod); - const fn_type = try o.lowerType(zig_fn_type); - - const ip = &mod.intern_pool; - const fqn = try o.builder.string(ip.stringToSlice(try decl.getFullyQualifiedName(mod))); - - const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = o.llvm_module.addFunctionInAddressSpace(fqn.slice(&o.builder).?, fn_type.toLlvm(&o.builder), @intFromEnum(llvm_addrspace)); - - var global = Builder.Global{ - .type = fn_type, - .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, - }; - var function = Builder.Function{ - .global = @enumFromInt(o.builder.globals.count()), - }; + const function_index = try o.builder.addFunction( + try o.lowerType(zig_fn_type), + try o.builder.string(ip.stringToSlice(try decl.getFullyQualifiedName(mod))), + toLlvmAddressSpace(decl.@"addrspace", target), + ); + gop.value_ptr.* = function_index.ptrConst(&o.builder).global; var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); const is_extern = decl.isExtern(mod); if (!is_extern) { - global.linkage = .internal; - llvm_fn.setLinkage(.Internal); - global.unnamed_addr = .unnamed_addr; - llvm_fn.setUnnamedAddr(.True); + function_index.setLinkage(.internal, &o.builder); + function_index.setUnnamedAddr(.unnamed_addr, &o.builder); } else { if (target.isWasm()) { try attributes.addFnAttr(.{ .string = .{ @@ -2957,35 +2870,22 @@ pub const Object = struct { } switch (fn_info.cc) { - .Unspecified, .Inline => { - function.call_conv = .fastcc; - llvm_fn.setFunctionCallConv(.Fast); - }, - .Naked => { - try attributes.addFnAttr(.naked, &o.builder); - }, + .Unspecified, .Inline => function_index.setCallConv(.fastcc, &o.builder), + .Naked => try attributes.addFnAttr(.naked, &o.builder), .Async => { - function.call_conv = .fastcc; - llvm_fn.setFunctionCallConv(.Fast); + function_index.setCallConv(.fastcc, &o.builder); @panic("TODO: LLVM backend lower async function"); }, - else => { - function.call_conv = toLlvmCallConv(fn_info.cc, target); - llvm_fn.setFunctionCallConv(@enumFromInt(@intFromEnum(function.call_conv))); - }, + else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder), } - if (fn_info.alignment.toByteUnitsOptional()) |a| { - function.alignment = Builder.Alignment.fromByteUnits(a); - llvm_fn.setAlignment(@intCast(a)); - } + if (fn_info.alignment.toByteUnitsOptional()) |alignment| + function_index.setAlignment(Builder.Alignment.fromByteUnits(alignment), &o.builder); // Function attributes that are independent of analysis results of the function body. try o.addCommonFnAttributes(&attributes); - if (fn_info.return_type == .noreturn_type) { - try attributes.addFnAttr(.noreturn, &o.builder); - } + if (fn_info.return_type == .noreturn_type) try attributes.addFnAttr(.noreturn, &o.builder); // Add parameter attributes. We handle only the case of extern functions (no body) // because functions with bodies are handled in `updateFunc`. 
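This hunk, together with the global-variable hunks that follow, converts decl lowering to the handle-based Builder API: the function is created once with addFunction and every property is then set through the Builder instead of through a raw llvm.Module global. A condensed sketch of that pattern, using only setters that appear in this diff; the helper name, the "example.helper" string, and the .default address space are illustrative placeholders, not values from the patch.

    const std = @import("std");
    const Builder = @import("Builder.zig");

    // Declare an internal, unnamed-addr fastcc function entirely through the
    // self-hosted Builder, without touching the llvm.Module bindings directly.
    fn addInternalHelper(
        builder: *Builder,
        fn_type: Builder.Type,
    ) std.mem.Allocator.Error!Builder.Function.Index {
        const function_index = try builder.addFunction(
            fn_type,
            try builder.string("example.helper"), // placeholder name
            .default, // placeholder address space
        );
        function_index.setLinkage(.internal, builder);
        function_index.setUnnamedAddr(.unnamed_addr, builder);
        function_index.setCallConv(.fastcc, builder);
        return function_index;
    }
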
@@ -3007,9 +2907,7 @@ pub const Object = struct { Builder.Alignment.fromByteUnits(param_ty.toType().abiAlignment(mod)); try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, - .byref_mut => { - try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder); - }, + .byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder), // No attributes needed for these. .no_bits, .abi_sized_int, @@ -3025,11 +2923,8 @@ pub const Object = struct { }; } - try o.builder.llvm.globals.append(o.gpa, llvm_fn); - gop.value_ptr.* = try o.builder.addGlobal(fqn, global); - try o.builder.functions.append(o.gpa, function); - global.kind.function.setAttributes(try attributes.finish(&o.builder), &o.builder); - return global.kind.function; + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); + return function_index; } fn addCommonFnAttributes( @@ -3093,76 +2988,50 @@ pub const Object = struct { } } - fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Variable.Index { + fn resolveGlobalDecl( + o: *Object, + decl_index: Module.Decl.Index, + ) Allocator.Error!Builder.Variable.Index { const gop = try o.decl_map.getOrPut(o.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); const mod = o.module; const decl = mod.declPtr(decl_index); - const fqn = try o.builder.string(mod.intern_pool.stringToSlice( - try decl.getFullyQualifiedName(mod), - )); - - const target = mod.getTarget(); - - var global = Builder.Global{ - .addr_space = toLlvmGlobalAddressSpace(decl.@"addrspace", target), - .type = try o.lowerType(decl.ty), - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - }; - const is_extern = decl.isExtern(mod); - const name = if (is_extern) - try o.builder.string(mod.intern_pool.stringToSlice(decl.name)) - else - fqn; - const llvm_global = o.llvm_module.addGlobalInAddressSpace( - global.type.toLlvm(&o.builder), - fqn.slice(&o.builder).?, - @intFromEnum(global.addr_space), + + const variable_index = try o.builder.addVariable( + try o.builder.string(mod.intern_pool.stringToSlice( + if (is_extern) decl.name else try decl.getFullyQualifiedName(mod), + )), + try o.lowerType(decl.ty), + toLlvmGlobalAddressSpace(decl.@"addrspace", mod.getTarget()), ); + gop.value_ptr.* = variable_index.ptrConst(&o.builder).global; // This is needed for declarations created by `@extern`. 
if (is_extern) { - global.unnamed_addr = .default; - llvm_global.setUnnamedAddr(.False); - global.linkage = .external; - llvm_global.setLinkage(.External); + variable_index.setLinkage(.external, &o.builder); + variable_index.setUnnamedAddr(.default, &o.builder); if (decl.val.getVariable(mod)) |decl_var| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (decl_var.is_threadlocal and !single_threaded) { - variable.thread_local = .generaldynamic; - llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); - } else { - variable.thread_local = .default; - llvm_global.setThreadLocalMode(.NotThreadLocal); - } - if (decl_var.is_weak_linkage) { - global.linkage = .extern_weak; - llvm_global.setLinkage(.ExternalWeak); - } + variable_index.setThreadLocal( + if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, + &o.builder, + ); + if (decl_var.is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder); } } else { - global.linkage = .internal; - llvm_global.setLinkage(.Internal); - global.unnamed_addr = .unnamed_addr; - llvm_global.setUnnamedAddr(.True); + variable_index.setLinkage(.internal, &o.builder); + variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); } - - try o.builder.llvm.globals.append(o.gpa, llvm_global); - gop.value_ptr.* = try o.builder.addGlobal(name, global); - try o.builder.variables.append(o.gpa, variable); - return global.kind.variable; + return variable_index; } fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { const ty = try o.lowerTypeInner(t); const mod = o.module; - if (std.debug.runtime_safety and false) check: { + if (std.debug.runtime_safety and o.builder.useLibLlvm() and false) check: { const llvm_ty = ty.toLlvm(&o.builder); if (t.zigTypeTag(mod) == .Opaque) break :check; if (!t.hasRuntimeBits(mod)) break :check; @@ -4533,65 +4402,22 @@ pub const DeclGen = struct { if (decl.val.getExternFunc(mod)) |extern_func| { _ = try o.resolveLlvmFunction(extern_func.decl); } else { - const target = mod.getTarget(); - const variable = try o.resolveGlobalDecl(decl_index); - const global = variable.ptrConst(&o.builder).global; - var llvm_global = global.toLlvm(&o.builder); - variable.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); - llvm_global.setAlignment(decl.getAlignment(mod)); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| { - variable.ptr(&o.builder).section = try o.builder.string(section); - llvm_global.setSection(section); - } + const variable_index = try o.resolveGlobalDecl(decl_index); + variable_index.setAlignment( + Builder.Alignment.fromByteUnits(decl.getAlignment(mod)), + &o.builder, + ); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| + variable_index.setSection(try o.builder.string(section), &o.builder); assert(decl.has_tv); const init_val = if (decl.val.getVariable(mod)) |decl_var| decl_var.init else init_val: { - variable.ptr(&o.builder).mutability = .constant; - llvm_global.setGlobalConstant(.True); + variable_index.setMutability(.constant, &o.builder); break :init_val decl.val.toIntern(); }; - if (init_val != .none) { - const llvm_init = try o.lowerValue(init_val); - const llvm_init_ty = llvm_init.typeOf(&o.builder); - if (global.ptrConst(&o.builder).type == llvm_init_ty) { - llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); - } else { - // LLVM does not allow us to change the type of globals. 
So we must - // create a new global with the correct type, copy all its attributes, - // and then update all references to point to the new global, - // delete the original, and rename the new one to the old one's name. - // This is necessary because LLVM does not support const bitcasting - // a struct with padding bytes, which is needed to lower a const union value - // to LLVM, when a field other than the most-aligned is active. Instead, - // we must lower to an unnamed struct, and pointer cast at usage sites - // of the global. Such an unnamed struct is the cause of the global type - // mismatch, because we don't have the LLVM type until the *value* is created, - // whereas the global needs to be created based on the type alone, because - // lowering the value may reference the global as a pointer. - // Related: https://github.com/ziglang/zig/issues/13265 - const llvm_global_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - const new_global = o.llvm_module.addGlobalInAddressSpace( - llvm_init_ty.toLlvm(&o.builder), - "", - @intFromEnum(llvm_global_addrspace), - ); - new_global.setLinkage(llvm_global.getLinkage()); - new_global.setUnnamedAddr(llvm_global.getUnnamedAddress()); - new_global.setAlignment(llvm_global.getAlignment()); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| - new_global.setSection(section); - new_global.setInitializer(llvm_init.toLlvm(&o.builder)); - // TODO: How should this work then the address space of a global changed? - llvm_global.replaceAllUsesWith(new_global); - new_global.takeName(llvm_global); - o.builder.llvm.globals.items[@intFromEnum(variable.ptrConst(&o.builder).global)] = - new_global; - llvm_global.deleteGlobal(); - llvm_global = new_global; - variable.ptr(&o.builder).mutability = .global; - global.ptr(&o.builder).type = llvm_init_ty; - } - variable.ptr(&o.builder).init = llvm_init; - } + try variable_index.setInitializer(switch (init_val) { + .none => .no_init, + else => try o.lowerValue(init_val), + }, &o.builder); if (o.di_builder) |dib| { const di_file = try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -4601,7 +4427,7 @@ pub const DeclGen = struct { const di_global = dib.createGlobalVariableExpression( di_file.toScope(), mod.intern_pool.stringToSlice(decl.name), - llvm_global.getValueName(), + variable_index.name(&o.builder).slice(&o.builder).?, di_file, line_number, try o.lowerDebugType(decl.ty, .full), @@ -4609,7 +4435,8 @@ pub const DeclGen = struct { ); try o.di_map.put(o.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern(mod)) llvm_global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) + variable_index.toLlvm(&o.builder).attachMetaData(di_global); } } } @@ -4621,7 +4448,6 @@ pub const FuncGen = struct { air: Air, liveness: Liveness, wip: Builder.WipFunction, - builder: *llvm.Builder, di_scope: ?*llvm.DIScope, di_file: ?*llvm.DIFile, base_line: u32, @@ -4710,38 +4536,22 @@ pub const FuncGen = struct { // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. 
const target = mod.getTarget(); - const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); - const llvm_ty = llvm_val.typeOf(&o.builder); - const llvm_alignment = tv.ty.abiAlignment(mod); - const llvm_global = o.llvm_module.addGlobalInAddressSpace(llvm_ty.toLlvm(&o.builder), "", @intFromEnum(llvm_actual_addrspace)); - llvm_global.setInitializer(llvm_val.toLlvm(&o.builder)); - llvm_global.setLinkage(.Private); - llvm_global.setGlobalConstant(.True); - llvm_global.setUnnamedAddr(.True); - llvm_global.setAlignment(llvm_alignment); - - var global = Builder.Global{ - .linkage = .private, - .unnamed_addr = .unnamed_addr, - .addr_space = llvm_actual_addrspace, - .type = llvm_ty, - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - .mutability = .constant, - .init = llvm_val, - .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), - }; - try o.builder.llvm.globals.append(o.gpa, llvm_global); - const global_index = try o.builder.addGlobal(.empty, global); - try o.builder.variables.append(o.gpa, variable); - + const variable_index = try o.builder.addVariable( + .empty, + llvm_val.typeOf(&o.builder), + toLlvmGlobalAddressSpace(.generic, target), + ); + try variable_index.setInitializer(llvm_val, &o.builder); + variable_index.setLinkage(.private, &o.builder); + variable_index.setMutability(.constant, &o.builder); + variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); + variable_index.setAlignment(Builder.Alignment.fromByteUnits( + tv.ty.abiAlignment(mod), + ), &o.builder); return o.builder.convConst( .unneeded, - global_index.toConst(), - try o.builder.ptrType(llvm_wanted_addrspace), + variable_index.toConst(&o.builder), + try o.builder.ptrType(toLlvmAddressSpace(.generic, target)), ); } @@ -4768,18 +4578,18 @@ pub const FuncGen = struct { const val: Builder.Value = switch (air_tags[inst]) { // zig fmt: off - .add => try self.airAdd(inst, false), - .add_optimized => try self.airAdd(inst, true), + .add => try self.airAdd(inst, .normal), + .add_optimized => try self.airAdd(inst, .fast), .add_wrap => try self.airAddWrap(inst), .add_sat => try self.airAddSat(inst), - .sub => try self.airSub(inst, false), - .sub_optimized => try self.airSub(inst, true), + .sub => try self.airSub(inst, .normal), + .sub_optimized => try self.airSub(inst, .fast), .sub_wrap => try self.airSubWrap(inst), .sub_sat => try self.airSubSat(inst), - .mul => try self.airMul(inst, false), - .mul_optimized => try self.airMul(inst, true), + .mul => try self.airMul(inst, .normal), + .mul_optimized => try self.airMul(inst, .fast), .mul_wrap => try self.airMulWrap(inst), .mul_sat => try self.airMulSat(inst), @@ -4787,12 +4597,12 @@ pub const FuncGen = struct { .sub_safe => try self.airSafeArithmetic(inst, .@"ssub.with.overflow", .@"usub.with.overflow"), .mul_safe => try self.airSafeArithmetic(inst, .@"smul.with.overflow", .@"umul.with.overflow"), - .div_float => try self.airDivFloat(inst, false), - .div_trunc => try self.airDivTrunc(inst, false), - .div_floor => try self.airDivFloor(inst, false), - .div_exact => try self.airDivExact(inst, false), - .rem => try self.airRem(inst, false), - .mod => try self.airMod(inst, false), + .div_float => try self.airDivFloat(inst, .normal), + .div_trunc => try self.airDivTrunc(inst, .normal), + .div_floor => try self.airDivFloor(inst, .normal), + .div_exact => try self.airDivExact(inst, 
.normal), + .rem => try self.airRem(inst, .normal), + .mod => try self.airMod(inst, .normal), .ptr_add => try self.airPtrAdd(inst), .ptr_sub => try self.airPtrSub(inst), .shl => try self.airShl(inst), @@ -4803,12 +4613,12 @@ pub const FuncGen = struct { .slice => try self.airSlice(inst), .mul_add => try self.airMulAdd(inst), - .div_float_optimized => try self.airDivFloat(inst, true), - .div_trunc_optimized => try self.airDivTrunc(inst, true), - .div_floor_optimized => try self.airDivFloor(inst, true), - .div_exact_optimized => try self.airDivExact(inst, true), - .rem_optimized => try self.airRem(inst, true), - .mod_optimized => try self.airMod(inst, true), + .div_float_optimized => try self.airDivFloat(inst, .fast), + .div_trunc_optimized => try self.airDivTrunc(inst, .fast), + .div_floor_optimized => try self.airDivFloor(inst, .fast), + .div_exact_optimized => try self.airDivExact(inst, .fast), + .rem_optimized => try self.airRem(inst, .fast), + .mod_optimized => try self.airMod(inst, .fast), .add_with_overflow => try self.airOverflow(inst, .@"sadd.with.overflow", .@"uadd.with.overflow"), .sub_with_overflow => try self.airOverflow(inst, .@"ssub.with.overflow", .@"usub.with.overflow"), @@ -4836,25 +4646,25 @@ pub const FuncGen = struct { .round => try self.airUnaryOp(inst, .round), .trunc_float => try self.airUnaryOp(inst, .trunc), - .neg => try self.airNeg(inst, false), - .neg_optimized => try self.airNeg(inst, true), - - .cmp_eq => try self.airCmp(inst, .eq, false), - .cmp_gt => try self.airCmp(inst, .gt, false), - .cmp_gte => try self.airCmp(inst, .gte, false), - .cmp_lt => try self.airCmp(inst, .lt, false), - .cmp_lte => try self.airCmp(inst, .lte, false), - .cmp_neq => try self.airCmp(inst, .neq, false), - - .cmp_eq_optimized => try self.airCmp(inst, .eq, true), - .cmp_gt_optimized => try self.airCmp(inst, .gt, true), - .cmp_gte_optimized => try self.airCmp(inst, .gte, true), - .cmp_lt_optimized => try self.airCmp(inst, .lt, true), - .cmp_lte_optimized => try self.airCmp(inst, .lte, true), - .cmp_neq_optimized => try self.airCmp(inst, .neq, true), - - .cmp_vector => try self.airCmpVector(inst, false), - .cmp_vector_optimized => try self.airCmpVector(inst, true), + .neg => try self.airNeg(inst, .normal), + .neg_optimized => try self.airNeg(inst, .fast), + + .cmp_eq => try self.airCmp(inst, .eq, .normal), + .cmp_gt => try self.airCmp(inst, .gt, .normal), + .cmp_gte => try self.airCmp(inst, .gte, .normal), + .cmp_lt => try self.airCmp(inst, .lt, .normal), + .cmp_lte => try self.airCmp(inst, .lte, .normal), + .cmp_neq => try self.airCmp(inst, .neq, .normal), + + .cmp_eq_optimized => try self.airCmp(inst, .eq, .fast), + .cmp_gt_optimized => try self.airCmp(inst, .gt, .fast), + .cmp_gte_optimized => try self.airCmp(inst, .gte, .fast), + .cmp_lt_optimized => try self.airCmp(inst, .lt, .fast), + .cmp_lte_optimized => try self.airCmp(inst, .lte, .fast), + .cmp_neq_optimized => try self.airCmp(inst, .neq, .fast), + + .cmp_vector => try self.airCmpVector(inst, .normal), + .cmp_vector_optimized => try self.airCmpVector(inst, .fast), .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst), .is_non_null => try self.airIsNonNull(inst, false, .ne), @@ -4906,8 +4716,8 @@ pub const FuncGen = struct { .ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0), .ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1), - .int_from_float => try self.airIntFromFloat(inst, false), - .int_from_float_optimized => try self.airIntFromFloat(inst, true), + .int_from_float => try self.airIntFromFloat(inst, 
.normal), + .int_from_float_optimized => try self.airIntFromFloat(inst, .fast), .array_to_slice => try self.airArrayToSlice(inst), .float_from_int => try self.airFloatFromInt(inst), @@ -4939,8 +4749,8 @@ pub const FuncGen = struct { .is_named_enum_value => try self.airIsNamedEnumValue(inst), .error_set_has_value => try self.airErrorSetHasValue(inst), - .reduce => try self.airReduce(inst, false), - .reduce_optimized => try self.airReduce(inst, true), + .reduce => try self.airReduce(inst, .normal), + .reduce_optimized => try self.airReduce(inst, .fast), .atomic_store_unordered => try self.airAtomicStore(inst, .unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic), @@ -5466,7 +5276,7 @@ pub const FuncGen = struct { const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); - _ = try self.wip.callIntrinsic(.none, .va_copy, &.{}, &.{ dest_list, src_list }, ""); + _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, ""); return if (isByRef(va_list_ty, mod)) dest_list else @@ -5477,7 +5287,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const src_list = try self.resolveInst(un_op); - _ = try self.wip.callIntrinsic(.none, .va_end, &.{}, &.{src_list}, ""); + _ = try self.wip.callIntrinsic(.normal, .none, .va_end, &.{}, &.{src_list}, ""); return .none; } @@ -5490,27 +5300,28 @@ pub const FuncGen = struct { const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); - _ = try self.wip.callIntrinsic(.none, .va_start, &.{}, &.{dest_list}, ""); + _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, ""); return if (isByRef(va_list_ty, mod)) dest_list else try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); } - fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airCmp( + self: *FuncGen, + inst: Air.Inst.Index, + op: math.CompareOperator, + fast: Builder.FastMathKind, + ) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const operand_ty = self.typeOf(bin_op.lhs); - return self.cmp(lhs, rhs, operand_ty, op); + return self.cmp(fast, op, operand_ty, lhs, rhs); } - fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data; @@ -5519,7 +5330,7 @@ pub const FuncGen = struct { const vec_ty = self.typeOf(extra.lhs); const cmp_op = extra.compareOperator(); - return self.cmp(lhs, rhs, vec_ty, cmp_op); + return self.cmp(fast, cmp_op, vec_ty, lhs, rhs); } fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -5540,10 +5351,11 @@ pub const FuncGen = struct { fn cmp( self: *FuncGen, + fast: Builder.FastMathKind, + op: math.CompareOperator, + operand_ty: Type, lhs: Builder.Value, rhs: Builder.Value, - operand_ty: Type, - op: math.CompareOperator, ) Allocator.Error!Builder.Value { const o = 
self.dg.object; const mod = o.module; @@ -5595,7 +5407,7 @@ pub const FuncGen = struct { self.wip.cursor = .{ .block = both_pl_block }; const lhs_payload = try self.optPayloadHandle(opt_llvm_ty, lhs, scalar_ty, true); const rhs_payload = try self.optPayloadHandle(opt_llvm_ty, rhs, scalar_ty, true); - const payload_cmp = try self.cmp(lhs_payload, rhs_payload, payload_ty, op); + const payload_cmp = try self.cmp(fast, op, payload_ty, lhs_payload, rhs_payload); _ = try self.wip.br(end_block); const both_pl_block_end = self.wip.cursor.block; @@ -5624,7 +5436,7 @@ pub const FuncGen = struct { ); return phi.toValue(); }, - .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }), + .Float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs }), else => unreachable, }; const is_signed = int_ty.isSignedInt(mod); @@ -5995,8 +5807,12 @@ pub const FuncGen = struct { ); } - fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); + fn airIntFromFloat( + self: *FuncGen, + inst: Air.Inst.Index, + fast: Builder.FastMathKind, + ) !Builder.Value { + _ = fast; const o = self.dg.object; const mod = o.module; @@ -6414,6 +6230,8 @@ pub const FuncGen = struct { } fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + if (!self.dg.object.builder.useLibLlvm()) return .none; + const di_scope = self.di_scope orelse return .none; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; self.prev_dbg_line = @intCast(self.base_line + dbg_stmt.line + 1); @@ -6422,12 +6240,19 @@ pub const FuncGen = struct { self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc else null; - self.builder.setCurrentDebugLocation(self.prev_dbg_line, self.prev_dbg_column, di_scope, inlined_at); + self.wip.llvm.builder.setCurrentDebugLocation( + self.prev_dbg_line, + self.prev_dbg_column, + di_scope, + inlined_at, + ); return .none; } fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + if (!o.builder.useLibLlvm()) return .none; + const dib = o.di_builder orelse return .none; const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; @@ -6438,7 +6263,7 @@ pub const FuncGen = struct { const di_file = try o.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const line_number = decl.src_line + 1; - const cur_debug_location = self.builder.getCurrentDebugLocation2(); + const cur_debug_location = self.wip.llvm.builder.getCurrentDebugLocation2(); try self.dbg_inlined.append(self.gpa, .{ .loc = @ptrCast(cur_debug_location), @@ -6486,6 +6311,8 @@ pub const FuncGen = struct { fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + if (!o.builder.useLibLlvm()) return .none; + if (o.di_builder == null) return .none; const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; @@ -6501,6 +6328,8 @@ pub const FuncGen = struct { fn airDbgBlockBegin(self: *FuncGen) !Builder.Value { const o = self.dg.object; + if (!o.builder.useLibLlvm()) return .none; + const dib = o.di_builder orelse return .none; const old_scope = self.di_scope.?; try self.dbg_block_stack.append(self.gpa, old_scope); @@ -6511,6 +6340,8 @@ pub const FuncGen = struct { fn airDbgBlockEnd(self: *FuncGen) !Builder.Value { const o = self.dg.object; + if (!o.builder.useLibLlvm()) return .none; + if (o.di_builder == null) return .none; self.di_scope = self.dbg_block_stack.pop(); return .none; @@ -6518,6 +6349,8 @@ pub const 
FuncGen = struct { fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + if (!o.builder.useLibLlvm()) return .none; + const mod = o.module; const dib = o.di_builder orelse return .none; const pl_op = self.air.instructions.items(.data)[inst].pl_op; @@ -6546,6 +6379,8 @@ pub const FuncGen = struct { fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; + if (!o.builder.useLibLlvm()) return .none; + const dib = o.di_builder orelse return .none; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); @@ -7346,7 +7181,7 @@ pub const FuncGen = struct { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - return self.wip.callIntrinsic(.none, .@"wasm.memory.size", &.{.i32}, &.{ + return self.wip.callIntrinsic(.normal, .none, .@"wasm.memory.size", &.{.i32}, &.{ try o.builder.intValue(.i32, index), }, ""); } @@ -7355,7 +7190,7 @@ pub const FuncGen = struct { const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - return self.wip.callIntrinsic(.none, .@"wasm.memory.grow", &.{.i32}, &.{ + return self.wip.callIntrinsic(.normal, .none, .@"wasm.memory.grow", &.{.i32}, &.{ try o.builder.intValue(.i32, index), try self.resolveInst(pl_op.operand), }, ""); } @@ -7391,8 +7226,9 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, inst_ty, 2, .{ lhs, rhs }); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, .normal, inst_ty, 2, .{ lhs, rhs }); return self.wip.callIntrinsic( + .normal, .none, if (scalar_ty.isSignedInt(mod)) .smin else .umin, &.{try o.lowerType(inst_ty)}, @@ -7410,8 +7246,9 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, inst_ty, 2, .{ lhs, rhs }); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, .normal, inst_ty, 2, .{ lhs, rhs }); return self.wip.callIntrinsic( + .normal, .none, if (scalar_ty.isSignedInt(mod)) .smax else .umax, &.{try o.lowerType(inst_ty)}, @@ -7430,9 +7267,7 @@ pub const FuncGen = struct { return self.wip.buildAggregate(try o.lowerType(inst_ty), &.{ ptr, len }, ""); } - fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7441,7 +7276,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, fast, inst_ty, 2, .{ lhs, rhs }); return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"add nsw" else .@"add nuw", lhs, rhs, ""); } @@ -7463,12 +7298,13 @@ pub const FuncGen = struct { const intrinsic = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; const llvm_inst_ty = try o.lowerType(inst_ty); const results = - try fg.wip.callIntrinsic(.none, intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, ""); + try 
fg.wip.callIntrinsic(.normal, .none, intrinsic, &.{llvm_inst_ty}, &.{ lhs, rhs }, ""); const overflow_bits = try fg.wip.extractValue(results, &.{1}, ""); const overflow_bits_ty = overflow_bits.typeOfWip(&fg.wip); const overflow_bit = if (overflow_bits_ty.isVector(&o.builder)) try fg.wip.callIntrinsic( + .normal, .none, .@"vector.reduce.or", &.{overflow_bits_ty}, @@ -7508,6 +7344,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); return self.wip.callIntrinsic( + .normal, .none, if (scalar_ty.isSignedInt(mod)) .@"sadd.sat" else .@"uadd.sat", &.{try o.lowerType(inst_ty)}, @@ -7516,9 +7353,7 @@ pub const FuncGen = struct { ); } - fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7527,7 +7362,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, fast, inst_ty, 2, .{ lhs, rhs }); return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"sub nsw" else .@"sub nuw", lhs, rhs, ""); } @@ -7550,6 +7385,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); return self.wip.callIntrinsic( + .normal, .none, if (scalar_ty.isSignedInt(mod)) .@"ssub.sat" else .@"usub.sat", &.{try o.lowerType(inst_ty)}, @@ -7558,9 +7394,7 @@ pub const FuncGen = struct { ); } - fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7569,7 +7403,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); + if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, fast, inst_ty, 2, .{ lhs, rhs }); return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"mul nsw" else .@"mul nuw", lhs, rhs, ""); } @@ -7592,6 +7426,7 @@ pub const FuncGen = struct { if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); return self.wip.callIntrinsic( + .normal, .none, if (scalar_ty.isSignedInt(mod)) .@"smul.fix.sat" else .@"umul.fix.sat", &.{try o.lowerType(inst_ty)}, @@ -7600,20 +7435,16 @@ pub const FuncGen = struct { ); } - fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); + return self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs }); } - fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - 
self.builder.setFastMath(want_fast_math); - + fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7623,15 +7454,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { - const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - return self.buildFloatOp(.trunc, inst_ty, 1, .{result}); + const result = try self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs }); + return self.buildFloatOp(.trunc, fast, inst_ty, 1, .{result}); } return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .sdiv else .udiv, lhs, rhs, ""); } - fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7641,8 +7470,8 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { - const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - return self.buildFloatOp(.floor, inst_ty, 1, .{result}); + const result = try self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs }); + return self.buildFloatOp(.floor, fast, inst_ty, 1, .{result}); } if (scalar_ty.isSignedInt(mod)) { const inst_llvm_ty = try o.lowerType(inst_ty); @@ -7657,15 +7486,13 @@ pub const FuncGen = struct { const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, ""); const zero = try o.builder.zeroInitValue(inst_llvm_ty); const rem_nonzero = try self.wip.icmp(.ne, rem, zero, ""); - const correction = try self.wip.select(rem_nonzero, div_sign_mask, zero, ""); + const correction = try self.wip.select(.normal, rem_nonzero, div_sign_mask, zero, ""); return self.wip.bin(.@"add nsw", div, correction, ""); } return self.wip.bin(.udiv, lhs, rhs, ""); } - fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7674,16 +7501,16 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - return self.wip.bin(if (scalar_ty.isSignedInt(mod)) - .@"sdiv exact" - else - .@"udiv exact", lhs, rhs, ""); + if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, fast, inst_ty, 2, .{ lhs, rhs }); + return self.wip.bin( + if (scalar_ty.isSignedInt(mod)) .@"sdiv exact" else .@"udiv exact", + lhs, + rhs, + "", + ); } - fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7692,16 +7519,15 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); - if (scalar_ty.isRuntimeFloat()) return 
self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); + if (scalar_ty.isRuntimeFloat()) + return self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ lhs, rhs }); return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .srem else .urem, lhs, rhs, ""); } - fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7712,12 +7538,12 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { - const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); - const b = try self.buildFloatOp(.add, inst_ty, 2, .{ a, rhs }); - const c = try self.buildFloatOp(.fmod, inst_ty, 2, .{ b, rhs }); + const a = try self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ lhs, rhs }); + const b = try self.buildFloatOp(.add, fast, inst_ty, 2, .{ a, rhs }); + const c = try self.buildFloatOp(.fmod, fast, inst_ty, 2, .{ b, rhs }); const zero = try o.builder.zeroInitValue(inst_llvm_ty); - const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); - return self.wip.select(ltz, c, a, ""); + const ltz = try self.buildFloatCmp(fast, .lt, inst_ty, .{ lhs, zero }); + return self.wip.select(fast, ltz, c, a, ""); } if (scalar_ty.isSignedInt(mod)) { const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst( @@ -7731,7 +7557,7 @@ pub const FuncGen = struct { const rhs_masked = try self.wip.bin(.@"and", rhs, div_sign_mask, ""); const zero = try o.builder.zeroInitValue(inst_llvm_ty); const rem_nonzero = try self.wip.icmp(.ne, rem, zero, ""); - const correction = try self.wip.select(rem_nonzero, rhs_masked, zero, ""); + const correction = try self.wip.select(.normal, rem_nonzero, rhs_masked, zero, ""); return self.wip.bin(.@"add nsw", rem, correction, ""); } return self.wip.bin(.urem, lhs, rhs, ""); @@ -7804,7 +7630,7 @@ pub const FuncGen = struct { const llvm_inst_ty = try o.lowerType(inst_ty); const llvm_lhs_ty = try o.lowerType(lhs_ty); const results = - try self.wip.callIntrinsic(.none, intrinsic, &.{llvm_lhs_ty}, &.{ lhs, rhs }, ""); + try self.wip.callIntrinsic(.normal, .none, intrinsic, &.{llvm_lhs_ty}, &.{ lhs, rhs }, ""); const result_val = try self.wip.extractValue(results, &.{0}, ""); const overflow_bit = try self.wip.extractValue(results, &.{1}, ""); @@ -7879,13 +7705,18 @@ pub const FuncGen = struct { .function => |function| function, else => unreachable, }; - return o.builder.addFunction(try o.builder.fnType(return_type, param_types, .normal), fn_name); + return o.builder.addFunction( + try o.builder.fnType(return_type, param_types, .normal), + fn_name, + toLlvmAddressSpace(.generic, o.module.getTarget()), + ); } /// Creates a floating point comparison by lowering to the appropriate /// hardware instruction or softfloat routine for the target fn buildFloatCmp( self: *FuncGen, + fast: Builder.FastMathKind, pred: math.CompareOperator, ty: Type, params: [2]Builder.Value, @@ -7905,7 +7736,7 @@ pub const FuncGen = struct { .gt => .ogt, .gte => .oge, }; - return self.wip.fcmp(cond, params[0], params[1], ""); + return self.wip.fcmp(fast, cond, params[0], params[1], ""); } const float_bits = scalar_ty.floatBits(target); @@ -7996,6 +7827,7 @@ pub const FuncGen = struct { fn buildFloatOp( self: *FuncGen, comptime op: FloatOp, + fast: Builder.FastMathKind, ty: Type, comptime 
params_len: usize, params: [params_len]Builder.Value, @@ -8009,13 +7841,23 @@ pub const FuncGen = struct { if (op != .tan and intrinsicsAllowed(scalar_ty, target)) switch (op) { // Some operations are dedicated LLVM instructions, not available as intrinsics .neg => return self.wip.un(.fneg, params[0], ""), - .add, .sub, .mul, .div, .fmod => return self.wip.bin(switch (op) { - .add => .fadd, - .sub => .fsub, - .mul => .fmul, - .div => .fdiv, - .fmod => .frem, - else => unreachable, + .add, .sub, .mul, .div, .fmod => return self.wip.bin(switch (fast) { + .normal => switch (op) { + .add => .fadd, + .sub => .fsub, + .mul => .fmul, + .div => .fdiv, + .fmod => .frem, + else => unreachable, + }, + .fast => switch (op) { + .add => .@"fadd fast", + .sub => .@"fsub fast", + .mul => .@"fmul fast", + .div => .@"fdiv fast", + .fmod => .@"frem fast", + else => unreachable, + }, }, params[0], params[1], ""), .fmax, .fmin, @@ -8033,7 +7875,7 @@ pub const FuncGen = struct { .sqrt, .trunc, .fma, - => return self.wip.callIntrinsic(.none, switch (op) { + => return self.wip.callIntrinsic(fast, .none, switch (op) { .fmax => .maxnum, .fmin => .minnum, .ceil => .ceil, @@ -8108,7 +7950,7 @@ pub const FuncGen = struct { } return self.wip.call( - .normal, + fast.toCallKind(), .ccc, .none, libc_fn.typeOf(&o.builder), @@ -8127,7 +7969,7 @@ pub const FuncGen = struct { const addend = try self.resolveInst(pl_op.operand); const ty = self.typeOfIndex(inst); - return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend }); + return self.buildFloatOp(.fma, .normal, ty, 3, .{ mulend1, mulend2, addend }); } fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -8248,6 +8090,7 @@ pub const FuncGen = struct { const llvm_lhs_ty = try o.lowerType(lhs_ty); const llvm_lhs_scalar_ty = llvm_lhs_ty.scalarType(&o.builder); const result = try self.wip.callIntrinsic( + .normal, .none, if (lhs_scalar_ty.isSignedInt(mod)) .@"sshl.sat" else .@"ushl.sat", &.{llvm_lhs_ty}, @@ -8269,7 +8112,7 @@ pub const FuncGen = struct { try o.builder.intConst(llvm_lhs_scalar_ty, -1), ); const in_range = try self.wip.icmp(.ult, rhs, bits, ""); - return self.wip.select(in_range, result, lhs_max, ""); + return self.wip.select(.normal, in_range, result, lhs_max, ""); } fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { @@ -8682,14 +8525,14 @@ pub const FuncGen = struct { fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - _ = try self.wip.callIntrinsic(.none, .trap, &.{}, &.{}, ""); + _ = try self.wip.callIntrinsic(.normal, .none, .trap, &.{}, &.{}, ""); _ = try self.wip.@"unreachable"(); return .none; } fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - _ = try self.wip.callIntrinsic(.none, .debugtrap, &.{}, &.{}, ""); + _ = try self.wip.callIntrinsic(.normal, .none, .debugtrap, &.{}, &.{}, ""); return .none; } @@ -8701,7 +8544,7 @@ pub const FuncGen = struct { // https://github.com/ziglang/zig/issues/11946 return o.builder.intValue(llvm_usize, 0); } - const result = try self.wip.callIntrinsic(.none, .returnaddress, &.{}, &.{ + const result = try self.wip.callIntrinsic(.normal, .none, .returnaddress, &.{}, &.{ try o.builder.intValue(.i32, 0), }, ""); return self.wip.cast(.ptrtoint, result, llvm_usize, ""); @@ -8710,7 +8553,7 @@ pub const FuncGen = struct { fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; - const result = try self.wip.callIntrinsic(.none, .frameaddress, &.{.ptr}, 
&.{ + const result = try self.wip.callIntrinsic(.normal, .none, .frameaddress, &.{.ptr}, &.{ try o.builder.intValue(.i32, 0), }, ""); return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), ""); @@ -8768,7 +8611,7 @@ pub const FuncGen = struct { if (optional_ty.optionalReprIsPayload(mod)) { const zero = try o.builder.zeroInitValue(payload.typeOfWip(&self.wip)); - return self.wip.select(success_bit, zero, payload, ""); + return self.wip.select(.normal, success_bit, zero, payload, ""); } comptime assert(optional_layout_version == 3); @@ -9053,8 +8896,8 @@ pub const FuncGen = struct { access_kind: Builder.MemoryAccessKind, ) !void { const o = self.dg.object; - const llvm_usize_ty = try o.lowerType(Type.usize); - const cond = try self.cmp(len, try o.builder.intValue(llvm_usize_ty, 0), Type.usize, .neq); + const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0); + const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero); const memset_block = try self.wip.block(1, "MemsetTrapSkip"); const end_block = try self.wip.block(2, "MemsetTrapEnd"); _ = try self.wip.brCond(cond, memset_block, end_block); @@ -9087,8 +8930,8 @@ pub const FuncGen = struct { std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and dest_ptr_ty.isSlice(mod)) { - const zero_usize = try o.builder.intValue(try o.lowerType(Type.usize), 0); - const cond = try self.cmp(len, zero_usize, Type.usize, .neq); + const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0); + const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero); const memcpy_block = try self.wip.block(1, "MemcpyTrapSkip"); const end_block = try self.wip.block(2, "MemcpyTrapEnd"); _ = try self.wip.brCond(cond, memcpy_block, end_block); @@ -9166,17 +9009,15 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - return self.buildFloatOp(op, operand_ty, 1, .{operand}); + return self.buildFloatOp(op, .normal, operand_ty, 1, .{operand}); } - fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); - + fn airNeg(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - return self.buildFloatOp(.neg, operand_ty, 1, .{operand}); + return self.buildFloatOp(.neg, fast, operand_ty, 1, .{operand}); } fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { @@ -9187,6 +9028,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const result = try self.wip.callIntrinsic( + .normal, .none, intrinsic, &.{try o.lowerType(operand_ty)}, @@ -9204,6 +9046,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const result = try self.wip.callIntrinsic( + .normal, .none, intrinsic, &.{try o.lowerType(operand_ty)}, @@ -9242,7 +9085,8 @@ pub const FuncGen = struct { bits = bits + 8; } - const result = try self.wip.callIntrinsic(.none, .bswap, &.{llvm_operand_ty}, &.{operand}, ""); + const result = + try self.wip.callIntrinsic(.normal, .none, .bswap, &.{llvm_operand_ty}, &.{operand}, ""); return self.wip.conv(.unsigned, result, try o.lowerType(inst_ty), ""); } @@ -9309,20 +9153,18 @@ pub const FuncGen = struct { const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try 
o.lowerType(enum_type.tag_ty.toType())}, .normal), try o.builder.fmt("__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}), + toLlvmAddressSpace(.generic, mod.getTarget()), ); var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); try o.addCommonFnAttributes(&attributes); - function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); - function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; - function_index.ptr(&o.builder).call_conv = .fastcc; + function_index.setLinkage(.internal, &o.builder); + function_index.setCallConv(.fastcc, &o.builder); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); gop.value_ptr.* = function_index; - function_index.toLlvm(&o.builder).setLinkage(.Internal); - function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); - var wip = try Builder.WipFunction.init(&o.builder, function_index); defer wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; @@ -9383,20 +9225,18 @@ pub const FuncGen = struct { const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(enum_type.tag_ty.toType())}, .normal), try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}), + toLlvmAddressSpace(.generic, mod.getTarget()), ); var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); try o.addCommonFnAttributes(&attributes); - function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); - function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; - function_index.ptr(&o.builder).call_conv = .fastcc; + function_index.setLinkage(.internal, &o.builder); + function_index.setCallConv(.fastcc, &o.builder); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; - function_index.toLlvm(&o.builder).setLinkage(.Internal); - function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); - var wip = try Builder.WipFunction.init(&o.builder, function_index); defer wip.deinit(); wip.cursor = .{ .block = try wip.block(0, "Entry") }; @@ -9407,36 +9247,20 @@ pub const FuncGen = struct { try wip.@"switch"(tag_int_value, bad_value_block, @intCast(enum_type.names.len)); defer wip_switch.finish(&wip); - for (enum_type.names, 0..) 
|name_ip, field_index| { - const name = try o.builder.string(mod.intern_pool.stringToSlice(name_ip)); - const str_init = try o.builder.stringNullConst(name); - const str_ty = str_init.typeOf(&o.builder); - const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); - str_llvm_global.setInitializer(str_init.toLlvm(&o.builder)); - str_llvm_global.setLinkage(.Private); - str_llvm_global.setGlobalConstant(.True); - str_llvm_global.setUnnamedAddr(.True); - str_llvm_global.setAlignment(1); - - var str_global = Builder.Global{ - .linkage = .private, - .unnamed_addr = .unnamed_addr, - .type = str_ty, - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var str_variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - .mutability = .constant, - .init = str_init, - .alignment = comptime Builder.Alignment.fromByteUnits(1), - }; - try o.builder.llvm.globals.append(o.gpa, str_llvm_global); - const global_index = try o.builder.addGlobal(.empty, str_global); - try o.builder.variables.append(o.gpa, str_variable); - - const slice_val = try o.builder.structValue(ret_ty, &.{ - global_index.toConst(), - try o.builder.intConst(usize_ty, name.slice(&o.builder).?.len), + for (enum_type.names, 0..) |name, field_index| { + const name_string = try o.builder.string(mod.intern_pool.stringToSlice(name)); + const name_init = try o.builder.stringNullConst(name_string); + const name_variable_index = + try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default); + try name_variable_index.setInitializer(name_init, &o.builder); + name_variable_index.setLinkage(.private, &o.builder); + name_variable_index.setMutability(.constant, &o.builder); + name_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); + name_variable_index.setAlignment(comptime Builder.Alignment.fromByteUnits(1), &o.builder); + + const name_val = try o.builder.structValue(ret_ty, &.{ + name_variable_index.toConst(&o.builder), + try o.builder.intConst(usize_ty, name_string.slice(&o.builder).?.len), }); const return_block = try wip.block(1, "Name"); @@ -9446,7 +9270,7 @@ pub const FuncGen = struct { try wip_switch.addCase(this_tag_int_value, return_block, &wip); wip.cursor = .{ .block = return_block }; - _ = try wip.ret(slice_val); + _ = try wip.ret(name_val); } wip.cursor = .{ .block = bad_value_block }; @@ -9465,19 +9289,16 @@ pub const FuncGen = struct { const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal), name, + toLlvmAddressSpace(.generic, o.module.getTarget()), ); var attributes: Builder.FunctionAttributes.Wip = .{}; defer attributes.deinit(&o.builder); try o.addCommonFnAttributes(&attributes); - function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); - - function_index.ptrConst(&o.builder).global.ptr(&o.builder).linkage = .internal; - function_index.ptr(&o.builder).call_conv = .fastcc; - - function_index.toLlvm(&o.builder).setLinkage(.Internal); - function_index.toLlvm(&o.builder).setFunctionCallConv(.Fast); + function_index.setLinkage(.internal, &o.builder); + function_index.setCallConv(.fastcc, &o.builder); + function_index.setAttributes(try attributes.finish(&o.builder), &o.builder); return function_index; } @@ -9511,7 +9332,7 @@ pub const FuncGen = struct { const a = try self.resolveInst(extra.lhs); const b = try self.resolveInst(extra.rhs); - return self.wip.select(pred, a, b, ""); + return self.wip.select(.normal, pred, a, b, ""); } fn airShuffle(self: *FuncGen, inst: 
Air.Inst.Index) !Builder.Value { @@ -9623,8 +9444,7 @@ pub const FuncGen = struct { return self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, ""); } - fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { - self.builder.setFastMath(want_fast_math); + fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { const o = self.dg.object; const mod = o.module; const target = mod.getTarget(); @@ -9637,14 +9457,14 @@ pub const FuncGen = struct { const llvm_scalar_ty = try o.lowerType(scalar_ty); switch (reduce.operation) { - .And, .Or, .Xor => return self.wip.callIntrinsic(.none, switch (reduce.operation) { + .And, .Or, .Xor => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { .And => .@"vector.reduce.and", .Or => .@"vector.reduce.or", .Xor => .@"vector.reduce.xor", else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Min, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.wip.callIntrinsic(.none, switch (reduce.operation) { + .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { .Min => if (scalar_ty.isSignedInt(mod)) .@"vector.reduce.smin" else @@ -9656,7 +9476,7 @@ pub const FuncGen = struct { else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Float => if (intrinsicsAllowed(scalar_ty, target)) - return self.wip.callIntrinsic(.none, switch (reduce.operation) { + return self.wip.callIntrinsic(fast, .none, switch (reduce.operation) { .Min => .@"vector.reduce.fmin", .Max => .@"vector.reduce.fmax", else => unreachable, @@ -9664,13 +9484,13 @@ pub const FuncGen = struct { else => unreachable, }, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.wip.callIntrinsic(.none, switch (reduce.operation) { + .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { .Add => .@"vector.reduce.add", .Mul => .@"vector.reduce.mul", else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Float => if (intrinsicsAllowed(scalar_ty, target)) - return self.wip.callIntrinsic(.none, switch (reduce.operation) { + return self.wip.callIntrinsic(fast, .none, switch (reduce.operation) { .Add => .@"vector.reduce.fadd", .Mul => .@"vector.reduce.fmul", else => unreachable, @@ -10021,7 +9841,7 @@ pub const FuncGen = struct { .data => {}, } - _ = try self.wip.callIntrinsic(.none, .prefetch, &.{.ptr}, &.{ + _ = try self.wip.callIntrinsic(.normal, .none, .prefetch, &.{.ptr}, &.{ try self.resolveInst(prefetch.ptr), try o.builder.intValue(.i32, prefetch.rw), try o.builder.intValue(.i32, prefetch.locality), @@ -10045,7 +9865,7 @@ pub const FuncGen = struct { default: u32, comptime basename: []const u8, ) !Builder.Value { - return self.wip.callIntrinsic(.none, switch (dimension) { + return self.wip.callIntrinsic(.normal, .none, switch (dimension) { 0 => @field(Builder.Intrinsic, basename ++ ".x"), 1 => @field(Builder.Intrinsic, basename ++ ".y"), 2 => @field(Builder.Intrinsic, basename ++ ".z"), @@ -10074,7 +9894,8 @@ pub const FuncGen = struct { // Fetch the dispatch pointer, which points to this structure: // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913 - const dispatch_ptr = try self.wip.callIntrinsic(.none, .@"amdgcn.dispatch.ptr", &.{}, &.{}, ""); + const dispatch_ptr = + try self.wip.callIntrinsic(.normal, .none, .@"amdgcn.dispatch.ptr", &.{}, &.{}, ""); // Load the work_group_* member from the struct as u16. 
// Just treat the dispatch pointer as an array of u16 to keep things simple. @@ -10097,40 +9918,24 @@ pub const FuncGen = struct { fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { const o = self.dg.object; + const mod = o.module; + const table = o.error_name_table; if (table != .none) return table; - const mod = o.module; - const slice_ty = Type.slice_const_u8_sentinel_0; - const slice_alignment = slice_ty.abiAlignment(mod); - const undef_init = try o.builder.undefConst(.ptr); // TODO: Address space - - const name = try o.builder.string("__zig_err_name_table"); - const error_name_table_global = o.llvm_module.addGlobal(Builder.Type.ptr.toLlvm(&o.builder), name.slice(&o.builder).?); - error_name_table_global.setInitializer(undef_init.toLlvm(&o.builder)); - error_name_table_global.setLinkage(.Private); - error_name_table_global.setGlobalConstant(.True); - error_name_table_global.setUnnamedAddr(.True); - error_name_table_global.setAlignment(slice_alignment); - - var global = Builder.Global{ - .linkage = .private, - .unnamed_addr = .unnamed_addr, - .type = .ptr, - .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, - }; - var variable = Builder.Variable{ - .global = @enumFromInt(o.builder.globals.count()), - .mutability = .constant, - .init = undef_init, - .alignment = Builder.Alignment.fromByteUnits(slice_alignment), - }; - try o.builder.llvm.globals.append(o.gpa, error_name_table_global); - _ = try o.builder.addGlobal(name, global); - try o.builder.variables.append(o.gpa, variable); + // TODO: Address space + const variable_index = + try o.builder.addVariable(try o.builder.string("__zig_err_name_table"), .ptr, .default); + variable_index.setLinkage(.private, &o.builder); + variable_index.setMutability(.constant, &o.builder); + variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); + variable_index.setAlignment( + Builder.Alignment.fromByteUnits(Type.slice_const_u8_sentinel_0.abiAlignment(mod)), + &o.builder, + ); - o.error_name_table = global.kind.variable; - return global.kind.variable; + o.error_name_table = variable_index; + return variable_index; } /// Assumes the optional is not pointer-like and payload has bits. 
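Note: the hunks above share one refactor pattern. Rather than building an llvm.Value global by hand and mirroring it into Builder.Global/Builder.Variable records, call sites now go through Builder.addVariable (or addFunction) plus the new setter methods, which keep the self-hosted record and the underlying LLVM object in sync. A rough sketch of the resulting call shape, using only names that appear in this patch (the helper itself is illustrative and not part of the change):

    // Illustrative sketch only: emit an unnamed, private, constant global through
    // the new Builder API, following the pattern used by getErrorNameTable above.
    // `o` is the codegen Object; Builder, Constant, Alignment, Variable come from Builder.zig.
    fn addPrivateConstant(
        o: *Object,
        init: Builder.Constant,
        alignment: Builder.Alignment,
    ) Allocator.Error!Builder.Variable.Index {
        const variable_index = try o.builder.addVariable(
            .empty, // no explicit name
            init.typeOf(&o.builder),
            .default, // address space; real call sites map the Zig address space first
        );
        try variable_index.setInitializer(init, &o.builder);
        variable_index.setLinkage(.private, &o.builder);
        variable_index.setMutability(.constant, &o.builder);
        variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
        variable_index.setAlignment(alignment, &o.builder);
        return variable_index;
    }

The function-creation hunks follow the same shape: addFunction now takes an address space, and setLinkage/setCallConv/setAttributes replace the previous direct pokes through ptr()/toLlvm().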
@@ -11547,15 +11352,19 @@ fn buildAllocaInner( const alloca = blk: { const prev_cursor = wip.cursor; - const prev_debug_location = wip.llvm.builder.getCurrentDebugLocation2(); + const prev_debug_location = if (wip.builder.useLibLlvm()) + wip.llvm.builder.getCurrentDebugLocation2() + else + undefined; defer { wip.cursor = prev_cursor; if (wip.cursor.block == .entry) wip.cursor.instruction += 1; - if (di_scope_non_null) wip.llvm.builder.setCurrentDebugLocation2(prev_debug_location); + if (wip.builder.useLibLlvm() and di_scope_non_null) + wip.llvm.builder.setCurrentDebugLocation2(prev_debug_location); } wip.cursor = .{ .block = .entry }; - wip.llvm.builder.clearCurrentDebugLocation(); + if (wip.builder.useLibLlvm()) wip.llvm.builder.clearCurrentDebugLocation(); break :blk try wip.alloca(.normal, llvm_ty, .none, alignment, address_space, ""); }; diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 94786fe662..eb1df06c0b 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -13,6 +13,7 @@ llvm: if (build_options.have_llvm) struct { types: std.ArrayListUnmanaged(*llvm.Type), globals: std.ArrayListUnmanaged(*llvm.Value), constants: std.ArrayListUnmanaged(*llvm.Value), + replacements: std.AutoHashMapUnmanaged(*llvm.Value, Global.Index), } else void, source_filename: String, @@ -1709,17 +1710,17 @@ pub const FunctionAttributes = enum(u32) { }; pub const Linkage = enum { - external, private, internal, - available_externally, - linkonce, weak, - common, + weak_odr, + linkonce, + linkonce_odr, + available_externally, appending, + common, extern_weak, - linkonce_odr, - weak_odr, + external, pub fn format( self: Linkage, @@ -1729,6 +1730,22 @@ pub const Linkage = enum { ) @TypeOf(writer).Error!void { if (self != .external) try writer.print(" {s}", .{@tagName(self)}); } + + fn toLlvm(self: Linkage) llvm.Linkage { + return switch (self) { + .private => .Private, + .internal => .Internal, + .weak => .WeakAny, + .weak_odr => .WeakODR, + .linkonce => .LinkOnceAny, + .linkonce_odr => .LinkOnceODR, + .available_externally => .AvailableExternally, + .appending => .Appending, + .common => .Common, + .extern_weak => .ExternalWeak, + .external => .External, + }; + } }; pub const Preemption = enum { @@ -1759,6 +1776,14 @@ pub const Visibility = enum { ) @TypeOf(writer).Error!void { if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } + + fn toLlvm(self: Visibility) llvm.Visibility { + return switch (self) { + .default => .Default, + .hidden => .Hidden, + .protected => .Protected, + }; + } }; pub const DllStorageClass = enum { @@ -1774,6 +1799,14 @@ pub const DllStorageClass = enum { ) @TypeOf(writer).Error!void { if (self != .default) try writer.print(" {s}", .{@tagName(self)}); } + + fn toLlvm(self: DllStorageClass) llvm.DLLStorageClass { + return switch (self) { + .default => .Default, + .dllimport => .DLLImport, + .dllexport => .DLLExport, + }; + } }; pub const ThreadLocal = enum { @@ -1785,20 +1818,28 @@ pub const ThreadLocal = enum { pub fn format( self: ThreadLocal, - comptime _: []const u8, + comptime prefix: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { if (self == .default) return; - try writer.writeAll(" thread_local"); - if (self != .generaldynamic) { - try writer.writeByte('('); - try writer.writeAll(@tagName(self)); - try writer.writeByte(')'); - } + try writer.print("{s}thread_local", .{prefix}); + if (self != .generaldynamic) try writer.print("({s})", .{@tagName(self)}); + } + + fn toLlvm(self: 
ThreadLocal) llvm.ThreadLocalMode { + return switch (self) { + .default => .NotThreadLocal, + .generaldynamic => .GeneralDynamicTLSModel, + .localdynamic => .LocalDynamicTLSModel, + .initialexec => .InitialExecTLSModel, + .localexec => .LocalExecTLSModel, + }; } }; +pub const Mutability = enum { global, constant }; + pub const UnnamedAddr = enum { default, unnamed_addr, @@ -2057,6 +2098,11 @@ pub const CallConv = enum(u10) { _ => try writer.print(" cc{d}", .{@intFromEnum(self)}), } } + + fn toLlvm(self: CallConv) llvm.CallConv { + // These enum values appear in LLVM IR, and so are guaranteed to be stable. + return @enumFromInt(@intFromEnum(self)); + } }; pub const Global = struct { @@ -2093,10 +2139,6 @@ pub const Global = struct { return self.unwrap(builder) == other.unwrap(builder); } - pub fn name(self: Index, builder: *const Builder) String { - return builder.globals.keys()[@intFromEnum(self.unwrap(builder))]; - } - pub fn ptr(self: Index, builder: *Builder) *Global { return &builder.globals.values()[@intFromEnum(self.unwrap(builder))]; } @@ -2105,6 +2147,10 @@ pub const Global = struct { return &builder.globals.values()[@intFromEnum(self.unwrap(builder))]; } + pub fn name(self: Index, builder: *const Builder) String { + return builder.globals.keys()[@intFromEnum(self.unwrap(builder))]; + } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).type; } @@ -2113,6 +2159,30 @@ pub const Global = struct { return @enumFromInt(@intFromEnum(Constant.first_global) + @intFromEnum(self)); } + pub fn setLinkage(self: Index, linkage: Linkage, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setLinkage(linkage.toLlvm()); + self.ptr(builder).linkage = linkage; + self.updateDsoLocal(builder); + } + + pub fn setVisibility(self: Index, visibility: Visibility, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setVisibility(visibility.toLlvm()); + self.ptr(builder).visibility = visibility; + self.updateDsoLocal(builder); + } + + pub fn setDllStorageClass(self: Index, class: DllStorageClass, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setDLLStorageClass(class.toLlvm()); + self.ptr(builder).dll_storage_class = class; + } + + pub fn setUnnamedAddr(self: Index, unnamed_addr: UnnamedAddr, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setUnnamedAddr( + llvm.Bool.fromBool(unnamed_addr != .default), + ); + self.ptr(builder).unnamed_addr = unnamed_addr; + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); return builder.llvm.globals.items[@intFromEnum(self.unwrap(builder))]; @@ -2148,9 +2218,36 @@ pub const Global = struct { pub fn replace(self: Index, other: Index, builder: *Builder) Allocator.Error!void { try builder.ensureUnusedGlobalCapacity(.empty); + if (builder.useLibLlvm()) + try builder.llvm.replacements.ensureUnusedCapacity(builder.gpa, 1); self.replaceAssumeCapacity(other, builder); } + pub fn delete(self: Index, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).eraseGlobalValue(); + self.ptr(builder).kind = .{ .replaced = .none }; + } + + fn updateDsoLocal(self: Index, builder: *Builder) void { + const self_ptr = self.ptr(builder); + switch (self_ptr.linkage) { + .private, .internal => { + self_ptr.visibility = .default; + self_ptr.dll_storage_class = .default; + self_ptr.preemption = .implicit_dso_local; + }, + .extern_weak => if (self_ptr.preemption == .implicit_dso_local) { + 
self_ptr.preemption = .dso_local; + }, + else => switch (self_ptr.visibility) { + .default => if (self_ptr.preemption == .implicit_dso_local) { + self_ptr.preemption = .dso_local; + }, + else => self_ptr.preemption = .implicit_dso_local, + }, + } + } + fn renameAssumeCapacity(self: Index, new_name: String, builder: *Builder) void { const old_name = self.name(builder); if (new_name == old_name) return; @@ -2187,13 +2284,8 @@ pub const Global = struct { if (builder.useLibLlvm()) { const self_llvm = self.toLlvm(builder); self_llvm.replaceAllUsesWith(other.toLlvm(builder)); - switch (self.ptr(builder).kind) { - .alias, - .variable, - => self_llvm.deleteGlobal(), - .function => self_llvm.deleteFunction(), - .replaced => unreachable, - } + self_llvm.removeGlobalValue(); + builder.llvm.replacements.putAssumeCapacityNoClobber(self_llvm, other); } self.ptr(builder).kind = .{ .replaced = other.unwrap(builder) }; } @@ -2205,42 +2297,17 @@ pub const Global = struct { }; } }; - - pub fn updateAttributes(self: *Global) void { - switch (self.linkage) { - .private, .internal => { - self.visibility = .default; - self.dll_storage_class = .default; - self.preemption = .implicit_dso_local; - }, - .extern_weak => if (self.preemption == .implicit_dso_local) { - self.preemption = .dso_local; - }, - else => switch (self.visibility) { - .default => if (self.preemption == .implicit_dso_local) { - self.preemption = .dso_local; - }, - else => self.preemption = .implicit_dso_local, - }, - } - } }; pub const Alias = struct { global: Global.Index, thread_local: ThreadLocal = .default, - init: Constant = .no_init, + aliasee: Constant = .no_init, pub const Index = enum(u32) { none = std.math.maxInt(u32), _, - pub fn getAliasee(self: Index, builder: *const Builder) Global.Index { - const aliasee = self.ptrConst(builder).init.getBase(builder); - assert(aliasee != .none); - return aliasee; - } - pub fn ptr(self: Index, builder: *Builder) *Alias { return &builder.aliases.items[@intFromEnum(self)]; } @@ -2249,6 +2316,10 @@ pub const Alias = struct { return &builder.aliases.items[@intFromEnum(self)]; } + pub fn name(self: Index, builder: *const Builder) String { + return self.ptrConst(builder).global.name(builder); + } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).global.typeOf(builder); } @@ -2261,7 +2332,18 @@ pub const Alias = struct { return self.toConst(builder).toValue(); } - pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { + pub fn getAliasee(self: Index, builder: *const Builder) Global.Index { + const aliasee = self.ptrConst(builder).aliasee.getBase(builder); + assert(aliasee != .none); + return aliasee; + } + + pub fn setAliasee(self: Index, aliasee: Constant, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setAliasee(aliasee.toLlvm(builder)); + self.ptr(builder).aliasee = aliasee; + } + + fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } }; @@ -2270,7 +2352,7 @@ pub const Alias = struct { pub const Variable = struct { global: Global.Index, thread_local: ThreadLocal = .default, - mutability: enum { global, constant } = .global, + mutability: Mutability = .global, init: Constant = .no_init, section: String = .none, alignment: Alignment = .default, @@ -2287,6 +2369,10 @@ pub const Variable = struct { return &builder.variables.items[@intFromEnum(self)]; } + pub fn name(self: Index, builder: *const Builder) String { + return self.ptrConst(builder).global.name(builder); 
+ } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).global.typeOf(builder); } @@ -2299,6 +2385,88 @@ pub const Variable = struct { return self.toConst(builder).toValue(); } + pub fn setLinkage(self: Index, linkage: Linkage, builder: *Builder) void { + return self.ptrConst(builder).global.setLinkage(linkage, builder); + } + + pub fn setUnnamedAddr(self: Index, unnamed_addr: UnnamedAddr, builder: *Builder) void { + return self.ptrConst(builder).global.setUnnamedAddr(unnamed_addr, builder); + } + + pub fn setThreadLocal(self: Index, thread_local: ThreadLocal, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setThreadLocalMode(thread_local.toLlvm()); + self.ptr(builder).thread_local = thread_local; + } + + pub fn setMutability(self: Index, mutability: Mutability, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setGlobalConstant( + llvm.Bool.fromBool(mutability == .constant), + ); + self.ptr(builder).mutability = mutability; + } + + pub fn setInitializer( + self: Index, + initializer: Constant, + builder: *Builder, + ) Allocator.Error!void { + if (initializer != .no_init) { + const variable = self.ptrConst(builder); + const global = variable.global.ptr(builder); + const initializer_type = initializer.typeOf(builder); + if (builder.useLibLlvm() and global.type != initializer_type) { + try builder.llvm.replacements.ensureUnusedCapacity(builder.gpa, 1); + // LLVM does not allow us to change the type of globals. So we must + // create a new global with the correct type, copy all its attributes, + // and then update all references to point to the new global, + // delete the original, and rename the new one to the old one's name. + // This is necessary because LLVM does not support const bitcasting + // a struct with padding bytes, which is needed to lower a const union value + // to LLVM, when a field other than the most-aligned is active. Instead, + // we must lower to an unnamed struct, and pointer cast at usage sites + // of the global. Such an unnamed struct is the cause of the global type + // mismatch, because we don't have the LLVM type until the *value* is created, + // whereas the global needs to be created based on the type alone, because + // lowering the value may reference the global as a pointer. 
+ // Related: https://github.com/ziglang/zig/issues/13265 + const old_global = &builder.llvm.globals.items[@intFromEnum(variable.global)]; + const new_global = builder.llvm.module.?.addGlobalInAddressSpace( + initializer_type.toLlvm(builder), + "", + @intFromEnum(global.addr_space), + ); + new_global.setLinkage(global.linkage.toLlvm()); + new_global.setUnnamedAddr(llvm.Bool.fromBool(global.unnamed_addr != .default)); + new_global.setAlignment(@intCast(variable.alignment.toByteUnits() orelse 0)); + if (variable.section != .none) + new_global.setSection(variable.section.slice(builder).?); + old_global.*.replaceAllUsesWith(new_global); + builder.llvm.replacements.putAssumeCapacityNoClobber(old_global.*, variable.global); + new_global.takeName(old_global.*); + old_global.*.removeGlobalValue(); + old_global.* = new_global; + self.ptr(builder).mutability = .global; + } + global.type = initializer_type; + } + if (builder.useLibLlvm()) self.toLlvm(builder).setInitializer(switch (initializer) { + .no_init => null, + else => initializer.toLlvm(builder), + }); + self.ptr(builder).init = initializer; + } + + pub fn setSection(self: Index, section: String, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setSection(section.slice(builder).?); + self.ptr(builder).section = section; + } + + pub fn setAlignment(self: Index, alignment: Alignment, builder: *Builder) void { + if (builder.useLibLlvm()) + self.toLlvm(builder).setAlignment(@intCast(alignment.toByteUnits() orelse 0)); + self.ptr(builder).alignment = alignment; + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } @@ -3640,6 +3808,10 @@ pub const Function = struct { return &builder.functions.items[@intFromEnum(self)]; } + pub fn name(self: Index, builder: *const Builder) String { + return self.ptrConst(builder).global.name(builder); + } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).global.typeOf(builder); } @@ -3652,6 +3824,19 @@ pub const Function = struct { return self.toConst(builder).toValue(); } + pub fn setLinkage(self: Index, linkage: Linkage, builder: *Builder) void { + return self.ptrConst(builder).global.setLinkage(linkage, builder); + } + + pub fn setUnnamedAddr(self: Index, unnamed_addr: UnnamedAddr, builder: *Builder) void { + return self.ptrConst(builder).global.setUnnamedAddr(unnamed_addr, builder); + } + + pub fn setCallConv(self: Index, call_conv: CallConv, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setFunctionCallConv(call_conv.toLlvm()); + self.ptr(builder).call_conv = call_conv; + } + pub fn setAttributes( self: Index, new_function_attributes: FunctionAttributes, @@ -3687,12 +3872,12 @@ pub const Function = struct { )) { .lt => { // Removed - if (old_attribute_kind.toString()) |name| { - const slice = name.slice(builder).?; + if (old_attribute_kind.toString()) |attribute_name| { + const attribute_name_slice = attribute_name.slice(builder).?; llvm_function.removeStringAttributeAtIndex( llvm_attribute_index, - slice.ptr, - @intCast(slice.len), + attribute_name_slice.ptr, + @intCast(attribute_name_slice.len), ); } else { const llvm_kind_id = old_attribute_kind.toLlvm(builder).*; @@ -3732,6 +3917,17 @@ pub const Function = struct { self.ptr(builder).attributes = new_function_attributes; } + pub fn setSection(self: Index, section: String, builder: *Builder) void { + if (builder.useLibLlvm()) self.toLlvm(builder).setSection(section.slice(builder).?); + 
self.ptr(builder).section = section; + } + + pub fn setAlignment(self: Index, alignment: Alignment, builder: *Builder) void { + if (builder.useLibLlvm()) + self.toLlvm(builder).setAlignment(@intCast(alignment.toByteUnits() orelse 0)); + self.ptr(builder).alignment = alignment; + } + pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value { return self.ptrConst(builder).global.toLlvm(builder); } @@ -4342,9 +4538,11 @@ pub const Function = struct { return .{ .data = .{ .instruction = self, .function = function, .builder = builder } }; } - pub fn toLlvm(self: Instruction.Index, wip: *const WipFunction) *llvm.Value { + fn toLlvm(self: Instruction.Index, wip: *const WipFunction) *llvm.Value { assert(wip.builder.useLibLlvm()); - return wip.llvm.instructions.items[@intFromEnum(self)]; + const llvm_value = wip.llvm.instructions.items[@intFromEnum(self)]; + const global = wip.builder.llvm.replacements.get(llvm_value) orelse return llvm_value; + return global.toLlvm(wip.builder); } fn llvmName(self: Instruction.Index, wip: *const WipFunction) [:0]const u8 { @@ -4462,6 +4660,27 @@ pub const Function = struct { fmax, fmin, none = std.math.maxInt(u5), + + fn toLlvm(self: Operation) llvm.AtomicRMWBinOp { + return switch (self) { + .xchg => .Xchg, + .add => .Add, + .sub => .Sub, + .@"and" => .And, + .nand => .Nand, + .@"or" => .Or, + .xor => .Xor, + .max => .Max, + .min => .Min, + .umax => .UMax, + .umin => .UMin, + .fadd => .FAdd, + .fsub => .FSub, + .fmax => .FMax, + .fmin => .FMin, + .none => unreachable, + }; + } }; }; @@ -5245,7 +5464,7 @@ pub const WipFunction = struct { instruction.llvmName(self), ); if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); - if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); + if (ordering != .none) llvm_instruction.setOrdering(ordering.toLlvm()); if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes)); self.llvm.instructions.appendAssumeCapacity(llvm_instruction); } @@ -5295,7 +5514,7 @@ pub const WipFunction = struct { if (self.builder.useLibLlvm()) { const llvm_instruction = self.llvm.builder.buildStore(val.toLlvm(self), ptr.toLlvm(self)); if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); - if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering))); + if (ordering != .none) llvm_instruction.setOrdering(ordering.toLlvm()); if (alignment.toByteUnits()) |bytes| llvm_instruction.setAlignment(@intCast(bytes)); self.llvm.instructions.appendAssumeCapacity(llvm_instruction); } @@ -5318,7 +5537,7 @@ pub const WipFunction = struct { }); if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity( self.llvm.builder.buildFence( - @enumFromInt(@intFromEnum(ordering)), + ordering.toLlvm(), llvm.Bool.fromBool(sync_scope == .singlethread), "", ), @@ -5370,8 +5589,8 @@ pub const WipFunction = struct { ptr.toLlvm(self), cmp.toLlvm(self), new.toLlvm(self), - @enumFromInt(@intFromEnum(success_ordering)), - @enumFromInt(@intFromEnum(failure_ordering)), + success_ordering.toLlvm(), + failure_ordering.toLlvm(), llvm.Bool.fromBool(sync_scope == .singlethread), ); if (kind == .weak) llvm_instruction.setWeak(.True); @@ -5418,10 +5637,10 @@ pub const WipFunction = struct { }); if (self.builder.useLibLlvm()) { const llvm_instruction = self.llvm.builder.buildAtomicRmw( - @enumFromInt(@intFromEnum(operation)), + operation.toLlvm(), ptr.toLlvm(self), val.toLlvm(self), - @enumFromInt(@intFromEnum(ordering)), + ordering.toLlvm(), 
llvm.Bool.fromBool(sync_scope == .singlethread), ); if (access_kind == .@"volatile") llvm_instruction.setVolatile(.True); @@ -5608,25 +5827,19 @@ pub const WipFunction = struct { pub fn fcmp( self: *WipFunction, + fast: FastMathKind, cond: FloatCondition, lhs: Value, rhs: Value, name: []const u8, ) Allocator.Error!Value { - return self.cmpTag(switch (cond) { - inline else => |tag| @field(Instruction.Tag, "fcmp " ++ @tagName(tag)), - }, @intFromEnum(cond), lhs, rhs, name); - } - - pub fn fcmpFast( - self: *WipFunction, - cond: FloatCondition, - lhs: Value, - rhs: Value, - name: []const u8, - ) Allocator.Error!Value { - return self.cmpTag(switch (cond) { - inline else => |tag| @field(Instruction.Tag, "fcmp fast " ++ @tagName(tag)), + return self.cmpTag(switch (fast) { + inline else => |fast_tag| switch (cond) { + inline else => |cond_tag| @field(Instruction.Tag, "fcmp " ++ switch (fast_tag) { + .normal => "", + .fast => "fast ", + } ++ @tagName(cond_tag)), + }, }, @intFromEnum(cond), lhs, rhs, name); } @@ -5684,22 +5897,16 @@ pub const WipFunction = struct { pub fn select( self: *WipFunction, + fast: FastMathKind, cond: Value, lhs: Value, rhs: Value, name: []const u8, ) Allocator.Error!Value { - return self.selectTag(.select, cond, lhs, rhs, name); - } - - pub fn selectFast( - self: *WipFunction, - cond: Value, - lhs: Value, - rhs: Value, - name: []const u8, - ) Allocator.Error!Value { - return self.selectTag(.@"select fast", cond, lhs, rhs, name); + return self.selectTag(switch (fast) { + .normal => .select, + .fast => .@"select fast", + }, cond, lhs, rhs, name); } pub fn call( @@ -5774,7 +5981,7 @@ pub const WipFunction = struct { else => instruction.llvmName(self), }, ); - llvm_instruction.setInstructionCallConv(@enumFromInt(@intFromEnum(call_conv))); + llvm_instruction.setInstructionCallConv(call_conv.toLlvm()); llvm_instruction.setTailCallKind(switch (kind) { .normal, .fast => .None, .musttail, .musttail_fast => .MustTail, @@ -5808,6 +6015,7 @@ pub const WipFunction = struct { pub fn callIntrinsic( self: *WipFunction, + fast: FastMathKind, function_attributes: FunctionAttributes, id: Intrinsic, overload: []const Type, @@ -5816,7 +6024,7 @@ pub const WipFunction = struct { ) Allocator.Error!Value { const intrinsic = try self.builder.getIntrinsic(id, overload); return self.call( - .normal, + fast.toCallKind(), CallConv.default, function_attributes, intrinsic.typeOf(self.builder), @@ -5838,6 +6046,7 @@ pub const WipFunction = struct { var dst_attrs = [_]Attribute.Index{try self.builder.attr(.{ .@"align" = dst_align })}; var src_attrs = [_]Attribute.Index{try self.builder.attr(.{ .@"align" = src_align })}; const value = try self.callIntrinsic( + .normal, try self.builder.fnAttrs(&.{ .none, .none, @@ -5865,6 +6074,7 @@ pub const WipFunction = struct { ) Allocator.Error!Instruction.Index { var dst_attrs = [_]Attribute.Index{try self.builder.attr(.{ .@"align" = dst_align })}; const value = try self.callIntrinsic( + .normal, try self.builder.fnAttrs(&.{ .none, .none, try self.builder.attrs(&dst_attrs) }), .memset, &.{ dst.typeOfWip(self), len.typeOfWip(self) }, @@ -6740,6 +6950,24 @@ pub const FloatCondition = enum(u4) { ult = 12, ule = 13, une = 14, + + fn toLlvm(self: FloatCondition) llvm.RealPredicate { + return switch (self) { + .oeq => .OEQ, + .ogt => .OGT, + .oge => .OGE, + .olt => .OLT, + .ole => .OLE, + .one => .ONE, + .ord => .ORD, + .uno => .UNO, + .ueq => .UEQ, + .ugt => .UGT, + .uge => .UGE, + .ult => .ULT, + .uno => .UNE, + }; + } }; pub const IntegerCondition = enum(u6) { @@ 
-6753,6 +6981,20 @@ pub const IntegerCondition = enum(u6) { sge = 39, slt = 40, sle = 41, + + fn toLlvm(self: IntegerCondition) llvm.IntPredicate { + return switch (self) { + .eq => .EQ, + .ne => .NE, + .ugt => .UGT, + .uge => .UGE, + .ult => .ULT, + .sgt => .SGT, + .sge => .SGE, + .slt => .SLT, + .sle => .SLE, + }; + } }; pub const MemoryAccessKind = enum(u1) { @@ -6802,6 +7044,18 @@ pub const AtomicOrdering = enum(u3) { ) @TypeOf(writer).Error!void { if (self != .none) try writer.print("{s}{s}", .{ prefix, @tagName(self) }); } + + fn toLlvm(self: AtomicOrdering) llvm.AtomicOrdering { + return switch (self) { + .none => .NotAtomic, + .unordered => .Unordered, + .monotonic => .Monotonic, + .acquire => .Acquire, + .release => .Release, + .acq_rel => .AcquireRelease, + .seq_cst => .SequentiallyConsistent, + }; + } }; const MemoryAccessInfo = packed struct(u32) { @@ -6834,6 +7088,18 @@ pub const FastMath = packed struct(u32) { }; }; +pub const FastMathKind = enum { + normal, + fast, + + pub fn toCallKind(self: FastMathKind) Function.Instruction.Call.Kind { + return switch (self) { + .normal => .normal, + .fast => .fast, + }; + } +}; + pub const Constant = enum(u32) { false, true, @@ -7247,7 +7513,7 @@ pub const Constant = enum(u32) { } }, .global => |global| switch (global.ptrConst(builder).kind) { - .alias => |alias| cur = alias.ptrConst(builder).init, + .alias => |alias| cur = alias.ptrConst(builder).aliasee, .variable, .function => return global, .replaced => unreachable, }, @@ -7586,10 +7852,12 @@ pub const Constant = enum(u32) { pub fn toLlvm(self: Constant, builder: *const Builder) *llvm.Value { assert(builder.useLibLlvm()); - return switch (self.unwrap()) { + const llvm_value = switch (self.unwrap()) { .constant => |constant| builder.llvm.constants.items[constant], - .global => |global| global.toLlvm(builder), + .global => |global| return global.toLlvm(builder), }; + const global = builder.llvm.replacements.get(llvm_value) orelse return llvm_value; + return global.toLlvm(builder); } }; @@ -7726,6 +7994,7 @@ pub fn init(options: Options) InitError!Builder { .types = .{}, .globals = .{}, .constants = .{}, + .replacements = .{}, }; errdefer self.deinit(); @@ -7805,6 +8074,20 @@ pub fn init(options: Options) InitError!Builder { } pub fn deinit(self: *Builder) void { + if (self.useLibLlvm()) { + var replacement_it = self.llvm.replacements.keyIterator(); + while (replacement_it.next()) |replacement| replacement.*.deleteGlobalValue(); + self.llvm.replacements.deinit(self.gpa); + self.llvm.constants.deinit(self.gpa); + self.llvm.globals.deinit(self.gpa); + self.llvm.types.deinit(self.gpa); + self.llvm.attributes.deinit(self.gpa); + if (self.llvm.attribute_kind_ids) |attribute_kind_ids| self.gpa.destroy(attribute_kind_ids); + if (self.llvm.di_builder) |di_builder| di_builder.dispose(); + if (self.llvm.module) |module| module.dispose(); + self.llvm.context.dispose(); + } + self.module_asm.deinit(self.gpa); self.string_map.deinit(self.gpa); @@ -7834,16 +8117,6 @@ pub fn deinit(self: *Builder) void { self.constant_extra.deinit(self.gpa); self.constant_limbs.deinit(self.gpa); - if (self.useLibLlvm()) { - self.llvm.constants.deinit(self.gpa); - self.llvm.globals.deinit(self.gpa); - self.llvm.types.deinit(self.gpa); - self.llvm.attributes.deinit(self.gpa); - if (self.llvm.attribute_kind_ids) |attribute_kind_ids| self.gpa.destroy(attribute_kind_ids); - if (self.llvm.di_builder) |di_builder| di_builder.dispose(); - if (self.llvm.module) |module| module.dispose(); - self.llvm.context.dispose(); - } 
self.* = undefined; } @@ -8300,10 +8573,10 @@ pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Glo const global_gop = self.globals.getOrPutAssumeCapacity(id); if (!global_gop.found_existing) { global_gop.value_ptr.* = global; - global_gop.value_ptr.updateAttributes(); - const index: Global.Index = @enumFromInt(global_gop.index); - index.updateName(self); - return index; + const global_index: Global.Index = @enumFromInt(global_gop.index); + global_index.updateDsoLocal(self); + global_index.updateName(self); + return global_index; } const unique_gop = self.next_unique_global_id.getOrPutAssumeCapacity(name); @@ -8317,21 +8590,107 @@ pub fn getGlobal(self: *const Builder, name: String) ?Global.Index { return @enumFromInt(self.globals.getIndex(name) orelse return null); } -pub fn addFunction(self: *Builder, ty: Type, name: String) Allocator.Error!Function.Index { +pub fn addAlias( + self: *Builder, + name: String, + ty: Type, + addr_space: AddrSpace, + aliasee: Constant, +) Allocator.Error!Alias.Index { + assert(!name.isAnon()); + try self.ensureUnusedTypeCapacity(1, NoExtra, 0); + try self.ensureUnusedGlobalCapacity(name); + try self.aliases.ensureUnusedCapacity(self.gpa, 1); + return self.addAliasAssumeCapacity(name, ty, addr_space, aliasee); +} + +pub fn addAliasAssumeCapacity( + self: *Builder, + name: String, + ty: Type, + addr_space: AddrSpace, + aliasee: Constant, +) Alias.Index { + if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity(self.llvm.module.?.addAlias( + ty.toLlvm(self), + @intFromEnum(addr_space), + aliasee.toLlvm(self), + name.slice(self).?, + )); + const alias_index: Alias.Index = @enumFromInt(self.aliases.items.len); + self.aliases.appendAssumeCapacity(.{ .global = self.addGlobalAssumeCapacity(name, .{ + .addr_space = addr_space, + .type = ty, + .kind = .{ .alias = alias_index }, + }), .aliasee = aliasee }); + return alias_index; +} + +pub fn addVariable( + self: *Builder, + name: String, + ty: Type, + addr_space: AddrSpace, +) Allocator.Error!Variable.Index { + assert(!name.isAnon()); + try self.ensureUnusedTypeCapacity(1, NoExtra, 0); + try self.ensureUnusedGlobalCapacity(name); + try self.variables.ensureUnusedCapacity(self.gpa, 1); + return self.addVariableAssumeCapacity(ty, name, addr_space); +} + +pub fn addVariableAssumeCapacity( + self: *Builder, + ty: Type, + name: String, + addr_space: AddrSpace, +) Variable.Index { + if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity( + self.llvm.module.?.addGlobalInAddressSpace( + ty.toLlvm(self), + name.slice(self).?, + @intFromEnum(addr_space), + ), + ); + const variable_index: Variable.Index = @enumFromInt(self.variables.items.len); + self.variables.appendAssumeCapacity(.{ .global = self.addGlobalAssumeCapacity(name, .{ + .addr_space = addr_space, + .type = ty, + .kind = .{ .variable = variable_index }, + }) }); + return variable_index; +} + +pub fn addFunction( + self: *Builder, + ty: Type, + name: String, + addr_space: AddrSpace, +) Allocator.Error!Function.Index { assert(!name.isAnon()); try self.ensureUnusedTypeCapacity(1, NoExtra, 0); try self.ensureUnusedGlobalCapacity(name); try self.functions.ensureUnusedCapacity(self.gpa, 1); - return self.addFunctionAssumeCapacity(ty, name); + return self.addFunctionAssumeCapacity(ty, name, addr_space); } -pub fn addFunctionAssumeCapacity(self: *Builder, ty: Type, name: String) Function.Index { +pub fn addFunctionAssumeCapacity( + self: *Builder, + ty: Type, + name: String, + addr_space: AddrSpace, +) Function.Index { 
assert(ty.isFunction(self)); if (self.useLibLlvm()) self.llvm.globals.appendAssumeCapacity( - self.llvm.module.?.addFunction(name.slice(self).?, ty.toLlvm(self)), + self.llvm.module.?.addFunctionInAddressSpace( + name.slice(self).?, + ty.toLlvm(self), + @intFromEnum(addr_space), + ), ); const function_index: Function.Index = @enumFromInt(self.functions.items.len); self.functions.appendAssumeCapacity(.{ .global = self.addGlobalAssumeCapacity(name, .{ + .addr_space = addr_space, .type = ty, .kind = .{ .function = function_index }, }) }); @@ -8423,12 +8782,11 @@ pub fn getIntrinsic( }; } - const function_index = - try self.addFunction(try self.fnType(switch (signature.ret_len) { + const function_index = try self.addFunction(try self.fnType(switch (signature.ret_len) { 0 => .void, 1 => param_types[0], else => try self.structType(.normal, param_types[0..signature.ret_len]), - }, param_types[signature.ret_len..], .normal), name); + }, param_types[signature.ret_len..], .normal), name, .default); function_index.ptr(self).attributes = try self.fnAttrs(function_attributes); return function_index; } @@ -8889,6 +9247,40 @@ pub fn asmValue( return (try self.asmConst(ty, info, assembly, constraints)).toValue(); } +pub fn verify(self: *Builder) error{}!bool { + if (self.useLibLlvm()) { + var error_message: [*:0]const u8 = undefined; + // verifyModule always allocs the error_message even if there is no error + defer llvm.disposeMessage(error_message); + + if (self.llvm.module.?.verify(.ReturnStatus, &error_message).toBool()) { + log.err("failed verification of LLVM module:\n{s}\n", .{error_message}); + return false; + } + } + return true; +} + +pub fn writeBitcodeToFile(self: *Builder, path: []const u8) Allocator.Error!bool { + const path_z = try self.gpa.dupeZ(u8, path); + defer self.gpa.free(path_z); + return self.writeBitcodeToFileZ(path_z); +} + +pub fn writeBitcodeToFileZ(self: *Builder, path: [*:0]const u8) bool { + if (self.useLibLlvm()) { + const error_code = self.llvm.module.?.writeBitcodeToFile(path); + if (error_code != 0) { + log.err("failed dumping LLVM module to \"{s}\": {d}", .{ path, error_code }); + return false; + } + } else { + log.err("writing bitcode without libllvm not implemented", .{}); + return false; + } + return true; +} + pub fn dump(self: *Builder) void { if (self.useLibLlvm()) self.llvm.module.?.dump() @@ -8980,7 +9372,7 @@ pub fn printUnbuffered( if (variable.global.getReplacement(self) != .none) continue; const global = variable.global.ptrConst(self); try writer.print( - \\{} ={}{}{}{}{}{}{ }{} {s} {%}{ }{, } + \\{} ={}{}{}{}{ }{}{ }{} {s} {%}{ }{, } \\ , .{ variable.global.fmt(self), @@ -10906,7 +11298,7 @@ fn icmpConstAssumeCapacity( .data = self.addConstantExtraAssumeCapacity(data), }); if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( - llvm.constICmp(@enumFromInt(@intFromEnum(cond)), lhs.toLlvm(self), rhs.toLlvm(self)), + llvm.constICmp(cond.toLlvm(), lhs.toLlvm(self), rhs.toLlvm(self)), ); } return @enumFromInt(gop.index); @@ -10943,7 +11335,7 @@ fn fcmpConstAssumeCapacity( .data = self.addConstantExtraAssumeCapacity(data), }); if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity( - llvm.constFCmp(@enumFromInt(@intFromEnum(cond)), lhs.toLlvm(self), rhs.toLlvm(self)), + llvm.constFCmp(cond.toLlvm(), lhs.toLlvm(self), rhs.toLlvm(self)), ); } return @enumFromInt(gop.index); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index a756be784b..66826caa42 100644 --- a/src/codegen/llvm/bindings.zig +++ 
b/src/codegen/llvm/bindings.zig @@ -142,8 +142,14 @@ pub const Value = opaque { pub const setSection = LLVMSetSection; extern fn LLVMSetSection(Global: *Value, Section: [*:0]const u8) void; - pub const deleteGlobal = LLVMDeleteGlobal; - extern fn LLVMDeleteGlobal(GlobalVar: *Value) void; + pub const removeGlobalValue = ZigLLVMRemoveGlobalValue; + extern fn ZigLLVMRemoveGlobalValue(GlobalVal: *Value) void; + + pub const eraseGlobalValue = ZigLLVMEraseGlobalValue; + extern fn ZigLLVMEraseGlobalValue(GlobalVal: *Value) void; + + pub const deleteGlobalValue = ZigLLVMDeleteGlobalValue; + extern fn ZigLLVMDeleteGlobalValue(GlobalVal: *Value) void; pub const setAliasee = LLVMAliasSetAliasee; extern fn LLVMAliasSetAliasee(Alias: *Value, Aliasee: *Value) void; @@ -292,20 +298,14 @@ pub const Value = opaque { pub const setValueName = LLVMSetValueName2; extern fn LLVMSetValueName2(Val: *Value, Name: [*]const u8, NameLen: usize) void; - pub const getValueName = LLVMGetValueName; - extern fn LLVMGetValueName(Val: *Value) [*:0]const u8; - pub const takeName = ZigLLVMTakeName; extern fn ZigLLVMTakeName(new_owner: *Value, victim: *Value) void; - pub const deleteFunction = LLVMDeleteFunction; - extern fn LLVMDeleteFunction(Fn: *Value) void; - pub const getParam = LLVMGetParam; extern fn LLVMGetParam(Fn: *Value, Index: c_uint) *Value; - pub const setInitializer = LLVMSetInitializer; - extern fn LLVMSetInitializer(GlobalVar: *Value, ConstantVal: *Value) void; + pub const setInitializer = ZigLLVMSetInitializer; + extern fn ZigLLVMSetInitializer(GlobalVar: *Value, ConstantVal: ?*Value) void; pub const setDLLStorageClass = LLVMSetDLLStorageClass; extern fn LLVMSetDLLStorageClass(Global: *Value, Class: DLLStorageClass) void; @@ -316,15 +316,6 @@ pub const Value = opaque { pub const replaceAllUsesWith = LLVMReplaceAllUsesWith; extern fn LLVMReplaceAllUsesWith(OldVal: *Value, NewVal: *Value) void; - pub const getLinkage = LLVMGetLinkage; - extern fn LLVMGetLinkage(Global: *Value) Linkage; - - pub const getUnnamedAddress = LLVMGetUnnamedAddress; - extern fn LLVMGetUnnamedAddress(Global: *Value) Bool; - - pub const getAlignment = LLVMGetAlignment; - extern fn LLVMGetAlignment(V: *Value) c_uint; - pub const attachMetaData = ZigLLVMAttachMetaData; extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: *DIGlobalVariableExpression) void; @@ -423,18 +414,12 @@ pub const Module = opaque { pub const setModuleCodeModel = ZigLLVMSetModuleCodeModel; extern fn ZigLLVMSetModuleCodeModel(module: *Module, code_model: CodeModel) void; - pub const addFunction = LLVMAddFunction; - extern fn LLVMAddFunction(*Module, Name: [*:0]const u8, FunctionTy: *Type) *Value; - pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace; extern fn ZigLLVMAddFunctionInAddressSpace(*Module, Name: [*:0]const u8, FunctionTy: *Type, AddressSpace: c_uint) *Value; pub const printToString = LLVMPrintModuleToString; extern fn LLVMPrintModuleToString(*Module) [*:0]const u8; - pub const addGlobal = LLVMAddGlobal; - extern fn LLVMAddGlobal(M: *Module, Ty: *Type, Name: [*:0]const u8) *Value; - pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace; extern fn LLVMAddGlobalInAddressSpace(M: *Module, Ty: *Type, Name: [*:0]const u8, AddressSpace: c_uint) *Value; @@ -450,16 +435,6 @@ pub const Module = opaque { Name: [*:0]const u8, ) *Value; - pub const getNamedGlobalAlias = LLVMGetNamedGlobalAlias; - extern fn LLVMGetNamedGlobalAlias( - M: *Module, - /// Empirically, LLVM will call strlen() on `Name` and so it - /// must be both null 
terminated and also have `NameLen` set - /// to the size. - Name: [*:0]const u8, - NameLen: usize, - ) ?*Value; - pub const setTarget = LLVMSetTarget; extern fn LLVMSetTarget(M: *Module, Triple: [*:0]const u8) void; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index f2b8cf9da5..6dd54d3ae4 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -1122,6 +1122,22 @@ void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim) { unwrap(new_owner)->takeName(unwrap(victim)); } +void ZigLLVMRemoveGlobalValue(LLVMValueRef GlobalVal) { + unwrap(GlobalVal)->removeFromParent(); +} + +void ZigLLVMEraseGlobalValue(LLVMValueRef GlobalVal) { + unwrap(GlobalVal)->eraseFromParent(); +} + +void ZigLLVMDeleteGlobalValue(LLVMValueRef GlobalVal) { + delete unwrap(GlobalVal); +} + +void ZigLLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal) { + unwrap(GlobalVar)->setInitializer(ConstantVal ? unwrap(ConstantVal) : nullptr); +} + ZigLLVMDIGlobalVariable* ZigLLVMGlobalGetVariable(ZigLLVMDIGlobalVariableExpression *global_variable_expression) { return reinterpret_cast(reinterpret_cast(global_variable_expression)->getVariable()); } diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 7a0fb51b16..6671922090 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -492,6 +492,10 @@ enum ZigLLVM_ObjectFormatType { }; ZIG_EXTERN_C void ZigLLVMTakeName(LLVMValueRef new_owner, LLVMValueRef victim); +ZIG_EXTERN_C void ZigLLVMRemoveGlobalValue(LLVMValueRef GlobalVal); +ZIG_EXTERN_C void ZigLLVMEraseGlobalValue(LLVMValueRef GlobalVal); +ZIG_EXTERN_C void ZigLLVMDeleteGlobalValue(LLVMValueRef GlobalVal); +ZIG_EXTERN_C void ZigLLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal); #define ZigLLVM_DIFlags_Zero 0U #define ZigLLVM_DIFlags_Private 1U -- cgit v1.2.3 From 35cd56a3693d55eedb80e8bd1538420597b05ff5 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 8 Aug 2023 11:40:51 -0400 Subject: llvm: fix alias issues --- src/codegen/llvm.zig | 23 +- src/codegen/llvm/Builder.zig | 1007 +++++++++++++++++++++--------------------- 2 files changed, 522 insertions(+), 508 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index a443a2184e..d5856bcb52 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1079,7 +1079,8 @@ pub const Object = struct { // Same logic as below but for externs instead of exports. const decl_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(mod.declPtr(decl_index).name)) orelse continue; const other_global = object.builder.getGlobal(decl_name) orelse continue; - if (other_global.eql(global, &object.builder)) continue; + if (other_global.toConst().getBase(&object.builder) == + global.toConst().getBase(&object.builder)) continue; try global.replace(other_global, &object.builder); } @@ -1087,13 +1088,14 @@ pub const Object = struct { for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| { const global = object.decl_map.get(decl_index) orelse continue; + const global_base = global.toConst().getBase(&object.builder); for (export_list.items) |exp| { // Detect if the LLVM global has already been created as an extern. In such // case, we need to replace all uses of it with this exported global. 
const exp_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(exp.opts.name)) orelse continue; const other_global = object.builder.getGlobal(exp_name) orelse continue; - if (other_global.eql(global, &object.builder)) continue; + if (other_global.toConst().getBase(&object.builder) == global_base) continue; try global.takeName(other_global, &object.builder); try other_global.replace(global, &object.builder); @@ -1714,7 +1716,7 @@ pub const Object = struct { try self.builder.string(section), &self.builder, ), - else => unreachable, + .alias, .replaced => unreachable, }; if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal) global_index.ptrConst(&self.builder).kind @@ -1735,15 +1737,16 @@ pub const Object = struct { continue; }, .variable, .function => {}, - else => unreachable, + .replaced => unreachable, } } - _ = try self.builder.addAlias( - exp_name, + const alias_index = try self.builder.addAlias( + .empty, global_index.typeOf(&self.builder), .default, global_index.toConst(), ); + try alias_index.rename(exp_name, &self.builder); } } else { const fqn = try self.builder.string( @@ -7703,7 +7706,7 @@ pub const FuncGen = struct { if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) { .alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function, .function => |function| function, - else => unreachable, + .variable, .replaced => unreachable, }; return o.builder.addFunction( try o.builder.fnType(return_type, param_types, .normal), @@ -7751,11 +7754,7 @@ pub const FuncGen = struct { }; const fn_name = try o.builder.fmt("__{s}{s}f2", .{ fn_base_name, compiler_rt_float_abbrev }); - const libc_fn = try self.getLibcFunction( - fn_name, - ([1]Builder.Type{scalar_llvm_ty} ** 2)[0..], - .i32, - ); + const libc_fn = try self.getLibcFunction(fn_name, &.{ scalar_llvm_ty, scalar_llvm_ty }, .i32); const zero = try o.builder.intConst(.i32, 0); const int_cond: Builder.IntegerCondition = switch (pred) { diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index eb1df06c0b..316aca7a24 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -2320,6 +2320,10 @@ pub const Alias = struct { return self.ptrConst(builder).global.name(builder); } + pub fn rename(self: Index, new_name: String, builder: *Builder) Allocator.Error!void { + return self.ptrConst(builder).global.rename(new_name, builder); + } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).global.typeOf(builder); } @@ -2373,6 +2377,10 @@ pub const Variable = struct { return self.ptrConst(builder).global.name(builder); } + pub fn rename(self: Index, new_name: String, builder: *Builder) Allocator.Error!void { + return self.ptrConst(builder).global.rename(new_name, builder); + } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).global.typeOf(builder); } @@ -3812,6 +3820,10 @@ pub const Function = struct { return self.ptrConst(builder).global.name(builder); } + pub fn rename(self: Index, new_name: String, builder: *Builder) Allocator.Error!void { + return self.ptrConst(builder).global.rename(new_name, builder); + } + pub fn typeOf(self: Index, builder: *const Builder) Type { return self.ptrConst(builder).global.typeOf(builder); } @@ -9393,518 +9405,521 @@ pub fn printUnbuffered( need_newline = true; } - var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .{}; - defer attribute_groups.deinit(self.gpa); - - if 
(self.functions.items.len > 0) { + if (self.aliases.items.len > 0) { if (need_newline) try writer.writeByte('\n'); - for (0.., self.functions.items) |function_i, function| { - if (function_i > 0) try writer.writeByte('\n'); - const function_index: Function.Index = @enumFromInt(function_i); - if (function.global.getReplacement(self) != .none) continue; - const global = function.global.ptrConst(self); - const params_len = global.type.functionParameters(self).len; - const function_attributes = function.attributes.func(self); - if (function_attributes != .none) try writer.print( - \\; Function Attrs:{} - \\ - , .{function_attributes.fmt(self)}); + for (self.aliases.items) |alias| { + if (alias.global.getReplacement(self) != .none) continue; + const global = alias.global.ptrConst(self); try writer.print( - \\{s}{}{}{}{}{}{"} {} {}( + \\{} ={}{}{}{}{ }{} alias {%}, {%} + \\ , .{ - if (function.instructions.len > 0) "define" else "declare", + alias.global.fmt(self), global.linkage, global.preemption, global.visibility, global.dll_storage_class, - function.call_conv, - function.attributes.ret(self).fmt(self), - global.type.functionReturn(self).fmt(self), - function.global.fmt(self), + alias.thread_local, + global.unnamed_addr, + global.type.fmt(self), + alias.aliasee.fmt(self), }); - for (0..params_len) |arg| { - if (arg > 0) try writer.writeAll(", "); - try writer.print( - \\{%}{"} - , .{ - global.type.functionParameters(self)[arg].fmt(self), - function.attributes.param(arg, self).fmt(self), - }); - if (function.instructions.len > 0) - try writer.print(" {}", .{function.arg(@intCast(arg)).fmt(function_index, self)}) - else - try writer.print(" %{d}", .{arg}); - } - switch (global.type.functionKind(self)) { - .normal => {}, - .vararg => { - if (params_len > 0) try writer.writeAll(", "); - try writer.writeAll("..."); - }, - } - try writer.print("){}{ }", .{ global.unnamed_addr, global.addr_space }); - if (function_attributes != .none) try writer.print(" #{d}", .{ - (try attribute_groups.getOrPutValue(self.gpa, function_attributes, {})).index, + } + need_newline = true; + } + + var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .{}; + defer attribute_groups.deinit(self.gpa); + + for (0.., self.functions.items) |function_i, function| { + if (function.global.getReplacement(self) != .none) continue; + if (need_newline) try writer.writeByte('\n'); + const function_index: Function.Index = @enumFromInt(function_i); + const global = function.global.ptrConst(self); + const params_len = global.type.functionParameters(self).len; + const function_attributes = function.attributes.func(self); + if (function_attributes != .none) try writer.print( + \\; Function Attrs:{} + \\ + , .{function_attributes.fmt(self)}); + try writer.print( + \\{s}{}{}{}{}{}{"} {} {}( + , .{ + if (function.instructions.len > 0) "define" else "declare", + global.linkage, + global.preemption, + global.visibility, + global.dll_storage_class, + function.call_conv, + function.attributes.ret(self).fmt(self), + global.type.functionReturn(self).fmt(self), + function.global.fmt(self), + }); + for (0..params_len) |arg| { + if (arg > 0) try writer.writeAll(", "); + try writer.print( + \\{%}{"} + , .{ + global.type.functionParameters(self)[arg].fmt(self), + function.attributes.param(arg, self).fmt(self), }); - try writer.print("{ }", .{function.alignment}); - if (function.instructions.len > 0) { - var block_incoming_len: u32 = undefined; - try writer.writeAll(" {\n"); - for (params_len..function.instructions.len) |instruction_i| 
{ - const instruction_index: Function.Instruction.Index = @enumFromInt(instruction_i); - const instruction = function.instructions.get(@intFromEnum(instruction_index)); - switch (instruction.tag) { - .add, - .@"add nsw", - .@"add nuw", - .@"add nuw nsw", - .@"and", - .ashr, - .@"ashr exact", - .fadd, - .@"fadd fast", - .@"fcmp false", - .@"fcmp fast false", - .@"fcmp fast oeq", - .@"fcmp fast oge", - .@"fcmp fast ogt", - .@"fcmp fast ole", - .@"fcmp fast olt", - .@"fcmp fast one", - .@"fcmp fast ord", - .@"fcmp fast true", - .@"fcmp fast ueq", - .@"fcmp fast uge", - .@"fcmp fast ugt", - .@"fcmp fast ule", - .@"fcmp fast ult", - .@"fcmp fast une", - .@"fcmp fast uno", - .@"fcmp oeq", - .@"fcmp oge", - .@"fcmp ogt", - .@"fcmp ole", - .@"fcmp olt", - .@"fcmp one", - .@"fcmp ord", - .@"fcmp true", - .@"fcmp ueq", - .@"fcmp uge", - .@"fcmp ugt", - .@"fcmp ule", - .@"fcmp ult", - .@"fcmp une", - .@"fcmp uno", - .fdiv, - .@"fdiv fast", - .fmul, - .@"fmul fast", - .frem, - .@"frem fast", - .fsub, - .@"fsub fast", - .@"icmp eq", - .@"icmp ne", - .@"icmp sge", - .@"icmp sgt", - .@"icmp sle", - .@"icmp slt", - .@"icmp uge", - .@"icmp ugt", - .@"icmp ule", - .@"icmp ult", - .lshr, - .@"lshr exact", - .mul, - .@"mul nsw", - .@"mul nuw", - .@"mul nuw nsw", - .@"or", - .sdiv, - .@"sdiv exact", - .srem, - .shl, - .@"shl nsw", - .@"shl nuw", - .@"shl nuw nsw", - .sub, - .@"sub nsw", - .@"sub nuw", - .@"sub nuw nsw", - .udiv, - .@"udiv exact", - .urem, - .xor, - => |tag| { - const extra = - function.extraData(Function.Instruction.Binary, instruction.data); - try writer.print(" %{} = {s} {%}, {}\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.lhs.fmt(function_index, self), - extra.rhs.fmt(function_index, self), - }); - }, - .addrspacecast, - .bitcast, - .fpext, - .fptosi, - .fptoui, - .fptrunc, - .inttoptr, - .ptrtoint, - .sext, - .sitofp, - .trunc, - .uitofp, - .zext, - => |tag| { - const extra = - function.extraData(Function.Instruction.Cast, instruction.data); - try writer.print(" %{} = {s} {%} to {%}\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.val.fmt(function_index, self), - extra.type.fmt(self), - }); - }, - .alloca, - .@"alloca inalloca", - => |tag| { - const extra = - function.extraData(Function.Instruction.Alloca, instruction.data); - try writer.print(" %{} = {s} {%}{,%}{, }{, }\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.type.fmt(self), - extra.len.fmt(function_index, self), - extra.info.alignment, - extra.info.addr_space, - }); - }, - .arg => unreachable, - .atomicrmw => |tag| { - const extra = - function.extraData(Function.Instruction.AtomicRmw, instruction.data); - try writer.print(" %{} = {s}{ } {s} {%}, {%}{ }{ }{, }\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.info.access_kind, - @tagName(extra.info.atomic_rmw_operation), - extra.ptr.fmt(function_index, self), - extra.val.fmt(function_index, self), - extra.info.sync_scope, - extra.info.success_ordering, - extra.info.alignment, - }); - }, - .block => { - block_incoming_len = instruction.data; - const name = instruction_index.name(&function); - if (@intFromEnum(instruction_index) > params_len) - try writer.writeByte('\n'); - try writer.print("{}:\n", .{name.fmt(self)}); - }, - .br => |tag| { - const target: Function.Block.Index = @enumFromInt(instruction.data); - try writer.print(" {s} {%}\n", .{ - @tagName(tag), target.toInst(&function).fmt(function_index, self), - }); - }, - .br_cond => { - const extra = - 
function.extraData(Function.Instruction.BrCond, instruction.data); - try writer.print(" br {%}, {%}, {%}\n", .{ - extra.cond.fmt(function_index, self), - extra.then.toInst(&function).fmt(function_index, self), - extra.@"else".toInst(&function).fmt(function_index, self), - }); - }, - .call, - .@"call fast", - .@"musttail call", - .@"musttail call fast", - .@"notail call", - .@"notail call fast", - .@"tail call", - .@"tail call fast", - => |tag| { - var extra = - function.extraDataTrail(Function.Instruction.Call, instruction.data); - const args = extra.trail.next(extra.data.args_len, Value, &function); - try writer.writeAll(" "); - const ret_ty = extra.data.ty.functionReturn(self); - switch (ret_ty) { - .void => {}, - else => try writer.print("%{} = ", .{ - instruction_index.name(&function).fmt(self), - }), - .none => unreachable, - } - try writer.print("{s}{}{}{} {%} {}(", .{ - @tagName(tag), - extra.data.info.call_conv, - extra.data.attributes.ret(self).fmt(self), - extra.data.callee.typeOf(function_index, self).pointerAddrSpace(self), - switch (extra.data.ty.functionKind(self)) { - .normal => ret_ty, - .vararg => extra.data.ty, - }.fmt(self), - extra.data.callee.fmt(function_index, self), - }); - for (0.., args) |arg_index, arg| { - if (arg_index > 0) try writer.writeAll(", "); - try writer.print("{%}{} {}", .{ - arg.typeOf(function_index, self).fmt(self), - extra.data.attributes.param(arg_index, self).fmt(self), - arg.fmt(function_index, self), - }); - } - try writer.writeByte(')'); - const call_function_attributes = extra.data.attributes.func(self); - if (call_function_attributes != .none) try writer.print(" #{d}", .{ - (try attribute_groups.getOrPutValue( - self.gpa, - call_function_attributes, - {}, - )).index, - }); - try writer.writeByte('\n'); - }, - .cmpxchg, - .@"cmpxchg weak", - => |tag| { - const extra = - function.extraData(Function.Instruction.CmpXchg, instruction.data); - try writer.print(" %{} = {s}{ } {%}, {%}, {%}{ }{ }{ }{, }\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.info.access_kind, - extra.ptr.fmt(function_index, self), - extra.cmp.fmt(function_index, self), - extra.new.fmt(function_index, self), - extra.info.sync_scope, - extra.info.success_ordering, - extra.info.failure_ordering, - extra.info.alignment, - }); - }, - .extractelement => |tag| { - const extra = function.extraData( - Function.Instruction.ExtractElement, - instruction.data, - ); - try writer.print(" %{} = {s} {%}, {%}\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.val.fmt(function_index, self), - extra.index.fmt(function_index, self), - }); - }, - .extractvalue => |tag| { - var extra = function.extraDataTrail( - Function.Instruction.ExtractValue, - instruction.data, - ); - const indices = extra.trail.next(extra.data.indices_len, u32, &function); - try writer.print(" %{} = {s} {%}", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.data.val.fmt(function_index, self), - }); - for (indices) |index| try writer.print(", {d}", .{index}); - try writer.writeByte('\n'); - }, - .fence => |tag| { - const info: MemoryAccessInfo = @bitCast(instruction.data); - try writer.print(" {s}{ }{ }", .{ - @tagName(tag), - info.sync_scope, - info.success_ordering, - }); - }, - .fneg, - .@"fneg fast", - => |tag| { - const val: Value = @enumFromInt(instruction.data); - try writer.print(" %{} = {s} {%}\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - val.fmt(function_index, self), - }); - }, - .getelementptr, - 
.@"getelementptr inbounds", - => |tag| { - var extra = function.extraDataTrail( - Function.Instruction.GetElementPtr, - instruction.data, - ); - const indices = extra.trail.next(extra.data.indices_len, Value, &function); - try writer.print(" %{} = {s} {%}, {%}", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.data.type.fmt(self), - extra.data.base.fmt(function_index, self), - }); - for (indices) |index| try writer.print(", {%}", .{ - index.fmt(function_index, self), - }); - try writer.writeByte('\n'); - }, - .insertelement => |tag| { - const extra = function.extraData( - Function.Instruction.InsertElement, - instruction.data, - ); - try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.val.fmt(function_index, self), - extra.elem.fmt(function_index, self), - extra.index.fmt(function_index, self), - }); - }, - .insertvalue => |tag| { - var extra = function.extraDataTrail( - Function.Instruction.InsertValue, - instruction.data, - ); - const indices = extra.trail.next(extra.data.indices_len, u32, &function); - try writer.print(" %{} = {s} {%}, {%}", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.data.val.fmt(function_index, self), - extra.data.elem.fmt(function_index, self), - }); - for (indices) |index| try writer.print(", {d}", .{index}); - try writer.writeByte('\n'); - }, - .load, - .@"load atomic", - => |tag| { - const extra = - function.extraData(Function.Instruction.Load, instruction.data); - try writer.print(" %{} = {s}{ } {%}, {%}{ }{ }{, }\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.info.access_kind, - extra.type.fmt(self), - extra.ptr.fmt(function_index, self), - extra.info.sync_scope, - extra.info.success_ordering, - extra.info.alignment, - }); - }, - .phi, - .@"phi fast", - => |tag| { - var extra = - function.extraDataTrail(Function.Instruction.Phi, instruction.data); - const vals = extra.trail.next(block_incoming_len, Value, &function); - const blocks = - extra.trail.next(block_incoming_len, Function.Block.Index, &function); - try writer.print(" %{} = {s} {%} ", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - vals[0].typeOf(function_index, self).fmt(self), - }); - for (0.., vals, blocks) |incoming_index, incoming_val, incoming_block| { - if (incoming_index > 0) try writer.writeAll(", "); - try writer.print("[ {}, {} ]", .{ - incoming_val.fmt(function_index, self), - incoming_block.toInst(&function).fmt(function_index, self), - }); - } + if (function.instructions.len > 0) + try writer.print(" {}", .{function.arg(@intCast(arg)).fmt(function_index, self)}) + else + try writer.print(" %{d}", .{arg}); + } + switch (global.type.functionKind(self)) { + .normal => {}, + .vararg => { + if (params_len > 0) try writer.writeAll(", "); + try writer.writeAll("..."); + }, + } + try writer.print("){}{ }", .{ global.unnamed_addr, global.addr_space }); + if (function_attributes != .none) try writer.print(" #{d}", .{ + (try attribute_groups.getOrPutValue(self.gpa, function_attributes, {})).index, + }); + try writer.print("{ }", .{function.alignment}); + if (function.instructions.len > 0) { + var block_incoming_len: u32 = undefined; + try writer.writeAll(" {\n"); + for (params_len..function.instructions.len) |instruction_i| { + const instruction_index: Function.Instruction.Index = @enumFromInt(instruction_i); + const instruction = function.instructions.get(@intFromEnum(instruction_index)); + switch (instruction.tag) { + 
.add, + .@"add nsw", + .@"add nuw", + .@"add nuw nsw", + .@"and", + .ashr, + .@"ashr exact", + .fadd, + .@"fadd fast", + .@"fcmp false", + .@"fcmp fast false", + .@"fcmp fast oeq", + .@"fcmp fast oge", + .@"fcmp fast ogt", + .@"fcmp fast ole", + .@"fcmp fast olt", + .@"fcmp fast one", + .@"fcmp fast ord", + .@"fcmp fast true", + .@"fcmp fast ueq", + .@"fcmp fast uge", + .@"fcmp fast ugt", + .@"fcmp fast ule", + .@"fcmp fast ult", + .@"fcmp fast une", + .@"fcmp fast uno", + .@"fcmp oeq", + .@"fcmp oge", + .@"fcmp ogt", + .@"fcmp ole", + .@"fcmp olt", + .@"fcmp one", + .@"fcmp ord", + .@"fcmp true", + .@"fcmp ueq", + .@"fcmp uge", + .@"fcmp ugt", + .@"fcmp ule", + .@"fcmp ult", + .@"fcmp une", + .@"fcmp uno", + .fdiv, + .@"fdiv fast", + .fmul, + .@"fmul fast", + .frem, + .@"frem fast", + .fsub, + .@"fsub fast", + .@"icmp eq", + .@"icmp ne", + .@"icmp sge", + .@"icmp sgt", + .@"icmp sle", + .@"icmp slt", + .@"icmp uge", + .@"icmp ugt", + .@"icmp ule", + .@"icmp ult", + .lshr, + .@"lshr exact", + .mul, + .@"mul nsw", + .@"mul nuw", + .@"mul nuw nsw", + .@"or", + .sdiv, + .@"sdiv exact", + .srem, + .shl, + .@"shl nsw", + .@"shl nuw", + .@"shl nuw nsw", + .sub, + .@"sub nsw", + .@"sub nuw", + .@"sub nuw nsw", + .udiv, + .@"udiv exact", + .urem, + .xor, + => |tag| { + const extra = function.extraData(Function.Instruction.Binary, instruction.data); + try writer.print(" %{} = {s} {%}, {}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + }); + }, + .addrspacecast, + .bitcast, + .fpext, + .fptosi, + .fptoui, + .fptrunc, + .inttoptr, + .ptrtoint, + .sext, + .sitofp, + .trunc, + .uitofp, + .zext, + => |tag| { + const extra = function.extraData(Function.Instruction.Cast, instruction.data); + try writer.print(" %{} = {s} {%} to {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.val.fmt(function_index, self), + extra.type.fmt(self), + }); + }, + .alloca, + .@"alloca inalloca", + => |tag| { + const extra = function.extraData(Function.Instruction.Alloca, instruction.data); + try writer.print(" %{} = {s} {%}{,%}{, }{, }\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.type.fmt(self), + extra.len.fmt(function_index, self), + extra.info.alignment, + extra.info.addr_space, + }); + }, + .arg => unreachable, + .atomicrmw => |tag| { + const extra = + function.extraData(Function.Instruction.AtomicRmw, instruction.data); + try writer.print(" %{} = {s}{ } {s} {%}, {%}{ }{ }{, }\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.info.access_kind, + @tagName(extra.info.atomic_rmw_operation), + extra.ptr.fmt(function_index, self), + extra.val.fmt(function_index, self), + extra.info.sync_scope, + extra.info.success_ordering, + extra.info.alignment, + }); + }, + .block => { + block_incoming_len = instruction.data; + const name = instruction_index.name(&function); + if (@intFromEnum(instruction_index) > params_len) try writer.writeByte('\n'); - }, - .ret => |tag| { - const val: Value = @enumFromInt(instruction.data); - try writer.print(" {s} {%}\n", .{ - @tagName(tag), - val.fmt(function_index, self), - }); - }, - .@"ret void", - .@"unreachable", - => |tag| try writer.print(" {s}\n", .{@tagName(tag)}), - .select, - .@"select fast", - => |tag| { - const extra = - function.extraData(Function.Instruction.Select, instruction.data); - try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ - instruction_index.name(&function).fmt(self), - 
@tagName(tag), - extra.cond.fmt(function_index, self), - extra.lhs.fmt(function_index, self), - extra.rhs.fmt(function_index, self), - }); - }, - .shufflevector => |tag| { - const extra = function.extraData( - Function.Instruction.ShuffleVector, - instruction.data, - ); - try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + try writer.print("{}:\n", .{name.fmt(self)}); + }, + .br => |tag| { + const target: Function.Block.Index = @enumFromInt(instruction.data); + try writer.print(" {s} {%}\n", .{ + @tagName(tag), target.toInst(&function).fmt(function_index, self), + }); + }, + .br_cond => { + const extra = function.extraData(Function.Instruction.BrCond, instruction.data); + try writer.print(" br {%}, {%}, {%}\n", .{ + extra.cond.fmt(function_index, self), + extra.then.toInst(&function).fmt(function_index, self), + extra.@"else".toInst(&function).fmt(function_index, self), + }); + }, + .call, + .@"call fast", + .@"musttail call", + .@"musttail call fast", + .@"notail call", + .@"notail call fast", + .@"tail call", + .@"tail call fast", + => |tag| { + var extra = + function.extraDataTrail(Function.Instruction.Call, instruction.data); + const args = extra.trail.next(extra.data.args_len, Value, &function); + try writer.writeAll(" "); + const ret_ty = extra.data.ty.functionReturn(self); + switch (ret_ty) { + .void => {}, + else => try writer.print("%{} = ", .{ instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.lhs.fmt(function_index, self), - extra.rhs.fmt(function_index, self), - extra.mask.fmt(function_index, self), - }); - }, - .store, - .@"store atomic", - => |tag| { - const extra = - function.extraData(Function.Instruction.Store, instruction.data); - try writer.print(" {s}{ } {%}, {%}{ }{ }{, }\n", .{ - @tagName(tag), - extra.info.access_kind, - extra.val.fmt(function_index, self), - extra.ptr.fmt(function_index, self), - extra.info.sync_scope, - extra.info.success_ordering, - extra.info.alignment, - }); - }, - .@"switch" => |tag| { - var extra = - function.extraDataTrail(Function.Instruction.Switch, instruction.data); - const vals = extra.trail.next(extra.data.cases_len, Constant, &function); - const blocks = - extra.trail.next(extra.data.cases_len, Function.Block.Index, &function); - try writer.print(" {s} {%}, {%} [\n", .{ - @tagName(tag), - extra.data.val.fmt(function_index, self), - extra.data.default.toInst(&function).fmt(function_index, self), + }), + .none => unreachable, + } + try writer.print("{s}{}{}{} {%} {}(", .{ + @tagName(tag), + extra.data.info.call_conv, + extra.data.attributes.ret(self).fmt(self), + extra.data.callee.typeOf(function_index, self).pointerAddrSpace(self), + switch (extra.data.ty.functionKind(self)) { + .normal => ret_ty, + .vararg => extra.data.ty, + }.fmt(self), + extra.data.callee.fmt(function_index, self), + }); + for (0.., args) |arg_index, arg| { + if (arg_index > 0) try writer.writeAll(", "); + try writer.print("{%}{} {}", .{ + arg.typeOf(function_index, self).fmt(self), + extra.data.attributes.param(arg_index, self).fmt(self), + arg.fmt(function_index, self), }); - for (vals, blocks) |case_val, case_block| try writer.print( - " {%}, {%}\n", - .{ - case_val.fmt(self), - case_block.toInst(&function).fmt(function_index, self), - }, - ); - try writer.writeAll(" ]\n"); - }, - .va_arg => |tag| { - const extra = - function.extraData(Function.Instruction.VaArg, instruction.data); - try writer.print(" %{} = {s} {%}, {%}\n", .{ - instruction_index.name(&function).fmt(self), - @tagName(tag), - extra.list.fmt(function_index, self), - 
extra.type.fmt(self), + } + try writer.writeByte(')'); + const call_function_attributes = extra.data.attributes.func(self); + if (call_function_attributes != .none) try writer.print(" #{d}", .{ + (try attribute_groups.getOrPutValue( + self.gpa, + call_function_attributes, + {}, + )).index, + }); + try writer.writeByte('\n'); + }, + .cmpxchg, + .@"cmpxchg weak", + => |tag| { + const extra = + function.extraData(Function.Instruction.CmpXchg, instruction.data); + try writer.print(" %{} = {s}{ } {%}, {%}, {%}{ }{ }{ }{, }\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.info.access_kind, + extra.ptr.fmt(function_index, self), + extra.cmp.fmt(function_index, self), + extra.new.fmt(function_index, self), + extra.info.sync_scope, + extra.info.success_ordering, + extra.info.failure_ordering, + extra.info.alignment, + }); + }, + .extractelement => |tag| { + const extra = + function.extraData(Function.Instruction.ExtractElement, instruction.data); + try writer.print(" %{} = {s} {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.val.fmt(function_index, self), + extra.index.fmt(function_index, self), + }); + }, + .extractvalue => |tag| { + var extra = function.extraDataTrail( + Function.Instruction.ExtractValue, + instruction.data, + ); + const indices = extra.trail.next(extra.data.indices_len, u32, &function); + try writer.print(" %{} = {s} {%}", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.data.val.fmt(function_index, self), + }); + for (indices) |index| try writer.print(", {d}", .{index}); + try writer.writeByte('\n'); + }, + .fence => |tag| { + const info: MemoryAccessInfo = @bitCast(instruction.data); + try writer.print(" {s}{ }{ }", .{ + @tagName(tag), + info.sync_scope, + info.success_ordering, + }); + }, + .fneg, + .@"fneg fast", + => |tag| { + const val: Value = @enumFromInt(instruction.data); + try writer.print(" %{} = {s} {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + val.fmt(function_index, self), + }); + }, + .getelementptr, + .@"getelementptr inbounds", + => |tag| { + var extra = function.extraDataTrail( + Function.Instruction.GetElementPtr, + instruction.data, + ); + const indices = extra.trail.next(extra.data.indices_len, Value, &function); + try writer.print(" %{} = {s} {%}, {%}", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.data.type.fmt(self), + extra.data.base.fmt(function_index, self), + }); + for (indices) |index| try writer.print(", {%}", .{ + index.fmt(function_index, self), + }); + try writer.writeByte('\n'); + }, + .insertelement => |tag| { + const extra = + function.extraData(Function.Instruction.InsertElement, instruction.data); + try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.val.fmt(function_index, self), + extra.elem.fmt(function_index, self), + extra.index.fmt(function_index, self), + }); + }, + .insertvalue => |tag| { + var extra = + function.extraDataTrail(Function.Instruction.InsertValue, instruction.data); + const indices = extra.trail.next(extra.data.indices_len, u32, &function); + try writer.print(" %{} = {s} {%}, {%}", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.data.val.fmt(function_index, self), + extra.data.elem.fmt(function_index, self), + }); + for (indices) |index| try writer.print(", {d}", .{index}); + try writer.writeByte('\n'); + }, + .load, + .@"load atomic", + => |tag| { + const extra = 
function.extraData(Function.Instruction.Load, instruction.data); + try writer.print(" %{} = {s}{ } {%}, {%}{ }{ }{, }\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.info.access_kind, + extra.type.fmt(self), + extra.ptr.fmt(function_index, self), + extra.info.sync_scope, + extra.info.success_ordering, + extra.info.alignment, + }); + }, + .phi, + .@"phi fast", + => |tag| { + var extra = function.extraDataTrail(Function.Instruction.Phi, instruction.data); + const vals = extra.trail.next(block_incoming_len, Value, &function); + const blocks = + extra.trail.next(block_incoming_len, Function.Block.Index, &function); + try writer.print(" %{} = {s} {%} ", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + vals[0].typeOf(function_index, self).fmt(self), + }); + for (0.., vals, blocks) |incoming_index, incoming_val, incoming_block| { + if (incoming_index > 0) try writer.writeAll(", "); + try writer.print("[ {}, {} ]", .{ + incoming_val.fmt(function_index, self), + incoming_block.toInst(&function).fmt(function_index, self), }); - }, - } + } + try writer.writeByte('\n'); + }, + .ret => |tag| { + const val: Value = @enumFromInt(instruction.data); + try writer.print(" {s} {%}\n", .{ + @tagName(tag), + val.fmt(function_index, self), + }); + }, + .@"ret void", + .@"unreachable", + => |tag| try writer.print(" {s}\n", .{@tagName(tag)}), + .select, + .@"select fast", + => |tag| { + const extra = function.extraData(Function.Instruction.Select, instruction.data); + try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.cond.fmt(function_index, self), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + }); + }, + .shufflevector => |tag| { + const extra = + function.extraData(Function.Instruction.ShuffleVector, instruction.data); + try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.lhs.fmt(function_index, self), + extra.rhs.fmt(function_index, self), + extra.mask.fmt(function_index, self), + }); + }, + .store, + .@"store atomic", + => |tag| { + const extra = function.extraData(Function.Instruction.Store, instruction.data); + try writer.print(" {s}{ } {%}, {%}{ }{ }{, }\n", .{ + @tagName(tag), + extra.info.access_kind, + extra.val.fmt(function_index, self), + extra.ptr.fmt(function_index, self), + extra.info.sync_scope, + extra.info.success_ordering, + extra.info.alignment, + }); + }, + .@"switch" => |tag| { + var extra = + function.extraDataTrail(Function.Instruction.Switch, instruction.data); + const vals = extra.trail.next(extra.data.cases_len, Constant, &function); + const blocks = + extra.trail.next(extra.data.cases_len, Function.Block.Index, &function); + try writer.print(" {s} {%}, {%} [\n", .{ + @tagName(tag), + extra.data.val.fmt(function_index, self), + extra.data.default.toInst(&function).fmt(function_index, self), + }); + for (vals, blocks) |case_val, case_block| try writer.print( + " {%}, {%}\n", + .{ + case_val.fmt(self), + case_block.toInst(&function).fmt(function_index, self), + }, + ); + try writer.writeAll(" ]\n"); + }, + .va_arg => |tag| { + const extra = function.extraData(Function.Instruction.VaArg, instruction.data); + try writer.print(" %{} = {s} {%}, {%}\n", .{ + instruction_index.name(&function).fmt(self), + @tagName(tag), + extra.list.fmt(function_index, self), + extra.type.fmt(self), + }); + }, } - try writer.writeByte('}'); } - try writer.writeByte('\n'); + try 
writer.writeByte('}'); } + try writer.writeByte('\n'); need_newline = true; } -- cgit v1.2.3 From 53bea0f7e44591e741c357297a1f25310d36ca78 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 8 Aug 2023 21:32:55 -0400 Subject: llvm: remove dependence on llvm data layout alignment by just using the zig alignment and letting llvm promote it as desired --- src/codegen/llvm.zig | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d5856bcb52..3a2846ef8c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1408,11 +1408,7 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try o.lowerType(param_ty); - const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); - const alignment = Builder.Alignment.fromByteUnits(@max( - param_ty.abiAlignment(mod), - o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)), - )); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); _ = try wip.store(.normal, param, arg_ptr, alignment); @@ -4938,10 +4934,7 @@ pub const FuncGen = struct { } else { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. - const alignment = Builder.Alignment.fromByteUnits(@max( - param_ty.abiAlignment(mod), - o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)), - )); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); const int_ptr = try self.buildAlloca(int_llvm_ty, alignment); _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment); const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, ""); @@ -5117,12 +5110,10 @@ pub const FuncGen = struct { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. - const alignment = Builder.Alignment.fromByteUnits(@max( - o.target_data.abiAlignmentOfType(abi_ret_ty.toLlvm(&o.builder)), - return_type.abiAlignment(mod), - )); - assert(o.target_data.abiSizeOfType(abi_ret_ty.toLlvm(&o.builder)) >= - o.target_data.abiSizeOfType(llvm_ret_ty.toLlvm(&o.builder))); + const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); + if (o.builder.useLibLlvm()) + assert(o.target_data.abiSizeOfType(abi_ret_ty.toLlvm(&o.builder)) >= + o.target_data.abiSizeOfType(llvm_ret_ty.toLlvm(&o.builder))); const rp = try self.buildAlloca(abi_ret_ty, alignment); _ = try self.wip.store(.normal, call, rp, alignment); return if (isByRef(return_type, mod)) -- cgit v1.2.3 From 3e1dd93bb2ac7e9d99fb340f1f4ca6868a52cb6b Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 8 Aug 2023 22:34:24 -0400 Subject: llvm: force strip without libllvm to avoid unimplemented behavior Also fix deinit bugs. 
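The strip change in the diff below can be read as a single rule: when the Builder is not backed by libllvm, stripping is forced on regardless of what the caller asked for, since the non-libllvm path does not yet implement everything that unstripped output would need. A minimal sketch of that rule, assuming a hypothetical Options struct with `use_lib_llvm` and `strip` fields (stand-ins for illustration, not the compiler's actual option type):

    const std = @import("std");

    // Hypothetical stand-in for the relevant option fields; only the rule
    // `strip or !use_lib_llvm` is taken from the diff below.
    const Options = struct {
        use_lib_llvm: bool,
        strip: bool,
    };

    fn effectiveStrip(options: Options) bool {
        return options.strip or !options.use_lib_llvm;
    }

    test "strip is forced when libllvm is not in use" {
        try std.testing.expect(effectiveStrip(.{ .use_lib_llvm = false, .strip = false }));
        try std.testing.expect(!effectiveStrip(.{ .use_lib_llvm = true, .strip = false }));
        try std.testing.expect(effectiveStrip(.{ .use_lib_llvm = true, .strip = true }));
    }
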
--- src/codegen/llvm.zig | 16 +++++++++++----- src/zig_llvm.cpp | 11 ++++++++++- 2 files changed, 21 insertions(+), 6 deletions(-) (limited to 'src/codegen') diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 3a2846ef8c..9f77f8e8a9 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -823,7 +823,7 @@ pub const Object = struct { var builder = try Builder.init(.{ .allocator = gpa, .use_lib_llvm = options.use_lib_llvm, - .strip = options.strip, + .strip = options.strip or !options.use_lib_llvm, // TODO .name = options.root_name, .target = options.target, .triple = llvm_target_triple, @@ -961,14 +961,17 @@ pub const Object = struct { } pub fn deinit(self: *Object, gpa: Allocator) void { - self.di_map.deinit(gpa); - self.di_type_map.deinit(gpa); - self.target_data.dispose(); - self.target_machine.dispose(); + if (self.builder.useLibLlvm()) { + self.di_map.deinit(gpa); + self.di_type_map.deinit(gpa); + self.target_data.dispose(); + self.target_machine.dispose(); + } self.decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); self.extern_collisions.deinit(gpa); + self.builder.deinit(); self.* = undefined; } @@ -1182,6 +1185,9 @@ pub const Object = struct { emit_asm_msg, emit_bin_msg, emit_llvm_ir_msg, emit_llvm_bc_msg, }); + if (emit_asm_path == null and emit_bin_path == null and + emit_llvm_ir_path == null and emit_llvm_bc_path == null) return; + if (!self.builder.useLibLlvm()) { log.err("emitting without libllvm not implemented", .{}); return error.FailedToEmit; diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 6dd54d3ae4..256d3581df 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -1131,7 +1131,16 @@ void ZigLLVMEraseGlobalValue(LLVMValueRef GlobalVal) { } void ZigLLVMDeleteGlobalValue(LLVMValueRef GlobalVal) { - delete unwrap(GlobalVal); + auto *GV = unwrap(GlobalVal); + assert(GV->getParent() == nullptr); + switch (GV->getValueID()) { +#define HANDLE_GLOBAL_VALUE(NAME) \ + case Value::NAME##Val: \ + delete static_cast(GV); \ + break; +#include + default: llvm_unreachable("Expected global value"); + } } void ZigLLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal) { -- cgit v1.2.3
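The deinit changes above follow one ownership pattern: state that only exists when libllvm is in use is torn down behind a `useLibLlvm()` check, and globals that were detached from the module (the `replacements`) are destroyed explicitly before the module and context are disposed, because disposing the module no longer frees them. A small self-contained sketch of that pattern, using illustrative fields rather than the compiler's actual Builder layout:

    const std = @import("std");

    const Builder = struct {
        gpa: std.mem.Allocator,
        use_lib_llvm: bool,
        // Stand-in for globals removed from the LLVM module but not yet destroyed.
        replacements: std.ArrayListUnmanaged(*u32) = .{},
        // Stand-in for state that exists in both modes.
        globals: std.ArrayListUnmanaged(u32) = .{},

        fn deinit(self: *Builder) void {
            if (self.use_lib_llvm) {
                // Detached globals must be freed here; nothing else owns them.
                for (self.replacements.items) |replacement| self.gpa.destroy(replacement);
                self.replacements.deinit(self.gpa);
            }
            self.globals.deinit(self.gpa);
            self.* = undefined;
        }
    };

    test "deinit frees detached replacement globals" {
        var builder: Builder = .{ .gpa = std.testing.allocator, .use_lib_llvm = true };
        try builder.replacements.append(builder.gpa, try builder.gpa.create(u32));
        builder.deinit();
    }
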