author     Andrew Kelley <andrew@ziglang.org>    2023-07-20 12:55:03 -0700
committer  GitHub <noreply@github.com>           2023-07-20 12:55:03 -0700
commit     3f15010abe5c5efaed16799fcb94c9f84117bdde (patch)
tree       f1892ab71f40a4473c35783ac88d86e4290dc0e5 /src
parent     3bada8e3ce9ba72f57c6fbed100c76fd40ba0d15 (diff)
parent     4d31d4d875f32ed49c56151ca053a614b3ae343c (diff)
download   zig-3f15010abe5c5efaed16799fcb94c9f84117bdde.tar.gz
           zig-3f15010abe5c5efaed16799fcb94c9f84117bdde.zip
Merge pull request #16430 from jacobly0/llvm-builder
llvm: begin the journey of independence from llvm
Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig               6
-rw-r--r--  src/Module.zig                   12
-rw-r--r--  src/Sema.zig                      8
-rw-r--r--  src/codegen/llvm.zig           8224
-rw-r--r--  src/codegen/llvm/Builder.zig   7931
-rw-r--r--  src/codegen/llvm/bindings.zig   278
-rw-r--r--  src/link.zig                      1
-rw-r--r--  src/main.zig                      8
-rw-r--r--  src/zig_llvm.cpp                  4
9 files changed, 12502 insertions, 3970 deletions
diff --git a/src/Compilation.zig b/src/Compilation.zig
index eb4b67933d..3a95f4831a 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -538,6 +538,7 @@ pub const InitOptions = struct {
want_lto: ?bool = null,
want_unwind_tables: ?bool = null,
use_llvm: ?bool = null,
+ use_lib_llvm: ?bool = null,
use_lld: ?bool = null,
use_clang: ?bool = null,
single_threaded: ?bool = null,
@@ -753,7 +754,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const root_name = try arena.dupeZ(u8, options.root_name);
// Make a decision on whether to use LLVM or our own backend.
- const use_llvm = build_options.have_llvm and blk: {
+ const use_lib_llvm = options.use_lib_llvm orelse build_options.have_llvm;
+ const use_llvm = blk: {
if (options.use_llvm) |explicit|
break :blk explicit;
@@ -1161,6 +1163,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
hash.add(valgrind);
hash.add(single_threaded);
hash.add(use_llvm);
+ hash.add(use_lib_llvm);
hash.add(dll_export_fns);
hash.add(options.is_test);
hash.add(options.test_evented_io);
@@ -1444,6 +1447,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.optimize_mode = options.optimize_mode,
.use_lld = use_lld,
.use_llvm = use_llvm,
+ .use_lib_llvm = use_lib_llvm,
.link_libc = link_libc,
.link_libcpp = link_libcpp,
.link_libunwind = link_libunwind,
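The new use_lib_llvm option is an optional bool that, when left unset, falls back to whether this build of the compiler has libLLVM linked in at all. A minimal, self-contained sketch of that orelse defaulting pattern, using stand-in values rather than the real InitOptions and build_options:

const std = @import("std");

test "use_lib_llvm falls back to have_llvm when unset" {
    // Stand-ins for options.use_lib_llvm and build_options.have_llvm;
    // the real values come from the CLI and the compiler's build script.
    const requested: ?bool = null; // the caller expressed no preference
    const have_llvm = true; // this compiler build links against libLLVM

    // Same shape as the defaulting in Compilation.create above.
    const use_lib_llvm = requested orelse have_llvm;
    try std.testing.expect(use_lib_llvm);
}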
diff --git a/src/Module.zig b/src/Module.zig
index ea444d3cc4..41236880c5 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -835,10 +835,6 @@ pub const Decl = struct {
assert(decl.has_tv);
return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)));
}
-
- pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
- decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
- }
};
/// This state is attached to every Decl when Module emit_h is non-null.
@@ -4204,7 +4200,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
try wip_captures.finalize();
for (comptime_mutable_decls.items) |decl_index| {
const decl = mod.declPtr(decl_index);
- try decl.intern(mod);
+ _ = try decl.internValue(mod);
}
new_decl.analysis = .complete;
} else |err| switch (err) {
@@ -4315,7 +4311,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
try wip_captures.finalize();
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
- try ct_decl.intern(mod);
+ _ = try ct_decl.internValue(mod);
}
const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 };
const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 };
@@ -5362,7 +5358,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
try wip_captures.finalize();
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
- try ct_decl.intern(mod);
+ _ = try ct_decl.internValue(mod);
}
// Copy the block into place and mark that as the main block.
@@ -6369,7 +6365,7 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void {
if (decl.alive) return;
decl.alive = true;
- try decl.intern(mod);
+ _ = try decl.internValue(mod);
// This is the first time we are marking this Decl alive. We must
// therefore recurse into its value and mark any Decl it references
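Decl.intern used to update decl.val in place; its replacement internValue returns the interned result instead, and these call sites have no use for it, so they discard it with "_ =" (Zig refuses to silently ignore a returned value). A tiny sketch of that discard pattern, with a hypothetical helper standing in for internValue, whose real signature lives in Module.zig and is not shown in this hunk:

const std = @import("std");

// Hypothetical stand-in: returns a result rather than mutating its argument,
// mirroring the intern -> internValue change above.
fn internStub(value: u32) error{OutOfMemory}!u32 {
    return value;
}

test "explicitly discarding a return value that is not needed" {
    _ = try internStub(42); // result intentionally ignored, as at the call sites above
}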
diff --git a/src/Sema.zig b/src/Sema.zig
index 3f8b936e0b..08d5f02a17 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3899,7 +3899,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
const decl = mod.declPtr(decl_index);
- if (iac.is_const) try decl.intern(mod);
+ if (iac.is_const) _ = try decl.internValue(mod);
const final_elem_ty = decl.ty;
const final_ptr_ty = try mod.ptrType(.{
.child = final_elem_ty.toIntern(),
@@ -33577,7 +33577,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
try wip_captures.finalize();
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
- try ct_decl.intern(mod);
+ _ = try ct_decl.internValue(mod);
}
} else {
if (fields_bit_sum > std.math.maxInt(u16)) {
@@ -34645,7 +34645,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
try wip_captures.finalize();
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
- try ct_decl.intern(mod);
+ _ = try ct_decl.internValue(mod);
}
struct_obj.have_field_inits = true;
@@ -34744,7 +34744,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
try wip_captures.finalize();
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
- try ct_decl.intern(mod);
+ _ = try ct_decl.internValue(mod);
}
try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 4960414499..57842ef1e0 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -7,7 +7,11 @@ const math = std.math;
const native_endian = builtin.cpu.arch.endian();
const DW = std.dwarf;
-const llvm = @import("llvm/bindings.zig");
+const Builder = @import("llvm/Builder.zig");
+const llvm = if (build_options.have_llvm or true)
+ @import("llvm/bindings.zig")
+else
+ @compileError("LLVM unavailable");
const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
const build_options = @import("build_options");
@@ -34,7 +38,7 @@ const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
const Error = error{ OutOfMemory, CodegenFail };
-pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
+pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
var llvm_triple = std.ArrayList(u8).init(allocator);
defer llvm_triple.deinit();
@@ -207,7 +211,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
};
try llvm_triple.appendSlice(llvm_abi);
- return llvm_triple.toOwnedSliceSentinel(0);
+ return llvm_triple.toOwnedSlice();
}
pub fn targetOs(os_tag: std.Target.Os.Tag) llvm.OSType {
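targetTriple now hands back a plain []const u8 built with toOwnedSlice rather than a sentinel-terminated [:0]u8 from toOwnedSliceSentinel(0); the triple is now stored as a Builder string (see target_triple.toSlice further down), so the separate null-terminated allocation is no longer needed here. A standard-library-only sketch of the difference between the two ArrayList calls, assuming the 0.11-era API in which both return an error union:

const std = @import("std");

test "owned slice with and without a 0 sentinel" {
    const gpa = std.testing.allocator;
    var triple = std.ArrayList(u8).init(gpa);
    defer triple.deinit();
    try triple.appendSlice("x86_64-linux-gnu");

    // New behavior: a plain slice, no terminator appended.
    const plain = try triple.toOwnedSlice();
    defer gpa.free(plain);
    try std.testing.expectEqualStrings("x86_64-linux-gnu", plain);

    // Old behavior, for comparison: a null-terminated copy suitable for C APIs.
    try triple.appendSlice("x86_64-linux-gnu");
    const terminated = try triple.toOwnedSliceSentinel(0);
    defer gpa.free(terminated);
    try std.testing.expectEqual(@as(u8, 0), terminated[terminated.len]);
}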
@@ -327,17 +331,363 @@ pub fn supportsTailCall(target: std.Target) bool {
}
}
-/// TODO can this be done with simpler logic / different API binding?
-fn deleteLlvmGlobal(llvm_global: *llvm.Value) void {
- if (llvm_global.globalGetValueType().getTypeKind() == .Function) {
- llvm_global.deleteFunction();
- return;
+const DataLayoutBuilder = struct {
+ target: std.Target,
+
+ pub fn format(
+ self: DataLayoutBuilder,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ const is_aarch64_windows = self.target.cpu.arch == .aarch64 and self.target.os.tag == .windows;
+ try writer.writeByte(switch (self.target.cpu.arch.endian()) {
+ .Little => 'e',
+ .Big => 'E',
+ });
+ switch (self.target.cpu.arch) {
+ .amdgcn,
+ .nvptx,
+ .nvptx64,
+ => {},
+ .avr => try writer.writeAll("-P1"),
+ else => try writer.print("-m:{c}", .{@as(u8, switch (self.target.cpu.arch) {
+ .mips, .mipsel => 'm', // Mips mangling: Private symbols get a $ prefix.
+ else => switch (self.target.ofmt) {
+ .elf => 'e', // ELF mangling: Private symbols get a `.L` prefix.
+ //.goff => 'l', // GOFF mangling: Private symbols get a `@` prefix.
+ .macho => 'o', // Mach-O mangling: Private symbols get `L` prefix.
+ // Other symbols get a `_` prefix.
+ .coff => switch (self.target.os.tag) {
+ .windows => switch (self.target.cpu.arch) {
+ .x86 => 'x', // Windows x86 COFF mangling: Private symbols get the usual
+ // prefix. Regular C symbols get a `_` prefix. Functions with `__stdcall`,
+ //`__fastcall`, and `__vectorcall` have custom mangling that appends `@N`
+ // where N is the number of bytes used to pass parameters. C++ symbols
+ // starting with `?` are not mangled in any way.
+ else => 'w', // Windows COFF mangling: Similar to x, except that normal C
+ // symbols do not receive a `_` prefix.
+ },
+ else => 'e',
+ },
+ //.xcoff => 'a', // XCOFF mangling: Private symbols get a `L..` prefix.
+ else => 'e',
+ },
+ })}),
+ }
+ var any_non_integral = false;
+ const ptr_bit_width = self.target.ptrBitWidth();
+ var default_info = struct { size: u16, abi: u16, pref: u16, idx: u16 }{
+ .size = 64,
+ .abi = 64,
+ .pref = 64,
+ .idx = 64,
+ };
+ const addr_space_info = llvmAddrSpaceInfo(self.target);
+ for (addr_space_info, 0..) |info, i| {
+ assert((info.llvm == .default) == (i == 0));
+ if (info.non_integral) {
+ assert(info.llvm != .default);
+ any_non_integral = true;
+ }
+ const size = info.size orelse ptr_bit_width;
+ const abi = info.abi orelse ptr_bit_width;
+ const pref = info.pref orelse abi;
+ const idx = info.idx orelse size;
+ const matches_default =
+ size == default_info.size and
+ abi == default_info.abi and
+ pref == default_info.pref and
+ idx == default_info.idx;
+ if (info.llvm == .default) default_info = .{
+ .size = size,
+ .abi = abi,
+ .pref = pref,
+ .idx = idx,
+ };
+ if (self.target.cpu.arch == .aarch64_32) continue;
+ if (!info.force_in_data_layout and matches_default and
+ self.target.cpu.arch != .riscv64 and !is_aarch64_windows and
+ self.target.cpu.arch != .bpfeb and self.target.cpu.arch != .bpfel) continue;
+ try writer.writeAll("-p");
+ if (info.llvm != .default) try writer.print("{d}", .{@intFromEnum(info.llvm)});
+ try writer.print(":{d}:{d}", .{ size, abi });
+ if (pref != abi or idx != size or self.target.cpu.arch == .hexagon) {
+ try writer.print(":{d}", .{pref});
+ if (idx != size) try writer.print(":{d}", .{idx});
+ }
+ }
+ if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb())
+ try writer.writeAll("-Fi8"); // for thumb interwork
+ if (self.target.cpu.arch != .hexagon) {
+ if (self.target.cpu.arch == .s390x) try self.typeAlignment(.integer, 1, 8, 8, false, writer);
+ try self.typeAlignment(.integer, 8, 8, 8, false, writer);
+ try self.typeAlignment(.integer, 16, 16, 16, false, writer);
+ try self.typeAlignment(.integer, 32, if (is_aarch64_windows) 0 else 32, 32, false, writer);
+ try self.typeAlignment(.integer, 64, 32, 64, false, writer);
+ try self.typeAlignment(.integer, 128, 32, 64, false, writer);
+ if (backendSupportsF16(self.target)) try self.typeAlignment(.float, 16, 16, 16, false, writer);
+ try self.typeAlignment(.float, 32, 32, 32, false, writer);
+ try self.typeAlignment(.float, 64, 64, 64, false, writer);
+ if (backendSupportsF80(self.target)) try self.typeAlignment(.float, 80, 0, 0, false, writer);
+ try self.typeAlignment(.float, 128, 128, 128, false, writer);
+ }
+ switch (self.target.cpu.arch) {
+ .amdgcn => {
+ try self.typeAlignment(.vector, 16, 16, 16, false, writer);
+ try self.typeAlignment(.vector, 24, 32, 32, false, writer);
+ try self.typeAlignment(.vector, 32, 32, 32, false, writer);
+ try self.typeAlignment(.vector, 48, 64, 64, false, writer);
+ try self.typeAlignment(.vector, 96, 128, 128, false, writer);
+ try self.typeAlignment(.vector, 192, 256, 256, false, writer);
+ try self.typeAlignment(.vector, 256, 256, 256, false, writer);
+ try self.typeAlignment(.vector, 512, 512, 512, false, writer);
+ try self.typeAlignment(.vector, 1024, 1024, 1024, false, writer);
+ try self.typeAlignment(.vector, 2048, 2048, 2048, false, writer);
+ },
+ .ve => {},
+ else => {
+ try self.typeAlignment(.vector, 16, 32, 32, false, writer);
+ try self.typeAlignment(.vector, 32, 32, 32, false, writer);
+ try self.typeAlignment(.vector, 64, 64, 64, false, writer);
+ try self.typeAlignment(.vector, 128, 128, 128, true, writer);
+ },
+ }
+ if (self.target.os.tag != .windows and self.target.cpu.arch != .avr)
+ try self.typeAlignment(.aggregate, 0, 0, 64, false, writer);
+ for (@as([]const u24, switch (self.target.cpu.arch) {
+ .avr => &.{8},
+ .msp430 => &.{ 8, 16 },
+ .arm,
+ .armeb,
+ .mips,
+ .mipsel,
+ .powerpc,
+ .powerpcle,
+ .riscv32,
+ .sparc,
+ .sparcel,
+ .thumb,
+ .thumbeb,
+ => &.{32},
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .amdgcn,
+ .bpfeb,
+ .bpfel,
+ .mips64,
+ .mips64el,
+ .powerpc64,
+ .powerpc64le,
+ .riscv64,
+ .s390x,
+ .sparc64,
+ .ve,
+ .wasm32,
+ .wasm64,
+ => &.{ 32, 64 },
+ .hexagon => &.{ 16, 32 },
+ .x86 => &.{ 8, 16, 32 },
+ .nvptx,
+ .nvptx64,
+ => &.{ 16, 32, 64 },
+ .x86_64 => &.{ 8, 16, 32, 64 },
+ else => &.{},
+ }), 0..) |natural, index| switch (index) {
+ 0 => try writer.print("-n{d}", .{natural}),
+ else => try writer.print(":{d}", .{natural}),
+ };
+ if (self.target.cpu.arch == .hexagon) {
+ try self.typeAlignment(.integer, 64, 64, 64, true, writer);
+ try self.typeAlignment(.integer, 32, 32, 32, true, writer);
+ try self.typeAlignment(.integer, 16, 16, 16, true, writer);
+ try self.typeAlignment(.integer, 1, 8, 8, true, writer);
+ try self.typeAlignment(.float, 32, 32, 32, true, writer);
+ try self.typeAlignment(.float, 64, 64, 64, true, writer);
+ }
+ if (self.target.os.tag == .windows or self.target.cpu.arch == .avr)
+ try self.typeAlignment(.aggregate, 0, 0, 64, false, writer);
+ const stack_abi = self.target.stackAlignment() * 8;
+ if (self.target.os.tag == .windows or self.target.cpu.arch == .msp430 or
+ stack_abi != ptr_bit_width)
+ try writer.print("-S{d}", .{stack_abi});
+ switch (self.target.cpu.arch) {
+ .hexagon, .ve => {
+ try self.typeAlignment(.vector, 32, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 64, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 128, 128, 128, true, writer);
+ },
+ else => {},
+ }
+ if (self.target.cpu.arch != .amdgcn) {
+ try self.typeAlignment(.vector, 256, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 512, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 1024, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 2048, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 4096, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 8192, 128, 128, true, writer);
+ try self.typeAlignment(.vector, 16384, 128, 128, true, writer);
+ }
+ const alloca_addr_space = llvmAllocaAddressSpace(self.target);
+ if (alloca_addr_space != .default) try writer.print("-A{d}", .{@intFromEnum(alloca_addr_space)});
+ const global_addr_space = llvmDefaultGlobalAddressSpace(self.target);
+ if (global_addr_space != .default) try writer.print("-G{d}", .{@intFromEnum(global_addr_space)});
+ if (any_non_integral) {
+ try writer.writeAll("-ni");
+ for (addr_space_info) |info| if (info.non_integral)
+ try writer.print(":{d}", .{@intFromEnum(info.llvm)});
+ }
+ }
+
+ fn typeAlignment(
+ self: DataLayoutBuilder,
+ kind: enum { integer, vector, float, aggregate },
+ size: u24,
+ default_abi: u24,
+ default_pref: u24,
+ default_force_pref: bool,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ var abi = default_abi;
+ var pref = default_pref;
+ var force_abi = false;
+ var force_pref = default_force_pref;
+ if (kind == .float and size == 80) {
+ abi = 128;
+ pref = 128;
+ }
+ for (@as([]const std.Target.CType, switch (kind) {
+ .integer => &.{ .char, .short, .int, .long, .longlong },
+ .float => &.{ .float, .double, .longdouble },
+ .vector, .aggregate => &.{},
+ })) |cty| {
+ if (self.target.c_type_bit_size(cty) != size) continue;
+ abi = self.target.c_type_alignment(cty) * 8;
+ pref = self.target.c_type_preferred_alignment(cty) * 8;
+ break;
+ }
+ switch (kind) {
+ .integer => {
+ if (self.target.ptrBitWidth() <= 16 and size >= 128) return;
+ abi = @min(abi, self.target.maxIntAlignment() * 8);
+ switch (self.target.os.tag) {
+ .linux => switch (self.target.cpu.arch) {
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .mips,
+ .mipsel,
+ => pref = @max(pref, 32),
+ else => {},
+ },
+ else => {},
+ }
+ switch (self.target.cpu.arch) {
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .bpfeb,
+ .bpfel,
+ .nvptx,
+ .nvptx64,
+ .riscv64,
+ => if (size == 128) {
+ abi = size;
+ pref = size;
+ },
+ .hexagon => force_abi = true,
+ .mips64,
+ .mips64el,
+ => if (size <= 32) {
+ pref = 32;
+ },
+ .s390x => if (size <= 16) {
+ pref = 16;
+ },
+ .ve => if (size == 64) {
+ abi = size;
+ pref = size;
+ },
+ else => {},
+ }
+ },
+ .vector => if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) {
+ switch (size) {
+ 128 => abi = 64,
+ else => {},
+ }
+ } else if ((self.target.cpu.arch.isPPC64() and (size == 256 or size == 512)) or
+ (self.target.cpu.arch.isNvptx() and (size == 16 or size == 32)))
+ {
+ force_abi = true;
+ abi = size;
+ pref = size;
+ } else if (self.target.cpu.arch == .amdgcn and size <= 2048) {
+ force_abi = true;
+ } else if (self.target.cpu.arch == .hexagon and
+ ((size >= 32 and size <= 64) or (size >= 512 and size <= 2048)))
+ {
+ abi = size;
+ pref = size;
+ force_pref = true;
+ } else if (self.target.cpu.arch == .s390x and size == 128) {
+ abi = 64;
+ pref = 64;
+ force_pref = false;
+ } else if (self.target.cpu.arch == .ve and (size >= 64 and size <= 16384)) {
+ abi = 64;
+ pref = 64;
+ force_abi = true;
+ force_pref = true;
+ },
+ .float => switch (self.target.cpu.arch) {
+ .avr, .msp430, .sparc64 => if (size != 32 and size != 64) return,
+ .hexagon => if (size == 32 or size == 64) {
+ force_abi = true;
+ },
+ .aarch64_32 => if (size == 128) {
+ abi = size;
+ pref = size;
+ },
+ .ve => if (size == 64) {
+ abi = size;
+ pref = size;
+ },
+ else => {},
+ },
+ .aggregate => if (self.target.os.tag == .windows or
+ self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb())
+ {
+ pref = @min(pref, self.target.ptrBitWidth());
+ } else if (self.target.cpu.arch == .hexagon) {
+ abi = 0;
+ pref = 0;
+ } else if (self.target.cpu.arch == .s390x) {
+ abi = 8;
+ pref = 16;
+ } else if (self.target.cpu.arch == .msp430) {
+ abi = 8;
+ pref = 8;
+ },
+ }
+ if (kind != .vector and self.target.cpu.arch == .avr) {
+ force_abi = true;
+ abi = 8;
+ pref = 8;
+ }
+ if (!force_abi and abi == default_abi and pref == default_pref) return;
+ try writer.print("-{c}", .{@tagName(kind)[0]});
+ if (size != 0) try writer.print("{d}", .{size});
+ try writer.print(":{d}", .{abi});
+ if (pref != abi or force_pref) try writer.print(":{d}", .{pref});
}
- return llvm_global.deleteGlobal();
-}
+};
pub const Object = struct {
gpa: Allocator,
+ builder: Builder,
+
module: *Module,
llvm_module: *llvm.Module,
di_builder: ?*llvm.DIBuilder,
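The DataLayoutBuilder added above carries only a target and implements the standard std.fmt format hook, so the whole LLVM data-layout string is produced by printing the struct with "{}" (which is how it is used later, via builder.fmt). A minimal sketch of that custom-format pattern, with a made-up struct rather than the real DataLayoutBuilder:

const std = @import("std");

const Greeting = struct {
    name: []const u8,

    // Same shape as DataLayoutBuilder.format: std.fmt invokes this whenever
    // the value is printed with "{}".
    pub fn format(
        self: Greeting,
        comptime _: []const u8,
        _: std.fmt.FormatOptions,
        writer: anytype,
    ) @TypeOf(writer).Error!void {
        try writer.print("hello, {s}", .{self.name});
    }
};

test "a struct with a format method renders through std.fmt" {
    var buf: [32]u8 = undefined;
    const rendered = try std.fmt.bufPrint(&buf, "{}", .{Greeting{ .name = "zig" }});
    try std.testing.expectEqualStrings("hello, zig", rendered);
}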
@@ -347,7 +697,6 @@ pub const Object = struct {
/// - *Module.Decl (Non-Fn) => *DIGlobalVariable
di_map: std.AutoHashMapUnmanaged(*const anyopaque, *llvm.DINode),
di_compile_unit: ?*llvm.DICompileUnit,
- context: *llvm.Context,
target_machine: *llvm.TargetMachine,
target_data: *llvm.TargetData,
target: std.Target,
@@ -359,9 +708,9 @@ pub const Object = struct {
/// version of the name and incorrectly get function not found in the llvm module.
/// * it works for functions not all globals.
/// Therefore, this table keeps track of the mapping.
- decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
+ decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Global.Index),
/// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction.
- named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
+ named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Function.Index),
/// Maps Zig types to LLVM types. The table memory is backed by the GPA of
/// the compiler.
/// TODO when InternPool garbage collection is implemented, this map needs
@@ -371,16 +720,16 @@ pub const Object = struct {
/// The LLVM global table which holds the names corresponding to Zig errors.
/// Note that the values are not added until flushModule, when all errors in
/// the compilation are known.
- error_name_table: ?*llvm.Value,
+ error_name_table: Builder.Variable.Index,
/// This map is usually very close to empty. It tracks only the cases when a
/// second extern Decl could not be emitted with the correct name due to a
/// name collision.
extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void),
/// Memoizes a null `?usize` value.
- null_opt_addr: ?*llvm.Value,
+ null_opt_usize: Builder.Constant,
- pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type);
+ pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, Builder.Type);
/// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
/// want to iterate over it while adding entries to it.
@@ -394,138 +743,137 @@ pub const Object = struct {
}
pub fn init(gpa: Allocator, options: link.Options) !Object {
- const context = llvm.Context.create();
- errdefer context.dispose();
-
- initializeLLVMTarget(options.target.cpu.arch);
-
- const llvm_module = llvm.Module.createWithName(options.root_name.ptr, context);
- errdefer llvm_module.dispose();
-
const llvm_target_triple = try targetTriple(gpa, options.target);
defer gpa.free(llvm_target_triple);
- var error_message: [*:0]const u8 = undefined;
- var target: *llvm.Target = undefined;
- if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) {
- defer llvm.disposeMessage(error_message);
-
- log.err("LLVM failed to parse '{s}': {s}", .{ llvm_target_triple, error_message });
- return error.InvalidLlvmTriple;
- }
+ var builder = try Builder.init(.{
+ .allocator = gpa,
+ .use_lib_llvm = options.use_lib_llvm,
+ .strip = options.strip,
+ .name = options.root_name,
+ .target = options.target,
+ .triple = llvm_target_triple,
+ });
+ errdefer builder.deinit();
+
+ var target_machine: *llvm.TargetMachine = undefined;
+ var target_data: *llvm.TargetData = undefined;
+ if (builder.useLibLlvm()) {
+ if (!options.strip) {
+ switch (options.target.ofmt) {
+ .coff => builder.llvm.module.?.addModuleCodeViewFlag(),
+ else => builder.llvm.module.?.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"),
+ }
+ builder.llvm.di_builder = builder.llvm.module.?.createDIBuilder(true);
+
+ // Don't use the version string here; LLVM misparses it when it
+ // includes the git revision.
+ const producer = try builder.fmt("zig {d}.{d}.{d}", .{
+ build_options.semver.major,
+ build_options.semver.minor,
+ build_options.semver.patch,
+ });
- llvm_module.setTarget(llvm_target_triple.ptr);
- var opt_di_builder: ?*llvm.DIBuilder = null;
- errdefer if (opt_di_builder) |di_builder| di_builder.dispose();
+ // We fully resolve all paths at this point to avoid lack of source line info in stack
+ // traces or lack of debugging information which, if relative paths were used, would
+ // be very location dependent.
+ // TODO: the only concern I have with this is WASI as either host or target, should
+ // we leave the paths as relative then?
+ var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
+ const compile_unit_dir = blk: {
+ const path = d: {
+ const mod = options.module orelse break :d ".";
+ break :d mod.root_pkg.root_src_directory.path orelse ".";
+ };
+ if (std.fs.path.isAbsolute(path)) break :blk path;
+ break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was
+ };
+ const compile_unit_dir_z = try builder.gpa.dupeZ(u8, compile_unit_dir);
+ defer builder.gpa.free(compile_unit_dir_z);
+
+ builder.llvm.di_compile_unit = builder.llvm.di_builder.?.createCompileUnit(
+ DW.LANG.C99,
+ builder.llvm.di_builder.?.createFile(options.root_name, compile_unit_dir_z),
+ producer.toSlice(&builder).?,
+ options.optimize_mode != .Debug,
+ "", // flags
+ 0, // runtime version
+ "", // split name
+ 0, // dwo id
+ true, // emit debug info
+ );
+ }
- var di_compile_unit: ?*llvm.DICompileUnit = null;
+ const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug)
+ .None
+ else
+ .Aggressive;
- if (!options.strip) {
- switch (options.target.ofmt) {
- .coff => llvm_module.addModuleCodeViewFlag(),
- else => llvm_module.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"),
- }
- const di_builder = llvm_module.createDIBuilder(true);
- opt_di_builder = di_builder;
-
- // Don't use the version string here; LLVM misparses it when it
- // includes the git revision.
- const producer = try std.fmt.allocPrintZ(gpa, "zig {d}.{d}.{d}", .{
- build_options.semver.major,
- build_options.semver.minor,
- build_options.semver.patch,
- });
- defer gpa.free(producer);
-
- // We fully resolve all paths at this point to avoid lack of source line info in stack
- // traces or lack of debugging information which, if relative paths were used, would
- // be very location dependent.
- // TODO: the only concern I have with this is WASI as either host or target, should
- // we leave the paths as relative then?
- var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
- const compile_unit_dir = blk: {
- const path = d: {
- const mod = options.module orelse break :d ".";
- break :d mod.root_pkg.root_src_directory.path orelse ".";
- };
- if (std.fs.path.isAbsolute(path)) break :blk path;
- break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was
+ const reloc_mode: llvm.RelocMode = if (options.pic)
+ .PIC
+ else if (options.link_mode == .Dynamic)
+ llvm.RelocMode.DynamicNoPIC
+ else
+ .Static;
+
+ const code_model: llvm.CodeModel = switch (options.machine_code_model) {
+ .default => .Default,
+ .tiny => .Tiny,
+ .small => .Small,
+ .kernel => .Kernel,
+ .medium => .Medium,
+ .large => .Large,
};
- const compile_unit_dir_z = try gpa.dupeZ(u8, compile_unit_dir);
- defer gpa.free(compile_unit_dir_z);
-
- di_compile_unit = di_builder.createCompileUnit(
- DW.LANG.C99,
- di_builder.createFile(options.root_name, compile_unit_dir_z),
- producer,
- options.optimize_mode != .Debug,
- "", // flags
- 0, // runtime version
- "", // split name
- 0, // dwo id
- true, // emit debug info
- );
- }
- const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug)
- .None
- else
- .Aggressive;
+ // TODO handle float ABI better- it should depend on the ABI portion of std.Target
+ const float_abi: llvm.ABIType = .Default;
+
+ target_machine = llvm.TargetMachine.create(
+ builder.llvm.target.?,
+ builder.target_triple.toSlice(&builder).?,
+ if (options.target.cpu.model.llvm_name) |s| s.ptr else null,
+ options.llvm_cpu_features,
+ opt_level,
+ reloc_mode,
+ code_model,
+ options.function_sections,
+ float_abi,
+ if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null,
+ );
+ errdefer target_machine.dispose();
- const reloc_mode: llvm.RelocMode = if (options.pic)
- .PIC
- else if (options.link_mode == .Dynamic)
- llvm.RelocMode.DynamicNoPIC
- else
- .Static;
-
- const code_model: llvm.CodeModel = switch (options.machine_code_model) {
- .default => .Default,
- .tiny => .Tiny,
- .small => .Small,
- .kernel => .Kernel,
- .medium => .Medium,
- .large => .Large,
- };
+ target_data = target_machine.createTargetDataLayout();
+ errdefer target_data.dispose();
- // TODO handle float ABI better- it should depend on the ABI portion of std.Target
- const float_abi: llvm.ABIType = .Default;
-
- const target_machine = llvm.TargetMachine.create(
- target,
- llvm_target_triple.ptr,
- if (options.target.cpu.model.llvm_name) |s| s.ptr else null,
- options.llvm_cpu_features,
- opt_level,
- reloc_mode,
- code_model,
- options.function_sections,
- float_abi,
- if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null,
- );
- errdefer target_machine.dispose();
+ builder.llvm.module.?.setModuleDataLayout(target_data);
- const target_data = target_machine.createTargetDataLayout();
- errdefer target_data.dispose();
+ if (options.pic) builder.llvm.module.?.setModulePICLevel();
+ if (options.pie) builder.llvm.module.?.setModulePIELevel();
+ if (code_model != .Default) builder.llvm.module.?.setModuleCodeModel(code_model);
- llvm_module.setModuleDataLayout(target_data);
-
- if (options.pic) llvm_module.setModulePICLevel();
- if (options.pie) llvm_module.setModulePIELevel();
- if (code_model != .Default) llvm_module.setModuleCodeModel(code_model);
+ if (options.opt_bisect_limit >= 0) {
+ builder.llvm.context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit));
+ }
- if (options.opt_bisect_limit >= 0) {
- context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit));
+ builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = options.target }});
+ if (std.debug.runtime_safety) {
+ const rep = target_data.stringRep();
+ defer llvm.disposeMessage(rep);
+ std.testing.expectEqualStrings(
+ std.mem.span(rep),
+ builder.data_layout.toSlice(&builder).?,
+ ) catch unreachable;
+ }
}
- return Object{
+ return .{
.gpa = gpa,
+ .builder = builder,
.module = options.module.?,
- .llvm_module = llvm_module,
+ .llvm_module = builder.llvm.module.?,
.di_map = .{},
- .di_builder = opt_di_builder,
- .di_compile_unit = di_compile_unit,
- .context = context,
+ .di_builder = builder.llvm.di_builder,
+ .di_compile_unit = builder.llvm.di_compile_unit,
.target_machine = target_machine,
.target_data = target_data,
.target = options.target,
@@ -533,22 +881,17 @@ pub const Object = struct {
.named_enum_map = .{},
.type_map = .{},
.di_type_map = .{},
- .error_name_table = null,
+ .error_name_table = .none,
.extern_collisions = .{},
- .null_opt_addr = null,
+ .null_opt_usize = .no_init,
};
}
pub fn deinit(self: *Object, gpa: Allocator) void {
- if (self.di_builder) |dib| {
- dib.dispose();
- self.di_map.deinit(gpa);
- self.di_type_map.deinit(gpa);
- }
+ self.di_map.deinit(gpa);
+ self.di_type_map.deinit(gpa);
self.target_data.dispose();
self.target_machine.dispose();
- self.llvm_module.dispose();
- self.context.dispose();
self.decl_map.deinit(gpa);
self.named_enum_map.deinit(gpa);
self.type_map.deinit(gpa);
@@ -572,85 +915,108 @@ pub const Object = struct {
return slice.ptr;
}
- fn genErrorNameTable(o: *Object) !void {
+ fn genErrorNameTable(o: *Object) Allocator.Error!void {
// If o.error_name_table is null, there was no instruction that actually referenced the error table.
- const error_name_table_ptr_global = o.error_name_table orelse return;
+ const error_name_table_ptr_global = o.error_name_table;
+ if (error_name_table_ptr_global == .none) return;
const mod = o.module;
- const target = mod.getTarget();
-
- const llvm_ptr_ty = o.context.pointerType(0); // TODO: Address space
- const llvm_usize_ty = o.context.intType(target.ptrBitWidth());
- const type_fields = [_]*llvm.Type{
- llvm_ptr_ty,
- llvm_usize_ty,
- };
- const llvm_slice_ty = o.context.structType(&type_fields, type_fields.len, .False);
- const slice_ty = Type.slice_const_u8_sentinel_0;
- const slice_alignment = slice_ty.abiAlignment(mod);
const error_name_list = mod.global_error_set.keys();
- const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len);
+ const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len);
defer mod.gpa.free(llvm_errors);
- llvm_errors[0] = llvm_slice_ty.getUndef();
+ // TODO: Address space
+ const slice_ty = Type.slice_const_u8_sentinel_0;
+ const slice_alignment = slice_ty.abiAlignment(mod);
+ const llvm_usize_ty = try o.lowerType(Type.usize);
+ const llvm_slice_ty = try o.lowerType(slice_ty);
+ const llvm_table_ty = try o.builder.arrayType(error_name_list.len, llvm_slice_ty);
+
+ llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty);
for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| {
- const name = mod.intern_pool.stringToSlice(name_nts);
- const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False);
- const str_global = o.llvm_module.addGlobal(str_init.typeOf(), "");
- str_global.setInitializer(str_init);
- str_global.setLinkage(.Private);
- str_global.setGlobalConstant(.True);
- str_global.setUnnamedAddr(.True);
- str_global.setAlignment(1);
-
- const slice_fields = [_]*llvm.Value{
- str_global,
- llvm_usize_ty.constInt(name.len, .False),
+ const name = try o.builder.string(mod.intern_pool.stringToSlice(name_nts));
+ const str_init = try o.builder.stringNullConst(name);
+ const str_ty = str_init.typeOf(&o.builder);
+ const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), "");
+ str_llvm_global.setInitializer(str_init.toLlvm(&o.builder));
+ str_llvm_global.setLinkage(.Private);
+ str_llvm_global.setGlobalConstant(.True);
+ str_llvm_global.setUnnamedAddr(.True);
+ str_llvm_global.setAlignment(1);
+
+ var str_global = Builder.Global{
+ .linkage = .private,
+ .unnamed_addr = .unnamed_addr,
+ .type = str_ty,
+ .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
};
- llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
- }
+ var str_variable = Builder.Variable{
+ .global = @enumFromInt(o.builder.globals.count()),
+ .mutability = .constant,
+ .init = str_init,
+ .alignment = comptime Builder.Alignment.fromByteUnits(1),
+ };
+ try o.builder.llvm.globals.append(o.gpa, str_llvm_global);
+ const global_index = try o.builder.addGlobal(.empty, str_global);
+ try o.builder.variables.append(o.gpa, str_variable);
- const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len)));
+ llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{
+ global_index.toConst(),
+ try o.builder.intConst(llvm_usize_ty, name.toSlice(&o.builder).?.len),
+ });
+ }
- const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), "");
- error_name_table_global.setInitializer(error_name_table_init);
+ const error_name_table_init = try o.builder.arrayConst(llvm_table_ty, llvm_errors);
+ const error_name_table_global = o.llvm_module.addGlobal(llvm_table_ty.toLlvm(&o.builder), "");
+ error_name_table_global.setInitializer(error_name_table_init.toLlvm(&o.builder));
error_name_table_global.setLinkage(.Private);
error_name_table_global.setGlobalConstant(.True);
error_name_table_global.setUnnamedAddr(.True);
error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode
+ var global = Builder.Global{
+ .linkage = .private,
+ .unnamed_addr = .unnamed_addr,
+ .type = llvm_table_ty,
+ .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
+ };
+ var variable = Builder.Variable{
+ .global = @enumFromInt(o.builder.globals.count()),
+ .mutability = .constant,
+ .init = error_name_table_init,
+ .alignment = Builder.Alignment.fromByteUnits(slice_alignment),
+ };
+ try o.builder.llvm.globals.append(o.gpa, error_name_table_global);
+ _ = try o.builder.addGlobal(.empty, global);
+ try o.builder.variables.append(o.gpa, variable);
+
const error_name_table_ptr = error_name_table_global;
- error_name_table_ptr_global.setInitializer(error_name_table_ptr);
+ error_name_table_ptr_global.ptr(&o.builder).init = variable.global.toConst();
+ error_name_table_ptr_global.toLlvm(&o.builder).setInitializer(error_name_table_ptr);
}
- fn genCmpLtErrorsLenFunction(object: *Object) !void {
+ fn genCmpLtErrorsLenFunction(o: *Object) !void {
// If there is no such function in the module, it means the source code does not need it.
- const llvm_fn = object.llvm_module.getNamedFunction(lt_errors_fn_name) orelse return;
- const mod = object.module;
+ const name = o.builder.stringIfExists(lt_errors_fn_name) orelse return;
+ const llvm_fn = o.builder.getGlobal(name) orelse return;
+ const mod = o.module;
const errors_len = mod.global_error_set.count();
- // Delete previous implementation. We replace it with every flush() because the
- // total number of errors may have changed.
- while (llvm_fn.getFirstBasicBlock()) |bb| {
- bb.deleteBasicBlock();
- }
-
- const builder = object.context.createBuilder();
-
- const entry_block = object.context.appendBasicBlock(llvm_fn, "Entry");
- builder.positionBuilderAtEnd(entry_block);
- builder.clearCurrentDebugLocation();
+ var wip = try Builder.WipFunction.init(&o.builder, llvm_fn.ptrConst(&o.builder).kind.function);
+ defer wip.deinit();
+ wip.cursor = .{ .block = try wip.block(0, "Entry") };
// Example source of the following LLVM IR:
// fn __zig_lt_errors_len(index: u16) bool {
// return index < total_errors_len;
// }
- const lhs = llvm_fn.getParam(0);
- const rhs = lhs.typeOf().constInt(errors_len, .False);
- const is_lt = builder.buildICmp(.ULT, lhs, rhs, "");
- _ = builder.buildRet(is_lt);
+ const lhs = wip.arg(0);
+ const rhs = try o.builder.intValue(Builder.Type.err_int, errors_len);
+ const is_lt = try wip.icmp(.ult, lhs, rhs, "");
+ _ = try wip.ret(is_lt);
+ try wip.finish();
}
fn genModuleLevelAssembly(object: *Object) !void {
@@ -671,34 +1037,28 @@ pub const Object = struct {
// This map has externs with incorrect symbol names.
for (object.extern_collisions.keys()) |decl_index| {
- const entry = object.decl_map.getEntry(decl_index) orelse continue;
- const llvm_global = entry.value_ptr.*;
+ const global = object.decl_map.get(decl_index) orelse continue;
// Same logic as below but for externs instead of exports.
- const decl = mod.declPtr(decl_index);
- const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue;
- if (other_global == llvm_global) continue;
+ const decl_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(mod.declPtr(decl_index).name)) orelse continue;
+ const other_global = object.builder.getGlobal(decl_name) orelse continue;
+ if (other_global.eql(global, &object.builder)) continue;
- llvm_global.replaceAllUsesWith(other_global);
- deleteLlvmGlobal(llvm_global);
- entry.value_ptr.* = other_global;
+ try global.replace(other_global, &object.builder);
}
object.extern_collisions.clearRetainingCapacity();
- const export_keys = mod.decl_exports.keys();
- for (mod.decl_exports.values(), 0..) |export_list, i| {
- const decl_index = export_keys[i];
- const llvm_global = object.decl_map.get(decl_index) orelse continue;
+ for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| {
+ const global = object.decl_map.get(decl_index) orelse continue;
for (export_list.items) |exp| {
// Detect if the LLVM global has already been created as an extern. In such
// case, we need to replace all uses of it with this exported global.
- const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
+ const exp_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(exp.opts.name)) orelse continue;
- const other_global = object.getLlvmGlobal(exp_name.ptr) orelse continue;
- if (other_global == llvm_global) continue;
+ const other_global = object.builder.getGlobal(exp_name) orelse continue;
+ if (other_global.eql(global, &object.builder)) continue;
- other_global.replaceAllUsesWith(llvm_global);
- llvm_global.takeName(other_global);
- deleteLlvmGlobal(other_global);
+ try global.takeName(other_global, &object.builder);
+ try other_global.replace(global, &object.builder);
// Problem: now we need to replace in the decl_map that
// the extern decl index points to this new global. However we don't
// know the decl index.
@@ -744,20 +1104,9 @@ pub const Object = struct {
if (comp.verbose_llvm_ir) |path| {
if (std.mem.eql(u8, path, "-")) {
- self.llvm_module.dump();
+ self.builder.dump();
} else {
- const path_z = try comp.gpa.dupeZ(u8, path);
- defer comp.gpa.free(path_z);
-
- var error_message: [*:0]const u8 = undefined;
-
- if (self.llvm_module.printModuleToFile(path_z, &error_message).toBool()) {
- defer llvm.disposeMessage(error_message);
-
- log.err("dump LLVM module failed ir={s}: {s}", .{
- path, error_message,
- });
- }
+ _ = try self.builder.printToFile(path);
}
}
@@ -884,7 +1233,9 @@ pub const Object = struct {
.err_msg = null,
};
- const llvm_func = try o.resolveLlvmFunction(decl_index);
+ const function = try o.resolveLlvmFunction(decl_index);
+ const global = function.ptrConst(&o.builder).global;
+ const llvm_func = global.toLlvm(&o.builder);
if (func.analysis(ip).is_noinline) {
o.addFnAttr(llvm_func, "noinline");
@@ -921,24 +1272,27 @@ pub const Object = struct {
o.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
}
- if (ip.stringToSliceUnwrap(decl.@"linksection")) |section|
+ if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| {
+ function.ptr(&o.builder).section = try o.builder.string(section);
llvm_func.setSection(section);
-
- // Remove all the basic blocks of a function in order to start over, generating
- // LLVM IR from an empty function body.
- while (llvm_func.getFirstBasicBlock()) |bb| {
- bb.deleteBasicBlock();
}
- const builder = o.context.createBuilder();
+ var deinit_wip = true;
+ var wip = try Builder.WipFunction.init(&o.builder, function);
+ defer if (deinit_wip) wip.deinit();
+ wip.cursor = .{ .block = try wip.block(0, "Entry") };
- const entry_block = o.context.appendBasicBlock(llvm_func, "Entry");
- builder.positionBuilderAtEnd(entry_block);
+ const builder = wip.llvm.builder;
+ var llvm_arg_i: u32 = 0;
// This gets the LLVM values from the function and stores them in `dg.args`.
const fn_info = mod.typeToFunc(decl.ty).?;
const sret = firstParamSRet(fn_info, mod);
- const ret_ptr = if (sret) llvm_func.getParam(0) else null;
+ const ret_ptr: Builder.Value = if (sret) param: {
+ const param = wip.arg(llvm_arg_i);
+ llvm_arg_i += 1;
+ break :param param;
+ } else .none;
const gpa = o.gpa;
if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) {
@@ -949,207 +1303,183 @@ pub const Object = struct {
const err_return_tracing = fn_info.return_type.toType().isError(mod) and
mod.comp.bin_file.options.error_return_tracing;
- const err_ret_trace = if (err_return_tracing)
- llvm_func.getParam(@intFromBool(ret_ptr != null))
- else
- null;
+ const err_ret_trace: Builder.Value = if (err_return_tracing) param: {
+ const param = wip.arg(llvm_arg_i);
+ llvm_arg_i += 1;
+ break :param param;
+ } else .none;
// This is the list of args we will use that correspond directly to the AIR arg
// instructions. Depending on the calling convention, this list is not necessarily
// a bijection with the actual LLVM parameters of the function.
- var args = std.ArrayList(*llvm.Value).init(gpa);
- defer args.deinit();
+ var args: std.ArrayListUnmanaged(Builder.Value) = .{};
+ defer args.deinit(gpa);
{
- var llvm_arg_i = @as(c_uint, @intFromBool(ret_ptr != null)) + @intFromBool(err_return_tracing);
var it = iterateParamTypes(o, fn_info);
- while (it.next()) |lowering| switch (lowering) {
- .no_bits => continue,
- .byval => {
- assert(!it.byval_attr);
- const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types.get(ip)[param_index].toType();
- const param = llvm_func.getParam(llvm_arg_i);
- try args.ensureUnusedCapacity(1);
-
- if (isByRef(param_ty, mod)) {
- const alignment = param_ty.abiAlignment(mod);
- const param_llvm_ty = param.typeOf();
- const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
- const store_inst = builder.buildStore(param, arg_ptr);
- store_inst.setAlignment(alignment);
- args.appendAssumeCapacity(arg_ptr);
- } else {
- args.appendAssumeCapacity(param);
-
- o.addByValParamAttrs(llvm_func, param_ty, param_index, fn_info, llvm_arg_i);
- }
- llvm_arg_i += 1;
- },
- .byref => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const param_llvm_ty = try o.lowerType(param_ty);
- const param = llvm_func.getParam(llvm_arg_i);
- const alignment = param_ty.abiAlignment(mod);
-
- o.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
- llvm_arg_i += 1;
-
- try args.ensureUnusedCapacity(1);
-
- if (isByRef(param_ty, mod)) {
- args.appendAssumeCapacity(param);
- } else {
- const load_inst = builder.buildLoad(param_llvm_ty, param, "");
- load_inst.setAlignment(alignment);
- args.appendAssumeCapacity(load_inst);
- }
- },
- .byref_mut => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const param_llvm_ty = try o.lowerType(param_ty);
- const param = llvm_func.getParam(llvm_arg_i);
- const alignment = param_ty.abiAlignment(mod);
-
- o.addArgAttr(llvm_func, llvm_arg_i, "noundef");
- llvm_arg_i += 1;
-
- try args.ensureUnusedCapacity(1);
+ while (try it.next()) |lowering| {
+ try args.ensureUnusedCapacity(gpa, 1);
+
+ switch (lowering) {
+ .no_bits => continue,
+ .byval => {
+ assert(!it.byval_attr);
+ const param_index = it.zig_index - 1;
+ const param_ty = fn_info.param_types.get(ip)[param_index].toType();
+ const param = wip.arg(llvm_arg_i);
+
+ if (isByRef(param_ty, mod)) {
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const param_llvm_ty = param.typeOfWip(&wip);
+ const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
+ _ = try wip.store(.normal, param, arg_ptr, alignment);
+ args.appendAssumeCapacity(arg_ptr);
+ } else {
+ args.appendAssumeCapacity(param);
- if (isByRef(param_ty, mod)) {
- args.appendAssumeCapacity(param);
- } else {
- const load_inst = builder.buildLoad(param_llvm_ty, param, "");
- load_inst.setAlignment(alignment);
- args.appendAssumeCapacity(load_inst);
- }
- },
- .abi_sized_int => {
- assert(!it.byval_attr);
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const param = llvm_func.getParam(llvm_arg_i);
- llvm_arg_i += 1;
+ o.addByValParamAttrs(llvm_func, param_ty, param_index, fn_info, @intCast(llvm_arg_i));
+ }
+ llvm_arg_i += 1;
+ },
+ .byref => {
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const param = wip.arg(llvm_arg_i);
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
- const param_llvm_ty = try o.lowerType(param_ty);
- const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
- const int_llvm_ty = o.context.intType(abi_size * 8);
- const alignment = @max(
- param_ty.abiAlignment(mod),
- o.target_data.abiAlignmentOfType(int_llvm_ty),
- );
- const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
- const store_inst = builder.buildStore(param, arg_ptr);
- store_inst.setAlignment(alignment);
+ o.addByRefParamAttrs(llvm_func, @intCast(llvm_arg_i), @intCast(alignment.toByteUnits() orelse 0), it.byval_attr, param_llvm_ty);
+ llvm_arg_i += 1;
- try args.ensureUnusedCapacity(1);
+ if (isByRef(param_ty, mod)) {
+ args.appendAssumeCapacity(param);
+ } else {
+ args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
+ }
+ },
+ .byref_mut => {
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const param = wip.arg(llvm_arg_i);
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
- if (isByRef(param_ty, mod)) {
- args.appendAssumeCapacity(arg_ptr);
- } else {
- const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
- load_inst.setAlignment(alignment);
- args.appendAssumeCapacity(load_inst);
- }
- },
- .slice => {
- assert(!it.byval_attr);
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const ptr_info = param_ty.ptrInfo(mod);
+ o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "noundef");
+ llvm_arg_i += 1;
- if (math.cast(u5, it.zig_index - 1)) |i| {
- if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
- o.addArgAttr(llvm_func, llvm_arg_i, "noalias");
+ if (isByRef(param_ty, mod)) {
+ args.appendAssumeCapacity(param);
+ } else {
+ args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, ""));
}
- }
- if (param_ty.zigTypeTag(mod) != .Optional) {
- o.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
- }
- if (ptr_info.flags.is_const) {
- o.addArgAttr(llvm_func, llvm_arg_i, "readonly");
- }
- const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse
- @max(ptr_info.child.toType().abiAlignment(mod), 1);
- o.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
- const ptr_param = llvm_func.getParam(llvm_arg_i);
- llvm_arg_i += 1;
- const len_param = llvm_func.getParam(llvm_arg_i);
- llvm_arg_i += 1;
-
- const slice_llvm_ty = try o.lowerType(param_ty);
- const partial = builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr_param, 0, "");
- const aggregate = builder.buildInsertValue(partial, len_param, 1, "");
- try args.append(aggregate);
- },
- .multiple_llvm_types => {
- assert(!it.byval_attr);
- const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const param_llvm_ty = try o.lowerType(param_ty);
- const param_alignment = param_ty.abiAlignment(mod);
- const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
- const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False);
- for (field_types, 0..) |_, field_i_usize| {
- const field_i = @as(c_uint, @intCast(field_i_usize));
- const param = llvm_func.getParam(llvm_arg_i);
+ },
+ .abi_sized_int => {
+ assert(!it.byval_attr);
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
- const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
- const store_inst = builder.buildStore(param, field_ptr);
- store_inst.setAlignment(target.ptrBitWidth() / 8);
- }
- const is_by_ref = isByRef(param_ty, mod);
- const loaded = if (is_by_ref) arg_ptr else l: {
- const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
- load_inst.setAlignment(param_alignment);
- break :l load_inst;
- };
- try args.append(loaded);
- },
- .as_u16 => {
- assert(!it.byval_attr);
- const param = llvm_func.getParam(llvm_arg_i);
- llvm_arg_i += 1;
- const casted = builder.buildBitCast(param, o.context.halfType(), "");
- try args.ensureUnusedCapacity(1);
- args.appendAssumeCapacity(casted);
- },
- .float_array => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const param_llvm_ty = try o.lowerType(param_ty);
- const param = llvm_func.getParam(llvm_arg_i);
- llvm_arg_i += 1;
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8));
+ const alignment = Builder.Alignment.fromByteUnits(@max(
+ param_ty.abiAlignment(mod),
+ o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)),
+ ));
+ const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
+ _ = try wip.store(.normal, param, arg_ptr, alignment);
+
+ args.appendAssumeCapacity(if (isByRef(param_ty, mod))
+ arg_ptr
+ else
+ try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
+ },
+ .slice => {
+ assert(!it.byval_attr);
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const ptr_info = param_ty.ptrInfo(mod);
+
+ if (math.cast(u5, it.zig_index - 1)) |i| {
+ if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
+ o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "noalias");
+ }
+ }
+ if (param_ty.zigTypeTag(mod) != .Optional) {
+ o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "nonnull");
+ }
+ if (ptr_info.flags.is_const) {
+ o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "readonly");
+ }
+ const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse
+ @max(ptr_info.child.toType().abiAlignment(mod), 1);
+ o.addArgAttrInt(llvm_func, @intCast(llvm_arg_i), "align", elem_align);
+ const ptr_param = wip.arg(llvm_arg_i + 0);
+ const len_param = wip.arg(llvm_arg_i + 1);
+ llvm_arg_i += 2;
+
+ const slice_llvm_ty = try o.lowerType(param_ty);
+ args.appendAssumeCapacity(
+ try wip.buildAggregate(slice_llvm_ty, &.{ ptr_param, len_param }, ""),
+ );
+ },
+ .multiple_llvm_types => {
+ assert(!it.byval_attr);
+ const field_types = it.types_buffer[0..it.types_len];
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const param_alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target);
+ const llvm_ty = try o.builder.structType(.normal, field_types);
+ for (0..field_types.len) |field_i| {
+ const param = wip.arg(llvm_arg_i);
+ llvm_arg_i += 1;
+ const field_ptr = try wip.gepStruct(llvm_ty, arg_ptr, field_i, "");
+ const alignment =
+ Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
+ _ = try wip.store(.normal, param, field_ptr, alignment);
+ }
- const alignment = param_ty.abiAlignment(mod);
- const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
- _ = builder.buildStore(param, arg_ptr);
+ const is_by_ref = isByRef(param_ty, mod);
+ args.appendAssumeCapacity(if (is_by_ref)
+ arg_ptr
+ else
+ try wip.load(.normal, param_llvm_ty, arg_ptr, param_alignment, ""));
+ },
+ .as_u16 => {
+ assert(!it.byval_attr);
+ const param = wip.arg(llvm_arg_i);
+ llvm_arg_i += 1;
+ args.appendAssumeCapacity(try wip.cast(.bitcast, param, .half, ""));
+ },
+ .float_array => {
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const param = wip.arg(llvm_arg_i);
+ llvm_arg_i += 1;
- if (isByRef(param_ty, mod)) {
- try args.append(arg_ptr);
- } else {
- const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
- load_inst.setAlignment(alignment);
- try args.append(load_inst);
- }
- },
- .i32_array, .i64_array => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const param_llvm_ty = try o.lowerType(param_ty);
- const param = llvm_func.getParam(llvm_arg_i);
- llvm_arg_i += 1;
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
+ _ = try wip.store(.normal, param, arg_ptr, alignment);
- const alignment = param_ty.abiAlignment(mod);
- const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
- _ = builder.buildStore(param, arg_ptr);
+ args.appendAssumeCapacity(if (isByRef(param_ty, mod))
+ arg_ptr
+ else
+ try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
+ },
+ .i32_array, .i64_array => {
+ const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const param = wip.arg(llvm_arg_i);
+ llvm_arg_i += 1;
- if (isByRef(param_ty, mod)) {
- try args.append(arg_ptr);
- } else {
- const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
- load_inst.setAlignment(alignment);
- try args.append(load_inst);
- }
- },
- };
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
+ _ = try wip.store(.normal, param, arg_ptr, alignment);
+
+ args.appendAssumeCapacity(if (isByRef(param_ty, mod))
+ arg_ptr
+ else
+ try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
+ },
+ }
+ }
}
var di_file: ?*llvm.DIFile = null;
@@ -1191,16 +1521,15 @@ pub const Object = struct {
.gpa = gpa,
.air = air,
.liveness = liveness,
- .context = o.context,
.dg = &dg,
+ .wip = wip,
.builder = builder,
.ret_ptr = ret_ptr,
.args = args.items,
.arg_index = 0,
.func_inst_table = .{},
- .llvm_func = llvm_func,
.blocks = .{},
- .single_threaded = mod.comp.bin_file.options.single_threaded,
+ .sync_scope = if (mod.comp.bin_file.options.single_threaded) .singlethread else .system,
.di_scope = di_scope,
.di_file = di_file,
.base_line = dg.decl.src_line,
@@ -1209,6 +1538,7 @@ pub const Object = struct {
.err_ret_trace = err_ret_trace,
};
defer fg.deinit();
+ deinit_wip = false;
fg.genBody(air.getMainBody()) catch |err| switch (err) {
error.CodegenFail => {
@@ -1220,6 +1550,8 @@ pub const Object = struct {
else => |e| return e,
};
+ try fg.wip.finish();
+
try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
@@ -1243,14 +1575,6 @@ pub const Object = struct {
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
- /// TODO replace this with a call to `Module::getNamedValue`. This will require adding
- /// a new wrapper in zig_llvm.h/zig_llvm.cpp.
- fn getLlvmGlobal(o: Object, name: [*:0]const u8) ?*llvm.Value {
- if (o.llvm_module.getNamedFunction(name)) |x| return x;
- if (o.llvm_module.getNamedGlobal(name)) |x| return x;
- return null;
- }
-
pub fn updateDeclExports(
self: *Object,
mod: *Module,
@@ -1260,93 +1584,133 @@ pub const Object = struct {
const gpa = mod.gpa;
// If the module does not already have the function, we ignore this function call
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
- const llvm_global = self.decl_map.get(decl_index) orelse return;
+ const global = self.decl_map.get(decl_index) orelse return;
+ const llvm_global = global.toLlvm(&self.builder);
const decl = mod.declPtr(decl_index);
if (decl.isExtern(mod)) {
- var free_decl_name = false;
const decl_name = decl_name: {
const decl_name = mod.intern_pool.stringToSlice(decl.name);
if (mod.getTarget().isWasm() and try decl.isFunction(mod)) {
if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
if (!std.mem.eql(u8, lib_name, "c")) {
- free_decl_name = true;
- break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{
- decl_name, lib_name,
- });
+ break :decl_name try self.builder.fmt("{s}|{s}", .{ decl_name, lib_name });
}
}
}
- break :decl_name decl_name;
+ break :decl_name try self.builder.string(decl_name);
};
- defer if (free_decl_name) gpa.free(decl_name);
- llvm_global.setValueName(decl_name);
- if (self.getLlvmGlobal(decl_name)) |other_global| {
- if (other_global != llvm_global) {
+ if (self.builder.getGlobal(decl_name)) |other_global| {
+ if (other_global.toLlvm(&self.builder) != llvm_global) {
try self.extern_collisions.put(gpa, decl_index, {});
}
}
+
+ try global.rename(decl_name, &self.builder);
+ global.ptr(&self.builder).unnamed_addr = .default;
llvm_global.setUnnamedAddr(.False);
+ global.ptr(&self.builder).linkage = .external;
llvm_global.setLinkage(.External);
- if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+ if (mod.wantDllExports()) {
+ global.ptr(&self.builder).dll_storage_class = .default;
+ llvm_global.setDLLStorageClass(.Default);
+ }
if (self.di_map.get(decl)) |di_node| {
+ const decl_name_slice = decl_name.toSlice(&self.builder).?;
if (try decl.isFunction(mod)) {
- const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node));
- const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
+ const di_func: *llvm.DISubprogram = @ptrCast(di_node);
+ const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len);
di_func.replaceLinkageName(linkage_name);
} else {
- const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node));
- const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
+ const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node);
+ const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len);
di_global.replaceLinkageName(linkage_name);
}
}
- if (decl.val.getVariable(mod)) |variable| {
- if (variable.is_threadlocal) {
+ if (decl.val.getVariable(mod)) |decl_var| {
+ if (decl_var.is_threadlocal) {
+ global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local =
+ .generaldynamic;
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
+ global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local =
+ .default;
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
- if (variable.is_weak_linkage) {
+ if (decl_var.is_weak_linkage) {
+ global.ptr(&self.builder).linkage = .extern_weak;
llvm_global.setLinkage(.ExternalWeak);
}
}
+ global.ptr(&self.builder).updateAttributes();
} else if (exports.len != 0) {
- const exp_name = mod.intern_pool.stringToSlice(exports[0].opts.name);
- llvm_global.setValueName2(exp_name.ptr, exp_name.len);
+ const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exports[0].opts.name));
+ try global.rename(exp_name, &self.builder);
+ global.ptr(&self.builder).unnamed_addr = .default;
llvm_global.setUnnamedAddr(.False);
- if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
+ if (mod.wantDllExports()) {
+ global.ptr(&self.builder).dll_storage_class = .dllexport;
+ llvm_global.setDLLStorageClass(.DLLExport);
+ }
if (self.di_map.get(decl)) |di_node| {
+ const exp_name_slice = exp_name.toSlice(&self.builder).?;
if (try decl.isFunction(mod)) {
- const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node));
- const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
+ const di_func: *llvm.DISubprogram = @ptrCast(di_node);
+ const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len);
di_func.replaceLinkageName(linkage_name);
} else {
- const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node));
- const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
+ const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node);
+ const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len);
di_global.replaceLinkageName(linkage_name);
}
}
switch (exports[0].opts.linkage) {
.Internal => unreachable,
- .Strong => llvm_global.setLinkage(.External),
- .Weak => llvm_global.setLinkage(.WeakODR),
- .LinkOnce => llvm_global.setLinkage(.LinkOnceODR),
+ .Strong => {
+ global.ptr(&self.builder).linkage = .external;
+ llvm_global.setLinkage(.External);
+ },
+ .Weak => {
+ global.ptr(&self.builder).linkage = .weak_odr;
+ llvm_global.setLinkage(.WeakODR);
+ },
+ .LinkOnce => {
+ global.ptr(&self.builder).linkage = .linkonce_odr;
+ llvm_global.setLinkage(.LinkOnceODR);
+ },
}
switch (exports[0].opts.visibility) {
- .default => llvm_global.setVisibility(.Default),
- .hidden => llvm_global.setVisibility(.Hidden),
- .protected => llvm_global.setVisibility(.Protected),
+ .default => {
+ global.ptr(&self.builder).visibility = .default;
+ llvm_global.setVisibility(.Default);
+ },
+ .hidden => {
+ global.ptr(&self.builder).visibility = .hidden;
+ llvm_global.setVisibility(.Hidden);
+ },
+ .protected => {
+ global.ptr(&self.builder).visibility = .protected;
+ llvm_global.setVisibility(.Protected);
+ },
}
if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| {
+ switch (global.ptrConst(&self.builder).kind) {
+ inline .variable, .function => |impl_index| impl_index.ptr(&self.builder).section =
+ try self.builder.string(section),
+ else => unreachable,
+ }
llvm_global.setSection(section);
}
- if (decl.val.getVariable(mod)) |variable| {
- if (variable.is_threadlocal) {
+ if (decl.val.getVariable(mod)) |decl_var| {
+ if (decl_var.is_threadlocal) {
+ global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local =
+ .generaldynamic;
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
}
}
+ global.ptr(&self.builder).updateAttributes();
// If a Decl is exported more than one time (which is rare),
// we add aliases for all but the first export.
@@ -1361,7 +1725,7 @@ pub const Object = struct {
alias.setAliasee(llvm_global);
} else {
_ = self.llvm_module.addAlias(
- llvm_global.globalGetValueType(),
+ global.ptrConst(&self.builder).type.toLlvm(&self.builder),
0,
llvm_global,
exp_name_z,
@@ -1369,32 +1733,42 @@ pub const Object = struct {
}
}
} else {
- const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
- llvm_global.setValueName2(fqn.ptr, fqn.len);
+ const fqn = try self.builder.string(mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)));
+ try global.rename(fqn, &self.builder);
+ global.ptr(&self.builder).linkage = .internal;
llvm_global.setLinkage(.Internal);
- if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+ if (mod.wantDllExports()) {
+ global.ptr(&self.builder).dll_storage_class = .default;
+ llvm_global.setDLLStorageClass(.Default);
+ }
+ global.ptr(&self.builder).unnamed_addr = .unnamed_addr;
llvm_global.setUnnamedAddr(.True);
- if (decl.val.getVariable(mod)) |variable| {
+ if (decl.val.getVariable(mod)) |decl_var| {
const single_threaded = mod.comp.bin_file.options.single_threaded;
- if (variable.is_threadlocal and !single_threaded) {
+ if (decl_var.is_threadlocal and !single_threaded) {
+ global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local =
+ .generaldynamic;
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
+ global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local =
+ .default;
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
}
+ global.ptr(&self.builder).updateAttributes();
}
}
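For extern wasm functions whose lib_name is not "c", the export update above renames the symbol to the "{s}|{s}" form, presumably so the import module name can be recovered later in the link. A tiny standalone illustration of that format, with made-up names:

const std = @import("std");

test "wasm extern symbol name carries its lib_name" {
    const gpa = std.testing.allocator;
    const decl_name = "my_func"; // hypothetical extern fn name
    const lib_name = "env"; // hypothetical non-"c" import module
    const mangled = try std.fmt.allocPrint(gpa, "{s}|{s}", .{ decl_name, lib_name });
    defer gpa.free(mangled);
    try std.testing.expectEqualStrings("my_func|env", mangled);
}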
pub fn freeDecl(self: *Object, decl_index: Module.Decl.Index) void {
- const llvm_value = self.decl_map.get(decl_index) orelse return;
- llvm_value.deleteGlobal();
+ const global = self.decl_map.get(decl_index) orelse return;
+ global.toLlvm(&self.builder).deleteGlobal();
}
fn getDIFile(o: *Object, gpa: Allocator, file: *const Module.File) !*llvm.DIFile {
const gop = try o.di_map.getOrPut(gpa, file);
errdefer assert(o.di_map.remove(file));
if (gop.found_existing) {
- return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*));
+ return @ptrCast(gop.value_ptr.*);
}
const dir_path_z = d: {
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
@@ -1542,7 +1916,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
enumerators.ptr,
- @as(c_int, @intCast(enumerators.len)),
+ @intCast(enumerators.len),
try o.lowerDebugType(int_ty, .full),
"",
);
@@ -1717,7 +2091,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
try o.lowerDebugType(ty.childType(mod), .full),
- @as(i64, @intCast(ty.arrayLen(mod))),
+ @intCast(ty.arrayLen(mod)),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
@@ -2022,7 +2396,7 @@ pub const Object = struct {
0, // flags
null, // derived from
di_fields.items.ptr,
- @as(c_int, @intCast(di_fields.items.len)),
+ @intCast(di_fields.items.len),
0, // run time lang
null, // vtable holder
"", // unique id
@@ -2109,7 +2483,7 @@ pub const Object = struct {
0, // flags
null, // derived from
di_fields.items.ptr,
- @as(c_int, @intCast(di_fields.items.len)),
+ @intCast(di_fields.items.len),
0, // run time lang
null, // vtable holder
"", // unique id
@@ -2221,7 +2595,7 @@ pub const Object = struct {
ty.abiAlignment(mod) * 8, // align in bits
0, // flags
di_fields.items.ptr,
- @as(c_int, @intCast(di_fields.items.len)),
+ @intCast(di_fields.items.len),
0, // run time lang
"", // unique id
);
@@ -2334,7 +2708,7 @@ pub const Object = struct {
const fn_di_ty = dib.createSubroutineType(
param_di_types.items.ptr,
- @as(c_int, @intCast(param_di_types.items.len)),
+ @intCast(param_di_types.items.len),
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -2420,52 +2794,16 @@ pub const Object = struct {
return buffer.toOwnedSliceSentinel(0);
}
- fn getNullOptAddr(o: *Object) !*llvm.Value {
- if (o.null_opt_addr) |global| return global;
-
- const mod = o.module;
- const target = mod.getTarget();
- const ty = try mod.intern(.{ .opt_type = .usize_type });
- const null_opt_usize = try mod.intern(.{ .opt = .{
- .ty = ty,
- .val = .none,
- } });
-
- const llvm_init = try o.lowerValue(.{
- .ty = ty.toType(),
- .val = null_opt_usize.toValue(),
- });
- const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
- const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
- const global = o.llvm_module.addGlobalInAddressSpace(
- llvm_init.typeOf(),
- "",
- llvm_actual_addrspace,
- );
- global.setLinkage(.Internal);
- global.setUnnamedAddr(.True);
- global.setAlignment(ty.toType().abiAlignment(mod));
- global.setInitializer(llvm_init);
-
- const addrspace_casted_global = if (llvm_wanted_addrspace != llvm_actual_addrspace)
- global.constAddrSpaceCast(o.context.pointerType(llvm_wanted_addrspace))
- else
- global;
-
- o.null_opt_addr = addrspace_casted_global;
- return addrspace_casted_global;
- }
-
/// If the llvm function does not exist, create it.
/// Note that this can be called before the function's semantic analysis has
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
- fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) !*llvm.Value {
+ fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Function.Index {
const mod = o.module;
const gpa = o.gpa;
const decl = mod.declPtr(decl_index);
const zig_fn_type = decl.ty;
const gop = try o.decl_map.getOrPut(gpa, decl_index);
- if (gop.found_existing) return gop.value_ptr.*;
+ if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function;
assert(decl.has_tv);
const fn_info = mod.typeToFunc(zig_fn_type).?;
@@ -2474,16 +2812,25 @@ pub const Object = struct {
const fn_type = try o.lowerType(zig_fn_type);
- const fqn = try decl.getFullyQualifiedName(mod);
const ip = &mod.intern_pool;
+ const fqn = try o.builder.string(ip.stringToSlice(try decl.getFullyQualifiedName(mod)));
const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_fn = o.llvm_module.addFunctionInAddressSpace(ip.stringToSlice(fqn), fn_type, llvm_addrspace);
- gop.value_ptr.* = llvm_fn;
+ const llvm_fn = o.llvm_module.addFunctionInAddressSpace(fqn.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder), @intFromEnum(llvm_addrspace));
+
+ var global = Builder.Global{
+ .type = fn_type,
+ .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) },
+ };
+ var function = Builder.Function{
+ .global = @enumFromInt(o.builder.globals.count()),
+ };
const is_extern = decl.isExtern(mod);
if (!is_extern) {
+ global.linkage = .internal;
llvm_fn.setLinkage(.Internal);
+ global.unnamed_addr = .unnamed_addr;
llvm_fn.setUnnamedAddr(.True);
} else {
if (target.isWasm()) {
@@ -2500,7 +2847,7 @@ pub const Object = struct {
o.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
o.addArgAttr(llvm_fn, 0, "noalias");
- const raw_llvm_ret_ty = try o.lowerType(fn_info.return_type.toType());
+ const raw_llvm_ret_ty = (try o.lowerType(fn_info.return_type.toType())).toLlvm(&o.builder);
llvm_fn.addSretAttr(raw_llvm_ret_ty);
}
@@ -2528,7 +2875,8 @@ pub const Object = struct {
}
if (fn_info.alignment.toByteUnitsOptional()) |a| {
- llvm_fn.setAlignment(@as(c_uint, @intCast(a)));
+ function.alignment = Builder.Alignment.fromByteUnits(a);
+ llvm_fn.setAlignment(@intCast(a));
}
// Function attributes that are independent of analysis results of the function body.
@@ -2544,7 +2892,7 @@ pub const Object = struct {
var it = iterateParamTypes(o, fn_info);
it.llvm_index += @intFromBool(sret);
it.llvm_index += @intFromBool(err_return_tracing);
- while (it.next()) |lowering| switch (lowering) {
+ while (try it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = fn_info.param_types.get(ip)[param_index].toType();
@@ -2576,7 +2924,10 @@ pub const Object = struct {
};
}
- return llvm_fn;
+ try o.builder.llvm.globals.append(o.gpa, llvm_fn);
+ gop.value_ptr.* = try o.builder.addGlobal(fqn, global);
+ try o.builder.functions.append(o.gpa, function);
+ return global.kind.function;
}
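resolveLlvmFunction takes the new function's index from `functions.items.len` before the matching `append`, so the `Global` entry can refer to it while the globals and functions lists grow in lock step. A small self-contained sketch of that reserve-then-append pattern; the index type and payload here are invented:

const std = @import("std");

const FuncIndex = enum(u32) { _ };

test "reserve an index before appending" {
    const gpa = std.testing.allocator;
    var funcs = std.ArrayListUnmanaged(u8){};
    defer funcs.deinit(gpa);

    // The index is chosen from the current length...
    const index: FuncIndex = @enumFromInt(funcs.items.len);
    // ...and becomes valid once the matching append lands.
    try funcs.append(gpa, 42);
    try std.testing.expectEqual(@as(u8, 42), funcs.items[@intFromEnum(index)]);
}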
fn addCommonFnAttributes(o: *Object, llvm_fn: *llvm.Value) void {
@@ -2622,65 +2973,80 @@ pub const Object = struct {
}
}
- fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Error!*llvm.Value {
+ fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Variable.Index {
const gop = try o.decl_map.getOrPut(o.gpa, decl_index);
- if (gop.found_existing) return gop.value_ptr.*;
+ if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable;
errdefer assert(o.decl_map.remove(decl_index));
const mod = o.module;
const decl = mod.declPtr(decl_index);
- const fqn = try decl.getFullyQualifiedName(mod);
+ const fqn = try o.builder.string(mod.intern_pool.stringToSlice(
+ try decl.getFullyQualifiedName(mod),
+ ));
const target = mod.getTarget();
- const llvm_type = try o.lowerType(decl.ty);
- const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
+ var global = Builder.Global{
+ .addr_space = toLlvmGlobalAddressSpace(decl.@"addrspace", target),
+ .type = try o.lowerType(decl.ty),
+ .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
+ };
+ var variable = Builder.Variable{
+ .global = @enumFromInt(o.builder.globals.count()),
+ };
+ const is_extern = decl.isExtern(mod);
+ const name = if (is_extern)
+ try o.builder.string(mod.intern_pool.stringToSlice(decl.name))
+ else
+ fqn;
const llvm_global = o.llvm_module.addGlobalInAddressSpace(
- llvm_type,
- mod.intern_pool.stringToSlice(fqn),
- llvm_actual_addrspace,
+ global.type.toLlvm(&o.builder),
+ fqn.toSlice(&o.builder).?,
+ @intFromEnum(global.addr_space),
);
- gop.value_ptr.* = llvm_global;
// This is needed for declarations created by `@extern`.
- if (decl.isExtern(mod)) {
- llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name));
+ if (is_extern) {
+ global.unnamed_addr = .default;
llvm_global.setUnnamedAddr(.False);
+ global.linkage = .external;
llvm_global.setLinkage(.External);
- if (decl.val.getVariable(mod)) |variable| {
+ if (decl.val.getVariable(mod)) |decl_var| {
const single_threaded = mod.comp.bin_file.options.single_threaded;
- if (variable.is_threadlocal and !single_threaded) {
+ if (decl_var.is_threadlocal and !single_threaded) {
+ variable.thread_local = .generaldynamic;
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
+ variable.thread_local = .default;
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
- if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
+ if (decl_var.is_weak_linkage) {
+ global.linkage = .extern_weak;
+ llvm_global.setLinkage(.ExternalWeak);
+ }
}
} else {
+ global.linkage = .internal;
llvm_global.setLinkage(.Internal);
+ global.unnamed_addr = .unnamed_addr;
llvm_global.setUnnamedAddr(.True);
}
- return llvm_global;
- }
-
- fn isUnnamedType(o: *Object, ty: Type, val: *llvm.Value) bool {
- // Once `lowerType` succeeds, successive calls to it with the same Zig type
- // are guaranteed to succeed. So if a call to `lowerType` fails here it means
- // it is the first time lowering the type, which means the value can't possibly
- // have that type.
- const llvm_ty = o.lowerType(ty) catch return true;
- return val.typeOf() != llvm_ty;
+ try o.builder.llvm.globals.append(o.gpa, llvm_global);
+ gop.value_ptr.* = try o.builder.addGlobal(name, global);
+ try o.builder.variables.append(o.gpa, variable);
+ return global.kind.variable;
}
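Both resolve functions memoize through `decl_map` with `getOrPut` and roll the reservation back with `errdefer` if a later step fails. A standalone sketch of that caching shape, with invented key and value types standing in for decl indices and globals:

const std = @import("std");

fn lowerOnce(key: u32) !u64 {
    return @as(u64, key) * 2; // stand-in for the real lowering work
}

fn getOrLower(map: *std.AutoHashMap(u32, u64), key: u32) !u64 {
    const gop = try map.getOrPut(key);
    if (gop.found_existing) return gop.value_ptr.*;
    // Drop the reserved slot if the work below fails.
    errdefer _ = map.remove(key);

    gop.value_ptr.* = try lowerOnce(key);
    return gop.value_ptr.*;
}

test "getOrPut memoization with errdefer cleanup" {
    const gpa = std.testing.allocator;
    var map = std.AutoHashMap(u32, u64).init(gpa);
    defer map.deinit();

    try std.testing.expectEqual(@as(u64, 6), try getOrLower(&map, 3));
    try std.testing.expectEqual(@as(u64, 6), try getOrLower(&map, 3)); // cached
}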
- fn lowerType(o: *Object, t: Type) Allocator.Error!*llvm.Type {
- const llvm_ty = try lowerTypeInner(o, t);
+ fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type {
+ const ty = try o.lowerTypeInner(t);
const mod = o.module;
if (std.debug.runtime_safety and false) check: {
+ const llvm_ty = ty.toLlvm(&o.builder);
if (t.zigTypeTag(mod) == .Opaque) break :check;
if (!t.hasRuntimeBits(mod)) break :check;
- if (!llvm_ty.isSized().toBool()) break :check;
+ if (!try ty.isSized(&o.builder)) break :check;
const zig_size = t.abiSize(mod);
const llvm_size = o.target_data.abiSizeOfType(llvm_ty);
@@ -2690,456 +3056,511 @@ pub const Object = struct {
});
}
}
- return llvm_ty;
+ return ty;
}
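lowerTypeInner below maps tags such as `.u8_type` straight to `Builder.Type` values by slicing the tag name at comptime inside an inline switch prong. A self-contained illustration of the same `@tagName` slicing idea, using a made-up enum and integer parsing instead of `@field`:

const std = @import("std");

const Tag = enum { u8_type, u32_type, i128_type }; // hypothetical stand-in for the InternPool tags

fn bitWidth(comptime tag: Tag) u16 {
    const name = @tagName(tag);
    // Strip the leading signedness letter and the "_type" suffix, e.g. "u32_type" -> "32".
    return comptime std.fmt.parseInt(u16, name[1 .. name.len - "_type".len], 10) catch unreachable;
}

test "comptime tag-name slicing" {
    try std.testing.expectEqual(@as(u16, 8), bitWidth(.u8_type));
    try std.testing.expectEqual(@as(u16, 32), bitWidth(.u32_type));
    try std.testing.expectEqual(@as(u16, 128), bitWidth(.i128_type));
}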
- fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!*llvm.Type {
- const gpa = o.gpa;
+ fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!Builder.Type {
const mod = o.module;
const target = mod.getTarget();
- switch (t.zigTypeTag(mod)) {
- .Void, .NoReturn => return o.context.voidType(),
- .Int => {
- const info = t.intInfo(mod);
- assert(info.bits != 0);
- return o.context.intType(info.bits);
- },
- .Enum => {
- const int_ty = t.intTagType(mod);
- const bit_count = int_ty.intInfo(mod).bits;
- assert(bit_count != 0);
- return o.context.intType(bit_count);
- },
- .Float => switch (t.floatBits(target)) {
- 16 => return if (backendSupportsF16(target)) o.context.halfType() else o.context.intType(16),
- 32 => return o.context.floatType(),
- 64 => return o.context.doubleType(),
- 80 => return if (backendSupportsF80(target)) o.context.x86FP80Type() else o.context.intType(80),
- 128 => return o.context.fp128Type(),
+ return switch (t.toIntern()) {
+ .u0_type, .i0_type => unreachable,
+ inline .u1_type,
+ .u8_type,
+ .i8_type,
+ .u16_type,
+ .i16_type,
+ .u29_type,
+ .u32_type,
+ .i32_type,
+ .u64_type,
+ .i64_type,
+ .u80_type,
+ .u128_type,
+ .i128_type,
+ => |tag| @field(Builder.Type, "i" ++ @tagName(tag)[1 .. @tagName(tag).len - "_type".len]),
+ .usize_type, .isize_type => try o.builder.intType(target.ptrBitWidth()),
+ inline .c_char_type,
+ .c_short_type,
+ .c_ushort_type,
+ .c_int_type,
+ .c_uint_type,
+ .c_long_type,
+ .c_ulong_type,
+ .c_longlong_type,
+ .c_ulonglong_type,
+ => |tag| try o.builder.intType(target.c_type_bit_size(
+ @field(std.Target.CType, @tagName(tag)["c_".len .. @tagName(tag).len - "_type".len]),
+ )),
+ .c_longdouble_type,
+ .f16_type,
+ .f32_type,
+ .f64_type,
+ .f80_type,
+ .f128_type,
+ => switch (t.floatBits(target)) {
+ 16 => if (backendSupportsF16(target)) .half else .i16,
+ 32 => .float,
+ 64 => .double,
+ 80 => if (backendSupportsF80(target)) .x86_fp80 else .i80,
+ 128 => .fp128,
else => unreachable,
},
- .Bool => return o.context.intType(1),
- .Pointer => {
- if (t.isSlice(mod)) {
- const ptr_type = t.slicePtrFieldType(mod);
-
- const fields: [2]*llvm.Type = .{
- try o.lowerType(ptr_type),
- try o.lowerType(Type.usize),
+ .anyopaque_type => unreachable,
+ .bool_type => .i1,
+ .void_type => .void,
+ .type_type => unreachable,
+ .anyerror_type => Builder.Type.err_int,
+ .comptime_int_type,
+ .comptime_float_type,
+ .noreturn_type,
+ => unreachable,
+ .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"),
+ .null_type,
+ .undefined_type,
+ .enum_literal_type,
+ .atomic_order_type,
+ .atomic_rmw_op_type,
+ .calling_convention_type,
+ .address_space_type,
+ .float_mode_type,
+ .reduce_op_type,
+ .call_modifier_type,
+ .prefetch_options_type,
+ .export_options_type,
+ .extern_options_type,
+ .type_info_type,
+ => unreachable,
+ .manyptr_u8_type,
+ .manyptr_const_u8_type,
+ .manyptr_const_u8_sentinel_0_type,
+ .single_const_pointer_to_comptime_int_type,
+ => .ptr,
+ .slice_const_u8_type,
+ .slice_const_u8_sentinel_0_type,
+ => try o.builder.structType(.normal, &.{ .ptr, try o.lowerType(Type.usize) }),
+ .optional_noreturn_type => unreachable,
+ .anyerror_void_error_union_type,
+ .adhoc_inferred_error_set_type,
+ => Builder.Type.err_int,
+ .generic_poison_type,
+ .empty_struct_type,
+ => unreachable,
+ // values, not types
+ .undef,
+ .zero,
+ .zero_usize,
+ .zero_u8,
+ .one,
+ .one_usize,
+ .one_u8,
+ .four_u8,
+ .negative_one,
+ .calling_convention_c,
+ .calling_convention_inline,
+ .void_value,
+ .unreachable_value,
+ .null_value,
+ .bool_true,
+ .bool_false,
+ .empty_struct,
+ .generic_poison,
+ .var_args_param_type,
+ .none,
+ => unreachable,
+ else => switch (mod.intern_pool.indexToKey(t.toIntern())) {
+ .int_type => |int_type| try o.builder.intType(int_type.bits),
+ .ptr_type => |ptr_type| type: {
+ const ptr_ty = try o.builder.ptrType(
+ toLlvmAddressSpace(ptr_type.flags.address_space, target),
+ );
+ break :type switch (ptr_type.flags.size) {
+ .One, .Many, .C => ptr_ty,
+ .Slice => try o.builder.structType(.normal, &.{
+ ptr_ty,
+ try o.lowerType(Type.usize),
+ }),
};
- return o.context.structType(&fields, fields.len, .False);
- }
- const ptr_info = t.ptrInfo(mod);
- const llvm_addrspace = toLlvmAddressSpace(ptr_info.flags.address_space, target);
- return o.context.pointerType(llvm_addrspace);
- },
- .Opaque => {
- if (t.toIntern() == .anyopaque_type) return o.context.intType(8);
-
- const gop = try o.type_map.getOrPut(gpa, t.toIntern());
- if (gop.found_existing) return gop.value_ptr.*;
-
- const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type;
- const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type));
-
- const llvm_struct_ty = o.context.structCreateNamed(name);
- gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
- return llvm_struct_ty;
- },
- .Array => {
- const elem_ty = t.childType(mod);
- if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null);
- const elem_llvm_ty = try o.lowerType(elem_ty);
- const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null);
- return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len)));
- },
- .Vector => {
- const elem_type = try o.lowerType(t.childType(mod));
- return elem_type.vectorType(t.vectorLen(mod));
- },
- .Optional => {
- const child_ty = t.optionalChild(mod);
- if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return o.context.intType(8);
- }
- const payload_llvm_ty = try o.lowerType(child_ty);
- if (t.optionalReprIsPayload(mod)) {
- return payload_llvm_ty;
- }
-
- comptime assert(optional_layout_version == 3);
- var fields_buf: [3]*llvm.Type = .{
- payload_llvm_ty, o.context.intType(8), undefined,
- };
- const offset = child_ty.abiSize(mod) + 1;
- const abi_size = t.abiSize(mod);
- const padding = @as(c_uint, @intCast(abi_size - offset));
- if (padding == 0) {
- return o.context.structType(&fields_buf, 2, .False);
- }
- fields_buf[2] = o.context.intType(8).arrayType(padding);
- return o.context.structType(&fields_buf, 3, .False);
- },
- .ErrorUnion => {
- const payload_ty = t.errorUnionPayload(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return try o.lowerType(Type.anyerror);
- }
- const llvm_error_type = try o.lowerType(Type.anyerror);
- const llvm_payload_type = try o.lowerType(payload_ty);
-
- const payload_align = payload_ty.abiAlignment(mod);
- const error_align = Type.anyerror.abiAlignment(mod);
-
- const payload_size = payload_ty.abiSize(mod);
- const error_size = Type.anyerror.abiSize(mod);
-
- var fields_buf: [3]*llvm.Type = undefined;
- if (error_align > payload_align) {
- fields_buf[0] = llvm_error_type;
- fields_buf[1] = llvm_payload_type;
- const payload_end =
- std.mem.alignForward(u64, error_size, payload_align) +
- payload_size;
- const abi_size = std.mem.alignForward(u64, payload_end, error_align);
- const padding = @as(c_uint, @intCast(abi_size - payload_end));
- if (padding == 0) {
- return o.context.structType(&fields_buf, 2, .False);
+ },
+ .array_type => |array_type| o.builder.arrayType(
+ array_type.len + @intFromBool(array_type.sentinel != .none),
+ try o.lowerType(array_type.child.toType()),
+ ),
+ .vector_type => |vector_type| o.builder.vectorType(
+ .normal,
+ vector_type.len,
+ try o.lowerType(vector_type.child.toType()),
+ ),
+ .opt_type => |child_ty| {
+ if (!child_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) return .i8;
+
+ const payload_ty = try o.lowerType(child_ty.toType());
+ if (t.optionalReprIsPayload(mod)) return payload_ty;
+
+ comptime assert(optional_layout_version == 3);
+ var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined };
+ var fields_len: usize = 2;
+ const offset = child_ty.toType().abiSize(mod) + 1;
+ const abi_size = t.abiSize(mod);
+ const padding_len = abi_size - offset;
+ if (padding_len > 0) {
+ fields[2] = try o.builder.arrayType(padding_len, .i8);
+ fields_len = 3;
}
- fields_buf[2] = o.context.intType(8).arrayType(padding);
- return o.context.structType(&fields_buf, 3, .False);
- } else {
- fields_buf[0] = llvm_payload_type;
- fields_buf[1] = llvm_error_type;
- const error_end =
- std.mem.alignForward(u64, payload_size, error_align) +
- error_size;
- const abi_size = std.mem.alignForward(u64, error_end, payload_align);
- const padding = @as(c_uint, @intCast(abi_size - error_end));
- if (padding == 0) {
- return o.context.structType(&fields_buf, 2, .False);
+ return o.builder.structType(.normal, fields[0..fields_len]);
+ },
+ .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"),
+ .error_union_type => |error_union_type| {
+ const error_type = Builder.Type.err_int;
+ if (!error_union_type.payload_type.toType().hasRuntimeBitsIgnoreComptime(mod))
+ return error_type;
+ const payload_type = try o.lowerType(error_union_type.payload_type.toType());
+
+ const payload_align = error_union_type.payload_type.toType().abiAlignment(mod);
+ const error_align = Type.err_int.abiAlignment(mod);
+
+ const payload_size = error_union_type.payload_type.toType().abiSize(mod);
+ const error_size = Type.err_int.abiSize(mod);
+
+ var fields: [3]Builder.Type = undefined;
+ var fields_len: usize = 2;
+ const padding_len = if (error_align > payload_align) pad: {
+ fields[0] = error_type;
+ fields[1] = payload_type;
+ const payload_end =
+ std.mem.alignForward(u64, error_size, payload_align) +
+ payload_size;
+ const abi_size = std.mem.alignForward(u64, payload_end, error_align);
+ break :pad abi_size - payload_end;
+ } else pad: {
+ fields[0] = payload_type;
+ fields[1] = error_type;
+ const error_end =
+ std.mem.alignForward(u64, payload_size, error_align) +
+ error_size;
+ const abi_size = std.mem.alignForward(u64, error_end, payload_align);
+ break :pad abi_size - error_end;
+ };
+ if (padding_len > 0) {
+ fields[2] = try o.builder.arrayType(padding_len, .i8);
+ fields_len = 3;
}
- fields_buf[2] = o.context.intType(8).arrayType(padding);
- return o.context.structType(&fields_buf, 3, .False);
- }
- },
- .ErrorSet => return o.context.intType(16),
- .Struct => {
- const gop = try o.type_map.getOrPut(gpa, t.toIntern());
- if (gop.found_existing) return gop.value_ptr.*;
-
- const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) {
- .anon_struct_type => |tuple| {
- const llvm_struct_ty = o.context.structCreateNamed("");
- gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+ return o.builder.structType(.normal, fields[0..fields_len]);
+ },
+ .simple_type => unreachable,
+ .struct_type => |struct_type| {
+ const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
+ if (gop.found_existing) return gop.value_ptr.*;
- var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
- defer llvm_field_types.deinit(gpa);
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ if (struct_obj.layout == .Packed) {
+ assert(struct_obj.haveLayout());
+ const int_ty = try o.lowerType(struct_obj.backing_int_ty);
+ gop.value_ptr.* = int_ty;
+ return int_ty;
+ }
- try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
+ const name = try o.builder.string(mod.intern_pool.stringToSlice(
+ try struct_obj.getFullyQualifiedName(mod),
+ ));
+ const ty = try o.builder.opaqueType(name);
+ gop.value_ptr.* = ty; // must be done before any recursive calls
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
+ assert(struct_obj.haveFieldTypes());
- for (tuple.types, tuple.values) |field_ty, field_val| {
- if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+ var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
+ defer llvm_field_types.deinit(o.gpa);
+ try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_obj.fields.count());
- const field_align = field_ty.toType().abiAlignment(mod);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForward(u64, offset, field_align);
-
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
- try llvm_field_types.append(gpa, llvm_array_ty);
- }
- const field_llvm_ty = try o.lowerType(field_ty.toType());
- try llvm_field_types.append(gpa, field_llvm_ty);
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 1;
+ var struct_kind: Builder.Type.Structure.Kind = .normal;
- offset += field_ty.toType().abiSize(mod);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForward(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
- try llvm_field_types.append(gpa, llvm_array_ty);
- }
- }
+ var it = struct_obj.runtimeFieldIterator(mod);
+ while (it.next()) |field_and_index| {
+ const field = field_and_index.field;
+ const field_align = field.alignment(mod, struct_obj.layout);
+ const field_ty_align = field.ty.abiAlignment(mod);
+ if (field_align < field_ty_align) struct_kind = .@"packed";
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, field_align);
- llvm_struct_ty.structSetBody(
- llvm_field_types.items.ptr,
- @as(c_uint, @intCast(llvm_field_types.items.len)),
- .False,
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) try llvm_field_types.append(
+ o.gpa,
+ try o.builder.arrayType(padding_len, .i8),
);
+ try llvm_field_types.append(o.gpa, try o.lowerType(field.ty));
- return llvm_struct_ty;
- },
- .struct_type => |struct_type| struct_type,
- else => unreachable,
- };
-
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
-
- if (struct_obj.layout == .Packed) {
- assert(struct_obj.haveLayout());
- const int_llvm_ty = try o.lowerType(struct_obj.backing_int_ty);
- gop.value_ptr.* = int_llvm_ty;
- return int_llvm_ty;
- }
-
- const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod));
+ offset += field.ty.abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) try llvm_field_types.append(
+ o.gpa,
+ try o.builder.arrayType(padding_len, .i8),
+ );
+ }
- const llvm_struct_ty = o.context.structCreateNamed(name);
- gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+ try o.builder.namedTypeSetBody(
+ ty,
+ try o.builder.structType(struct_kind, llvm_field_types.items),
+ );
+ return ty;
+ },
+ .anon_struct_type => |anon_struct_type| {
+ var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .{};
+ defer llvm_field_types.deinit(o.gpa);
+ try llvm_field_types.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len);
- assert(struct_obj.haveFieldTypes());
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
- var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
- defer llvm_field_types.deinit(gpa);
+ for (anon_struct_type.types, anon_struct_type.values) |field_ty, field_val| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
- try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count());
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, field_align);
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 1;
- var any_underaligned_fields = false;
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) try llvm_field_types.append(
+ o.gpa,
+ try o.builder.arrayType(padding_len, .i8),
+ );
+ try llvm_field_types.append(o.gpa, try o.lowerType(field_ty.toType()));
- var it = struct_obj.runtimeFieldIterator(mod);
- while (it.next()) |field_and_index| {
- const field = field_and_index.field;
- const field_align = field.alignment(mod, struct_obj.layout);
- const field_ty_align = field.ty.abiAlignment(mod);
- any_underaligned_fields = any_underaligned_fields or
- field_align < field_ty_align;
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForward(u64, offset, field_align);
-
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
- try llvm_field_types.append(gpa, llvm_array_ty);
+ offset += field_ty.toType().abiSize(mod);
}
- const field_llvm_ty = try o.lowerType(field.ty);
- try llvm_field_types.append(gpa, field_llvm_ty);
-
- offset += field.ty.abiSize(mod);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForward(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
- try llvm_field_types.append(gpa, llvm_array_ty);
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) try llvm_field_types.append(
+ o.gpa,
+ try o.builder.arrayType(padding_len, .i8),
+ );
}
- }
-
- llvm_struct_ty.structSetBody(
- llvm_field_types.items.ptr,
- @as(c_uint, @intCast(llvm_field_types.items.len)),
- llvm.Bool.fromBool(any_underaligned_fields),
- );
-
- return llvm_struct_ty;
- },
- .Union => {
- const gop = try o.type_map.getOrPut(gpa, t.toIntern());
- if (gop.found_existing) return gop.value_ptr.*;
-
- const layout = t.unionGetLayout(mod);
- const union_obj = mod.typeToUnion(t).?;
+ return o.builder.structType(.normal, llvm_field_types.items);
+ },
+ .union_type => |union_type| {
+ const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
+ if (gop.found_existing) return gop.value_ptr.*;
- if (union_obj.layout == .Packed) {
- const bitsize = @as(c_uint, @intCast(t.bitSize(mod)));
- const int_llvm_ty = o.context.intType(bitsize);
- gop.value_ptr.* = int_llvm_ty;
- return int_llvm_ty;
- }
+ const union_obj = mod.unionPtr(union_type.index);
+ const layout = union_obj.getLayout(mod, union_type.hasTag());
- if (layout.payload_size == 0) {
- const enum_tag_llvm_ty = try o.lowerType(union_obj.tag_ty);
- gop.value_ptr.* = enum_tag_llvm_ty;
- return enum_tag_llvm_ty;
- }
+ if (union_obj.layout == .Packed) {
+ const int_ty = try o.builder.intType(@intCast(t.bitSize(mod)));
+ gop.value_ptr.* = int_ty;
+ return int_ty;
+ }
- const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod));
+ if (layout.payload_size == 0) {
+ const enum_tag_ty = try o.lowerType(union_obj.tag_ty);
+ gop.value_ptr.* = enum_tag_ty;
+ return enum_tag_ty;
+ }
- const llvm_union_ty = o.context.structCreateNamed(name);
- gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls
+ const name = try o.builder.string(mod.intern_pool.stringToSlice(
+ try union_obj.getFullyQualifiedName(mod),
+ ));
+ const ty = try o.builder.opaqueType(name);
+ gop.value_ptr.* = ty; // must be done before any recursive calls
- const aligned_field = union_obj.fields.values()[layout.most_aligned_field];
- const llvm_aligned_field_ty = try o.lowerType(aligned_field.ty);
+ const aligned_field = union_obj.fields.values()[layout.most_aligned_field];
+ const aligned_field_ty = try o.lowerType(aligned_field.ty);
- const llvm_payload_ty = t: {
- if (layout.most_aligned_field_size == layout.payload_size) {
- break :t llvm_aligned_field_ty;
- }
- const padding_len = if (layout.tag_size == 0)
- @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size))
- else
- @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size));
- const fields: [2]*llvm.Type = .{
- llvm_aligned_field_ty,
- o.context.intType(8).arrayType(padding_len),
+ const payload_ty = ty: {
+ if (layout.most_aligned_field_size == layout.payload_size) {
+ break :ty aligned_field_ty;
+ }
+ const padding_len = if (layout.tag_size == 0)
+ layout.abi_size - layout.most_aligned_field_size
+ else
+ layout.payload_size - layout.most_aligned_field_size;
+ break :ty try o.builder.structType(.@"packed", &.{
+ aligned_field_ty,
+ try o.builder.arrayType(padding_len, .i8),
+ });
};
- break :t o.context.structType(&fields, fields.len, .True);
- };
- if (layout.tag_size == 0) {
- var llvm_fields: [1]*llvm.Type = .{llvm_payload_ty};
- llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
- return llvm_union_ty;
- }
- const enum_tag_llvm_ty = try o.lowerType(union_obj.tag_ty);
+ if (layout.tag_size == 0) {
+ try o.builder.namedTypeSetBody(
+ ty,
+ try o.builder.structType(.normal, &.{payload_ty}),
+ );
+ return ty;
+ }
+ const enum_tag_ty = try o.lowerType(union_obj.tag_ty);
- // Put the tag before or after the payload depending on which one's
- // alignment is greater.
- var llvm_fields: [3]*llvm.Type = undefined;
- var llvm_fields_len: c_uint = 2;
+ // Put the tag before or after the payload depending on which one's
+ // alignment is greater.
+ var llvm_fields: [3]Builder.Type = undefined;
+ var llvm_fields_len: usize = 2;
- if (layout.tag_align >= layout.payload_align) {
- llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined };
- } else {
- llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined };
- }
+ if (layout.tag_align >= layout.payload_align) {
+ llvm_fields = .{ enum_tag_ty, payload_ty, .none };
+ } else {
+ llvm_fields = .{ payload_ty, enum_tag_ty, .none };
+ }
- // Insert padding to make the LLVM struct ABI size match the Zig union ABI size.
- if (layout.padding != 0) {
- llvm_fields[2] = o.context.intType(8).arrayType(layout.padding);
- llvm_fields_len = 3;
- }
+ // Insert padding to make the LLVM struct ABI size match the Zig union ABI size.
+ if (layout.padding != 0) {
+ llvm_fields[llvm_fields_len] = try o.builder.arrayType(layout.padding, .i8);
+ llvm_fields_len += 1;
+ }
- llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False);
- return llvm_union_ty;
+ try o.builder.namedTypeSetBody(
+ ty,
+ try o.builder.structType(.normal, llvm_fields[0..llvm_fields_len]),
+ );
+ return ty;
+ },
+ .opaque_type => |opaque_type| {
+ const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
+ if (!gop.found_existing) {
+ const name = try o.builder.string(mod.intern_pool.stringToSlice(
+ try mod.opaqueFullyQualifiedName(opaque_type),
+ ));
+ gop.value_ptr.* = try o.builder.opaqueType(name);
+ }
+ return gop.value_ptr.*;
+ },
+ .enum_type => |enum_type| try o.lowerType(enum_type.tag_ty.toType()),
+ .func_type => |func_type| try o.lowerTypeFn(func_type),
+ .error_set_type, .inferred_error_set_type => Builder.Type.err_int,
+ // values, not types
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ // memoization, not types
+ .memoized_call,
+ => unreachable,
},
- .Fn => return lowerTypeFn(o, t),
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .EnumLiteral => unreachable,
+ };
+ }
- .Frame => @panic("TODO implement llvmType for Frame types"),
- .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"),
- }
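The optional, error-union, struct, and union lowerings above all size an explicit `[N x i8]` padding field from `alignForward` so the LLVM struct's ABI size matches the Zig type's. A worked, runnable version of the error-union arithmetic, assuming a u64 payload (size 8, align 8) and the u16 error integer (size 2, align 2):

const std = @import("std");

test "error union payload-first layout" {
    const payload_size: u64 = 8;
    const payload_align: u64 = 8;
    const error_size: u64 = 2;
    const error_align: u64 = 2;

    // error_align <= payload_align, so the payload field comes first.
    const error_end = std.mem.alignForward(u64, payload_size, error_align) + error_size;
    const abi_size = std.mem.alignForward(u64, error_end, payload_align);
    const padding_len = abi_size - error_end;

    // Lowered roughly as { i64, i16, [6 x i8] }: 10 bytes of data padded out to 16.
    try std.testing.expectEqual(@as(u64, 10), error_end);
    try std.testing.expectEqual(@as(u64, 16), abi_size);
    try std.testing.expectEqual(@as(u64, 6), padding_len);
}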
+ /// Use this instead of lowerType when you want to handle the case where elem_ty is a
+ /// zero-bit type but should still be lowered as an i8.
+ /// There are other similar cases handled here as well.
+ fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type {
+ const mod = o.module;
+ const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
+ .Opaque => true,
+ .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
+ .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
+ else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
+ };
+ return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8;
}
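The `.i8` fallback above keeps pointers to zero-bit element types pointing at something with a nonzero size. A few examples of Zig types with no runtime bits that take that fallback:

const std = @import("std");

test "zero-bit element types" {
    try std.testing.expect(@sizeOf(void) == 0);
    try std.testing.expect(@sizeOf(u0) == 0);
    try std.testing.expect(@sizeOf([4]void) == 0);
}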
- fn lowerTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type {
+ fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
const ip = &mod.intern_pool;
- const fn_info = mod.typeToFunc(fn_ty).?;
- const llvm_ret_ty = try lowerFnRetTy(o, fn_info);
+ const target = mod.getTarget();
+ const ret_ty = try lowerFnRetTy(o, fn_info);
- var llvm_params = std.ArrayList(*llvm.Type).init(o.gpa);
- defer llvm_params.deinit();
+ var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
+ defer llvm_params.deinit(o.gpa);
if (firstParamSRet(fn_info, mod)) {
- try llvm_params.append(o.context.pointerType(0));
+ try llvm_params.append(o.gpa, .ptr);
}
if (fn_info.return_type.toType().isError(mod) and
mod.comp.bin_file.options.error_return_tracing)
{
const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
- try llvm_params.append(try o.lowerType(ptr_ty));
+ try llvm_params.append(o.gpa, try o.lowerType(ptr_ty));
}
var it = iterateParamTypes(o, fn_info);
- while (it.next()) |lowering| switch (lowering) {
+ while (try it.next()) |lowering| switch (lowering) {
.no_bits => continue,
.byval => {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- try llvm_params.append(try o.lowerType(param_ty));
+ try llvm_params.append(o.gpa, try o.lowerType(param_ty));
},
.byref, .byref_mut => {
- try llvm_params.append(o.context.pointerType(0));
+ try llvm_params.append(o.gpa, .ptr);
},
.abi_sized_int => {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
- try llvm_params.append(o.context.intType(abi_size * 8));
+ try llvm_params.append(o.gpa, try o.builder.intType(
+ @intCast(param_ty.abiSize(mod) * 8),
+ ));
},
.slice => {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
- const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
- param_ty.optionalChild(mod).slicePtrFieldType(mod)
- else
- param_ty.slicePtrFieldType(mod);
- const ptr_llvm_ty = try o.lowerType(ptr_ty);
- const len_llvm_ty = try o.lowerType(Type.usize);
-
- try llvm_params.ensureUnusedCapacity(2);
- llvm_params.appendAssumeCapacity(ptr_llvm_ty);
- llvm_params.appendAssumeCapacity(len_llvm_ty);
+ try llvm_params.appendSlice(o.gpa, &.{
+ try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(mod), target)),
+ try o.lowerType(Type.usize),
+ });
},
.multiple_llvm_types => {
- try llvm_params.appendSlice(it.llvm_types_buffer[0..it.llvm_types_len]);
+ try llvm_params.appendSlice(o.gpa, it.types_buffer[0..it.types_len]);
},
.as_u16 => {
- try llvm_params.append(o.context.intType(16));
+ try llvm_params.append(o.gpa, .i16);
},
.float_array => |count| {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
- const field_count = @as(c_uint, @intCast(count));
- const arr_ty = float_ty.arrayType(field_count);
- try llvm_params.append(arr_ty);
+ try llvm_params.append(o.gpa, try o.builder.arrayType(count, float_ty));
},
.i32_array, .i64_array => |arr_len| {
- const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
- const arr_ty = o.context.intType(elem_size).arrayType(arr_len);
- try llvm_params.append(arr_ty);
+ try llvm_params.append(o.gpa, try o.builder.arrayType(arr_len, switch (lowering) {
+ .i32_array => .i32,
+ .i64_array => .i64,
+ else => unreachable,
+ }));
},
};
- return llvm.functionType(
- llvm_ret_ty,
- llvm_params.items.ptr,
- @as(c_uint, @intCast(llvm_params.items.len)),
- llvm.Bool.fromBool(fn_info.is_var_args),
+ return o.builder.fnType(
+ ret_ty,
+ llvm_params.items,
+ if (fn_info.is_var_args) .vararg else .normal,
);
}
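Further down, lowerValue (like the type lowering above) falls back to a plain integer constant with the same bit pattern for f16 and f80 when the target backend lacks native support. A standalone check of that bit-pattern round trip for f16:

const std = @import("std");

test "f16 stored as its 16-bit pattern" {
    const x: f16 = 1.5;
    const bits: u16 = @bitCast(x);
    try std.testing.expectEqual(@as(u16, 0x3E00), bits);
    try std.testing.expectEqual(x, @as(f16, @bitCast(bits)));
}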
- /// Use this instead of lowerType when you want to handle correctly the case of elem_ty
- /// being a zero bit type, but it should still be lowered as an i8 in such case.
- /// There are other similar cases handled here as well.
- fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!*llvm.Type {
+ fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant {
const mod = o.module;
- const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
- .Opaque => true,
- .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
- .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
- else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
- };
- const llvm_elem_ty = if (lower_elem_ty)
- try o.lowerType(elem_ty)
- else
- o.context.intType(8);
-
- return llvm_elem_ty;
- }
-
- fn lowerValue(o: *Object, arg_tv: TypedValue) Error!*llvm.Value {
- const mod = o.module;
- const gpa = o.gpa;
const target = mod.getTarget();
- var tv = arg_tv;
- switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
- .runtime_value => |rt| tv.val = rt.val.toValue(),
+
+ var val = arg_val.toValue();
+ const arg_val_key = mod.intern_pool.indexToKey(arg_val);
+ switch (arg_val_key) {
+ .runtime_value => |rt| val = rt.val.toValue(),
else => {},
}
- if (tv.val.isUndefDeep(mod)) {
- const llvm_type = try o.lowerType(tv.ty);
- return llvm_type.getUndef();
+ if (val.isUndefDeep(mod)) {
+ return o.builder.undefConst(try o.lowerType(arg_val_key.typeOf().toType()));
}
- switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
+ const val_key = mod.intern_pool.indexToKey(val.toIntern());
+ const ty = val_key.typeOf().toType();
+ return switch (val_key) {
.int_type,
.ptr_type,
.array_type,
@@ -3167,10 +3588,8 @@ pub const Object = struct {
.@"unreachable",
.generic_poison,
=> unreachable, // non-runtime values
- .false, .true => {
- const llvm_type = try o.lowerType(tv.ty);
- return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
- },
+ .false => .false,
+ .true => .true,
},
.variable,
.enum_literal,
@@ -3180,309 +3599,276 @@ pub const Object = struct {
const fn_decl_index = extern_func.decl;
const fn_decl = mod.declPtr(fn_decl_index);
try mod.markDeclAlive(fn_decl);
- return o.resolveLlvmFunction(fn_decl_index);
+ const function_index = try o.resolveLlvmFunction(fn_decl_index);
+ return function_index.ptrConst(&o.builder).global.toConst();
},
.func => |func| {
const fn_decl_index = func.owner_decl;
const fn_decl = mod.declPtr(fn_decl_index);
try mod.markDeclAlive(fn_decl);
- return o.resolveLlvmFunction(fn_decl_index);
+ const function_index = try o.resolveLlvmFunction(fn_decl_index);
+ return function_index.ptrConst(&o.builder).global.toConst();
},
.int => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = tv.val.toBigInt(&bigint_space, mod);
- return lowerBigInt(o, tv.ty, bigint);
+ const bigint = val.toBigInt(&bigint_space, mod);
+ return lowerBigInt(o, ty, bigint);
},
.err => |err| {
- const llvm_ty = try o.lowerType(Type.anyerror);
const int = try mod.getErrorValue(err.name);
- return llvm_ty.constInt(int, .False);
+ const llvm_int = try o.builder.intConst(Builder.Type.err_int, int);
+ return llvm_int;
},
.error_union => |error_union| {
- const err_tv: TypedValue = switch (error_union.val) {
- .err_name => |err_name| .{
- .ty = tv.ty.errorUnionSet(mod),
- .val = (try mod.intern(.{ .err = .{
- .ty = tv.ty.errorUnionSet(mod).toIntern(),
- .name = err_name,
- } })).toValue(),
- },
- .payload => .{
- .ty = Type.err_int,
- .val = try mod.intValue(Type.err_int, 0),
- },
+ const err_val = switch (error_union.val) {
+ .err_name => |err_name| try mod.intern(.{ .err = .{
+ .ty = ty.errorUnionSet(mod).toIntern(),
+ .name = err_name,
+ } }),
+ .payload => (try mod.intValue(Type.err_int, 0)).toIntern(),
};
- const payload_type = tv.ty.errorUnionPayload(mod);
+ const payload_type = ty.errorUnionPayload(mod);
if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
- return o.lowerValue(err_tv);
+ return o.lowerValue(err_val);
}
const payload_align = payload_type.abiAlignment(mod);
- const error_align = err_tv.ty.abiAlignment(mod);
- const llvm_error_value = try o.lowerValue(err_tv);
- const llvm_payload_value = try o.lowerValue(.{
- .ty = payload_type,
- .val = switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }),
- .payload => |payload| payload,
- }.toValue(),
+ const error_align = Type.err_int.abiAlignment(mod);
+ const llvm_error_value = try o.lowerValue(err_val);
+ const llvm_payload_value = try o.lowerValue(switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }),
+ .payload => |payload| payload,
});
- var fields_buf: [3]*llvm.Value = undefined;
-
- const llvm_ty = try o.lowerType(tv.ty);
- const llvm_field_count = llvm_ty.countStructElementTypes();
- if (llvm_field_count > 2) {
- assert(llvm_field_count == 3);
- fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef();
- }
+ var fields: [3]Builder.Type = undefined;
+ var vals: [3]Builder.Constant = undefined;
if (error_align > payload_align) {
- fields_buf[0] = llvm_error_value;
- fields_buf[1] = llvm_payload_value;
- return o.context.constStruct(&fields_buf, llvm_field_count, .False);
+ vals[0] = llvm_error_value;
+ vals[1] = llvm_payload_value;
} else {
- fields_buf[0] = llvm_payload_value;
- fields_buf[1] = llvm_error_value;
- return o.context.constStruct(&fields_buf, llvm_field_count, .False);
+ vals[0] = llvm_payload_value;
+ vals[1] = llvm_error_value;
}
- },
- .enum_tag => {
- const int_val = try tv.intFromEnum(mod);
-
- var bigint_space: Value.BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_space, mod);
-
- const int_info = tv.ty.intInfo(mod);
- const llvm_type = o.context.intType(int_info.bits);
-
- const unsigned_val = v: {
- if (bigint.limbs.len == 1) {
- break :v llvm_type.constInt(bigint.limbs[0], .False);
- }
- if (@sizeOf(usize) == @sizeOf(u64)) {
- break :v llvm_type.constIntOfArbitraryPrecision(
- @as(c_uint, @intCast(bigint.limbs.len)),
- bigint.limbs.ptr,
- );
- }
- @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
- };
- if (!bigint.positive) {
- return llvm.constNeg(unsigned_val);
+ fields[0] = vals[0].typeOf(&o.builder);
+ fields[1] = vals[1].typeOf(&o.builder);
+
+ const llvm_ty = try o.lowerType(ty);
+ const llvm_ty_fields = llvm_ty.structFields(&o.builder);
+ if (llvm_ty_fields.len > 2) {
+ assert(llvm_ty_fields.len == 3);
+ fields[2] = llvm_ty_fields[2];
+ vals[2] = try o.builder.undefConst(fields[2]);
}
- return unsigned_val;
+ return o.builder.structConst(try o.builder.structType(
+ llvm_ty.structKind(&o.builder),
+ fields[0..llvm_ty_fields.len],
+ ), vals[0..llvm_ty_fields.len]);
},
- .float => {
- const llvm_ty = try o.lowerType(tv.ty);
- switch (tv.ty.floatBits(target)) {
- 16 => {
- const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod)));
- const llvm_i16 = o.context.intType(16);
- const int = llvm_i16.constInt(repr, .False);
- return int.constBitCast(llvm_ty);
- },
- 32 => {
- const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod)));
- const llvm_i32 = o.context.intType(32);
- const int = llvm_i32.constInt(repr, .False);
- return int.constBitCast(llvm_ty);
- },
- 64 => {
- const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod)));
- const llvm_i64 = o.context.intType(64);
- const int = llvm_i64.constInt(repr, .False);
- return int.constBitCast(llvm_ty);
- },
- 80 => {
- const float = tv.val.toFloat(f80, mod);
- const repr = std.math.break_f80(float);
- const llvm_i80 = o.context.intType(80);
- var x = llvm_i80.constInt(repr.exp, .False);
- x = x.constShl(llvm_i80.constInt(64, .False));
- x = x.constOr(llvm_i80.constInt(repr.fraction, .False));
- if (backendSupportsF80(target)) {
- return x.constBitCast(llvm_ty);
- } else {
- return x;
- }
- },
- 128 => {
- var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod)));
- // LLVM seems to require that the lower half of the f128 be placed first
- // in the buffer.
- if (native_endian == .Big) {
- std.mem.swap(u64, &buf[0], &buf[1]);
- }
- const int = o.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
- return int.constBitCast(llvm_ty);
- },
- else => unreachable,
- }
+ .enum_tag => |enum_tag| o.lowerValue(enum_tag.int),
+ .float => switch (ty.floatBits(target)) {
+ 16 => if (backendSupportsF16(target))
+ try o.builder.halfConst(val.toFloat(f16, mod))
+ else
+ try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))),
+ 32 => try o.builder.floatConst(val.toFloat(f32, mod)),
+ 64 => try o.builder.doubleConst(val.toFloat(f64, mod)),
+ 80 => if (backendSupportsF80(target))
+ try o.builder.x86_fp80Const(val.toFloat(f80, mod))
+ else
+ try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))),
+ 128 => try o.builder.fp128Const(val.toFloat(f128, mod)),
+ else => unreachable,
},
.ptr => |ptr| {
- const ptr_tv: TypedValue = switch (ptr.len) {
- .none => tv,
- else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) },
+ const ptr_ty = switch (ptr.len) {
+ .none => ty,
+ else => ty.slicePtrFieldType(mod),
};
- const llvm_ptr_val = switch (ptr.addr) {
- .decl => |decl| try o.lowerDeclRefValue(ptr_tv, decl),
- .mut_decl => |mut_decl| try o.lowerDeclRefValue(ptr_tv, mut_decl.decl),
- .int => |int| try o.lowerIntAsPtr(int.toValue()),
+ const ptr_val = switch (ptr.addr) {
+ .decl => |decl| try o.lowerDeclRefValue(ptr_ty, decl),
+ .mut_decl => |mut_decl| try o.lowerDeclRefValue(ptr_ty, mut_decl.decl),
+ .int => |int| try o.lowerIntAsPtr(int),
.eu_payload,
.opt_payload,
.elem,
.field,
- => try o.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0),
+ => try o.lowerParentPtr(val, ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0),
.comptime_field => unreachable,
};
switch (ptr.len) {
- .none => return llvm_ptr_val,
- else => {
- const fields: [2]*llvm.Value = .{
- llvm_ptr_val,
- try o.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }),
- };
- return o.context.constStruct(&fields, fields.len, .False);
- },
+ .none => return ptr_val,
+ else => return o.builder.structConst(try o.lowerType(ty), &.{
+ ptr_val, try o.lowerValue(ptr.len),
+ }),
}
},
.opt => |opt| {
comptime assert(optional_layout_version == 3);
- const payload_ty = tv.ty.optionalChild(mod);
+ const payload_ty = ty.optionalChild(mod);
- const llvm_i8 = o.context.intType(8);
- const non_null_bit = switch (opt.val) {
- .none => llvm_i8.constNull(),
- else => llvm_i8.constInt(1, .False),
- };
+ const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none));
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return non_null_bit;
}
- const llvm_ty = try o.lowerType(tv.ty);
- if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) {
- .none => llvm_ty.constNull(),
- else => |payload| o.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }),
+ const llvm_ty = try o.lowerType(ty);
+ if (ty.optionalReprIsPayload(mod)) return switch (opt.val) {
+ .none => switch (llvm_ty.tag(&o.builder)) {
+ .integer => try o.builder.intConst(llvm_ty, 0),
+ .pointer => try o.builder.nullConst(llvm_ty),
+ .structure => try o.builder.zeroInitConst(llvm_ty),
+ else => unreachable,
+ },
+ else => |payload| try o.lowerValue(payload),
};
assert(payload_ty.zigTypeTag(mod) != .Fn);
- const llvm_field_count = llvm_ty.countStructElementTypes();
- var fields_buf: [3]*llvm.Value = undefined;
- fields_buf[0] = try o.lowerValue(.{
- .ty = payload_ty,
- .val = switch (opt.val) {
- .none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
- else => |payload| payload,
- }.toValue(),
+ var fields: [3]Builder.Type = undefined;
+ var vals: [3]Builder.Constant = undefined;
+ vals[0] = try o.lowerValue(switch (opt.val) {
+ .none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+ else => |payload| payload,
});
- fields_buf[1] = non_null_bit;
- if (llvm_field_count > 2) {
- assert(llvm_field_count == 3);
- fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef();
+ vals[1] = non_null_bit;
+ fields[0] = vals[0].typeOf(&o.builder);
+ fields[1] = vals[1].typeOf(&o.builder);
+
+ const llvm_ty_fields = llvm_ty.structFields(&o.builder);
+ if (llvm_ty_fields.len > 2) {
+ assert(llvm_ty_fields.len == 3);
+ fields[2] = llvm_ty_fields[2];
+ vals[2] = try o.builder.undefConst(fields[2]);
}
- return o.context.constStruct(&fields_buf, llvm_field_count, .False);
+ return o.builder.structConst(try o.builder.structType(
+ llvm_ty.structKind(&o.builder),
+ fields[0..llvm_ty_fields.len],
+ ), vals[0..llvm_ty_fields.len]);
},
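For orientation, a tiny plain-Zig sketch (illustrative only) of the optional shapes the branch above distinguishes: zero-bit payload, pointer-like payload, and the general { payload, i8 non-null, padding } struct:

const std = @import("std");

test "optional representation shapes" {
    // Pointer-like optionals are lowered as just the payload; null is the
    // all-zero pointer, so no extra non-null byte is needed.
    try std.testing.expect(@sizeOf(?*u32) == @sizeOf(*u32));
    // Other optionals carry the payload plus a separate non-null byte
    // (and possibly padding), matching the struct constant built above.
    try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
}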
- .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) {
- .array_type => switch (aggregate.storage) {
- .bytes => |bytes| return o.context.constString(
- bytes.ptr,
- @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))),
- .True, // Don't null terminate. Bytes has the sentinel, if any.
- ),
- .elems => |elem_vals| {
- const elem_ty = tv.ty.childType(mod);
- const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len);
- defer gpa.free(llvm_elems);
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .array_type => |array_type| switch (aggregate.storage) {
+ .bytes => |bytes| try o.builder.stringConst(try o.builder.string(bytes)),
+ .elems => |elems| {
+ const array_ty = try o.lowerType(ty);
+ const elem_ty = array_ty.childType(&o.builder);
+ assert(elems.len == array_ty.aggregateLen(&o.builder));
+
+ const ExpectedContents = extern struct {
+ vals: [Builder.expected_fields_len]Builder.Constant,
+ fields: [Builder.expected_fields_len]Builder.Type,
+ };
+ var stack align(@max(
+ @alignOf(std.heap.StackFallbackAllocator(0)),
+ @alignOf(ExpectedContents),
+ )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa);
+ const allocator = stack.get();
+ const vals = try allocator.alloc(Builder.Constant, elems.len);
+ defer allocator.free(vals);
+ const fields = try allocator.alloc(Builder.Type, elems.len);
+ defer allocator.free(fields);
+
var need_unnamed = false;
- for (elem_vals, 0..) |elem_val, i| {
- llvm_elems[i] = try o.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() });
- need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[i]);
- }
- if (need_unnamed) {
- return o.context.constStruct(
- llvm_elems.ptr,
- @as(c_uint, @intCast(llvm_elems.len)),
- .True,
- );
- } else {
- const llvm_elem_ty = try o.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @as(c_uint, @intCast(llvm_elems.len)),
- );
+ for (vals, fields, elems) |*result_val, *result_field, elem| {
+ result_val.* = try o.lowerValue(elem);
+ result_field.* = result_val.typeOf(&o.builder);
+ if (result_field.* != elem_ty) need_unnamed = true;
}
+ return if (need_unnamed) try o.builder.structConst(
+ try o.builder.structType(.normal, fields),
+ vals,
+ ) else try o.builder.arrayConst(array_ty, vals);
},
- .repeated_elem => |val| {
- const elem_ty = tv.ty.childType(mod);
- const sentinel = tv.ty.sentinel(mod);
- const len = @as(usize, @intCast(tv.ty.arrayLen(mod)));
- const len_including_sent = len + @intFromBool(sentinel != null);
- const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
- defer gpa.free(llvm_elems);
+ .repeated_elem => |elem| {
+ const len: usize = @intCast(array_type.len);
+ const len_including_sentinel: usize =
+ @intCast(len + @intFromBool(array_type.sentinel != .none));
+ const array_ty = try o.lowerType(ty);
+ const elem_ty = array_ty.childType(&o.builder);
+
+ const ExpectedContents = extern struct {
+ vals: [Builder.expected_fields_len]Builder.Constant,
+ fields: [Builder.expected_fields_len]Builder.Type,
+ };
+ var stack align(@max(
+ @alignOf(std.heap.StackFallbackAllocator(0)),
+ @alignOf(ExpectedContents),
+ )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa);
+ const allocator = stack.get();
+ const vals = try allocator.alloc(Builder.Constant, len_including_sentinel);
+ defer allocator.free(vals);
+ const fields = try allocator.alloc(Builder.Type, len_including_sentinel);
+ defer allocator.free(fields);
var need_unnamed = false;
- if (len != 0) {
- for (llvm_elems[0..len]) |*elem| {
- elem.* = try o.lowerValue(.{ .ty = elem_ty, .val = val.toValue() });
- }
- need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[0]);
- }
-
- if (sentinel) |sent| {
- llvm_elems[len] = try o.lowerValue(.{ .ty = elem_ty, .val = sent });
- need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[len]);
+ @memset(vals[0..len], try o.lowerValue(elem));
+ @memset(fields[0..len], vals[0].typeOf(&o.builder));
+ if (fields[0] != elem_ty) need_unnamed = true;
+
+ if (array_type.sentinel != .none) {
+ vals[len] = try o.lowerValue(array_type.sentinel);
+ fields[len] = vals[len].typeOf(&o.builder);
+ if (fields[len] != elem_ty) need_unnamed = true;
}
- if (need_unnamed) {
- return o.context.constStruct(
- llvm_elems.ptr,
- @as(c_uint, @intCast(llvm_elems.len)),
- .True,
- );
- } else {
- const llvm_elem_ty = try o.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @as(c_uint, @intCast(llvm_elems.len)),
- );
- }
+ return if (need_unnamed) try o.builder.structConst(
+ try o.builder.structType(.@"packed", fields),
+ vals,
+ ) else try o.builder.arrayConst(array_ty, vals);
},
},
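A quick illustration (not part of this change) of why the repeated_elem path above allocates len + 1 slots when the array type has a sentinel:

const std = @import("std");

test "a sentinel adds one trailing element to the array constant" {
    const arr: [2:0]u8 = .{ 7, 7 };
    // The constant holds the repeated element twice, then the sentinel.
    try std.testing.expectEqual(@as(usize, 3), arr.len + 1);
    try std.testing.expectEqual(@as(u8, 7), arr[1]);
    try std.testing.expectEqual(@as(u8, 0), arr[2]); // the sentinel slot
}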
.vector_type => |vector_type| {
- const elem_ty = vector_type.child.toType();
- const llvm_elems = try gpa.alloc(*llvm.Value, vector_type.len);
- defer gpa.free(llvm_elems);
- const llvm_i8 = o.context.intType(8);
- for (llvm_elems, 0..) |*llvm_elem, i| {
- llvm_elem.* = switch (aggregate.storage) {
- .bytes => |bytes| llvm_i8.constInt(bytes[i], .False),
- .elems => |elems| try o.lowerValue(.{
- .ty = elem_ty,
- .val = elems[i].toValue(),
- }),
- .repeated_elem => |elem| try o.lowerValue(.{
- .ty = elem_ty,
- .val = elem.toValue(),
- }),
- };
+ const vector_ty = try o.lowerType(ty);
+ switch (aggregate.storage) {
+ .bytes, .elems => {
+ const ExpectedContents = [Builder.expected_fields_len]Builder.Constant;
+ var stack align(@max(
+ @alignOf(std.heap.StackFallbackAllocator(0)),
+ @alignOf(ExpectedContents),
+ )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa);
+ const allocator = stack.get();
+ const vals = try allocator.alloc(Builder.Constant, vector_type.len);
+ defer allocator.free(vals);
+
+ switch (aggregate.storage) {
+ .bytes => |bytes| for (vals, bytes) |*result_val, byte| {
+ result_val.* = try o.builder.intConst(.i8, byte);
+ },
+ .elems => |elems| for (vals, elems) |*result_val, elem| {
+ result_val.* = try o.lowerValue(elem);
+ },
+ .repeated_elem => unreachable,
+ }
+ return o.builder.vectorConst(vector_ty, vals);
+ },
+ .repeated_elem => |elem| return o.builder.splatConst(
+ vector_ty,
+ try o.lowerValue(elem),
+ ),
}
- return llvm.constVector(
- llvm_elems.ptr,
- @as(c_uint, @intCast(llvm_elems.len)),
- );
},
.anon_struct_type => |tuple| {
- var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
- defer llvm_fields.deinit(gpa);
+ const struct_ty = try o.lowerType(ty);
+ const llvm_len = struct_ty.aggregateLen(&o.builder);
- try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+ const ExpectedContents = extern struct {
+ vals: [Builder.expected_fields_len]Builder.Constant,
+ fields: [Builder.expected_fields_len]Builder.Type,
+ };
+ var stack align(@max(
+ @alignOf(std.heap.StackFallbackAllocator(0)),
+ @alignOf(ExpectedContents),
+ )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa);
+ const allocator = stack.get();
+ const vals = try allocator.alloc(Builder.Constant, llvm_len);
+ defer allocator.free(vals);
+ const fields = try allocator.alloc(Builder.Type, llvm_len);
+ defer allocator.free(fields);
comptime assert(struct_layout_version == 2);
+ var llvm_index: usize = 0;
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
-
- for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -3493,20 +3879,20 @@ pub const Object = struct {
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
+ vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]);
+ assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]);
+ llvm_index += 1;
}
- const field_llvm_val = try o.lowerValue(.{
- .ty = field_ty.toType(),
- .val = try tv.val.fieldValue(mod, i),
- });
-
- need_unnamed = need_unnamed or o.isUnnamedType(field_ty.toType(), field_llvm_val);
-
- llvm_fields.appendAssumeCapacity(field_llvm_val);
+ vals[llvm_index] =
+ try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern());
+ fields[llvm_index] = vals[llvm_index].typeOf(&o.builder);
+ if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index])
+ need_unnamed = true;
+ llvm_index += 1;
offset += field_ty.toType().abiSize(mod);
}
@@ -3515,73 +3901,71 @@ pub const Object = struct {
offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
+ vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]);
+ assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]);
+ llvm_index += 1;
}
}
+ assert(llvm_index == llvm_len);
- if (need_unnamed) {
- return o.context.constStruct(
- llvm_fields.items.ptr,
- @as(c_uint, @intCast(llvm_fields.items.len)),
- .False,
- );
- } else {
- const llvm_struct_ty = try o.lowerType(tv.ty);
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @as(c_uint, @intCast(llvm_fields.items.len)),
- );
- }
+ return o.builder.structConst(if (need_unnamed)
+ try o.builder.structType(struct_ty.structKind(&o.builder), fields)
+ else
+ struct_ty, vals);
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- const llvm_struct_ty = try o.lowerType(tv.ty);
-
+ assert(struct_obj.haveLayout());
+ const struct_ty = try o.lowerType(ty);
if (struct_obj.layout == .Packed) {
- assert(struct_obj.haveLayout());
- const big_bits = struct_obj.backing_int_ty.bitSize(mod);
- const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits)));
- const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
- var running_int: *llvm.Value = int_llvm_ty.constNull();
+ var running_int = try o.builder.intConst(struct_ty, 0);
var running_bits: u16 = 0;
- for (fields, 0..) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, field_index| {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const non_int_val = try o.lowerValue(.{
- .ty = field.ty,
- .val = try tv.val.fieldValue(mod, i),
- });
- const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
- const small_int_ty = o.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime(mod))
- non_int_val.constPtrToInt(small_int_ty)
- else
- non_int_val.constBitCast(small_int_ty);
- const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
- // If the field is as large as the entire packed struct, this
- // zext would go from, e.g. i16 to i16. This is legal with
- // constZExtOrBitCast but not legal with constZExt.
- const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
- const shifted = extended_int_val.constShl(shift_rhs);
- running_int = running_int.constOr(shifted);
+ const non_int_val =
+ try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern());
+ const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod));
+ const small_int_ty = try o.builder.intType(ty_bit_size);
+ const small_int_val = try o.builder.castConst(
+ if (field.ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
+ non_int_val,
+ small_int_ty,
+ );
+ const shift_rhs = try o.builder.intConst(struct_ty, running_bits);
+ const extended_int_val =
+ try o.builder.convConst(.unsigned, small_int_val, struct_ty);
+ const shifted = try o.builder.binConst(.shl, extended_int_val, shift_rhs);
+ running_int = try o.builder.binConst(.@"or", running_int, shifted);
running_bits += ty_bit_size;
}
return running_int;
}
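The shift-and-or loop above produces the packed struct's backing integer; a small plain-Zig sketch of the same bit layout, with illustrative field types:

const std = @import("std");

test "a packed struct constant is one big integer" {
    const P = packed struct { a: u4, b: u12 };
    const p = P{ .a = 0x5, .b = 0xABC };
    // Field `a` lands at bit offset 0, `b` at its running offset of 4,
    // exactly the (b << 4) | a value the lowering ORs together.
    const bits: u16 = @bitCast(p);
    try std.testing.expectEqual(@as(u16, 0xABC5), bits);
}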
+ const llvm_len = struct_ty.aggregateLen(&o.builder);
- const llvm_field_count = llvm_struct_ty.countStructElementTypes();
- var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
- defer llvm_fields.deinit(gpa);
+ const ExpectedContents = extern struct {
+ vals: [Builder.expected_fields_len]Builder.Constant,
+ fields: [Builder.expected_fields_len]Builder.Type,
+ };
+ var stack align(@max(
+ @alignOf(std.heap.StackFallbackAllocator(0)),
+ @alignOf(ExpectedContents),
+ )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa);
+ const allocator = stack.get();
+ const vals = try allocator.alloc(Builder.Constant, llvm_len);
+ defer allocator.free(vals);
+ const fields = try allocator.alloc(Builder.Type, llvm_len);
+ defer allocator.free(fields);
comptime assert(struct_layout_version == 2);
+ var llvm_index: usize = 0;
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
-
- var it = struct_obj.runtimeFieldIterator(mod);
- while (it.next()) |field_and_index| {
+ var field_it = struct_obj.runtimeFieldIterator(mod);
+ while (field_it.next()) |field_and_index| {
const field = field_and_index.field;
const field_align = field.alignment(mod, struct_obj.layout);
big_align = @max(big_align, field_align);
@@ -3590,20 +3974,22 @@ pub const Object = struct {
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
+ vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]);
+ assert(fields[llvm_index] ==
+ struct_ty.structFields(&o.builder)[llvm_index]);
+ llvm_index += 1;
}
- const field_llvm_val = try o.lowerValue(.{
- .ty = field.ty,
- .val = try tv.val.fieldValue(mod, field_and_index.index),
- });
-
- need_unnamed = need_unnamed or o.isUnnamedType(field.ty, field_llvm_val);
-
- llvm_fields.appendAssumeCapacity(field_llvm_val);
+ vals[llvm_index] = try o.lowerValue(
+ (try val.fieldValue(mod, field_and_index.index)).toIntern(),
+ );
+ fields[llvm_index] = vals[llvm_index].typeOf(&o.builder);
+ if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index])
+ need_unnamed = true;
+ llvm_index += 1;
offset += field.ty.abiSize(mod);
}
@@ -3612,202 +3998,158 @@ pub const Object = struct {
offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
- const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
+ vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]);
+ assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]);
+ llvm_index += 1;
}
}
+ assert(llvm_index == llvm_len);
- if (need_unnamed) {
- return o.context.constStruct(
- llvm_fields.items.ptr,
- @as(c_uint, @intCast(llvm_fields.items.len)),
- .False,
- );
- } else {
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @as(c_uint, @intCast(llvm_fields.items.len)),
- );
- }
+ return o.builder.structConst(if (need_unnamed)
+ try o.builder.structType(struct_ty.structKind(&o.builder), fields)
+ else
+ struct_ty, vals);
},
else => unreachable,
},
- .un => {
- const llvm_union_ty = try o.lowerType(tv.ty);
- const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) {
- .none => tv.val.castTag(.@"union").?.data,
- else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
- .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() },
- else => unreachable,
- },
- };
-
- const layout = tv.ty.unionGetLayout(mod);
+ .un => |un| {
+ const union_ty = try o.lowerType(ty);
+ const layout = ty.unionGetLayout(mod);
+ if (layout.payload_size == 0) return o.lowerValue(un.tag);
- if (layout.payload_size == 0) {
- return lowerValue(o, .{
- .ty = tv.ty.unionTagTypeSafety(mod).?,
- .val = tag_and_val.tag,
- });
- }
- const union_obj = mod.typeToUnion(tv.ty).?;
- const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, o.module).?;
+ const union_obj = mod.typeToUnion(ty).?;
+ const field_index = ty.unionTagFieldIndex(un.tag.toValue(), o.module).?;
assert(union_obj.haveFieldTypes());
const field_ty = union_obj.fields.values()[field_index].ty;
if (union_obj.layout == .Packed) {
- if (!field_ty.hasRuntimeBits(mod))
- return llvm_union_ty.constNull();
- const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val });
- const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod)));
- const small_int_ty = o.context.intType(ty_bit_size);
- const small_int_val = if (field_ty.isPtrAtRuntime(mod))
- non_int_val.constPtrToInt(small_int_ty)
- else
- non_int_val.constBitCast(small_int_ty);
- return small_int_val.constZExtOrBitCast(llvm_union_ty);
+ if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0);
+ const small_int_val = try o.builder.castConst(
+ if (field_ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
+ try o.lowerValue(un.val),
+ try o.builder.intType(@intCast(field_ty.bitSize(mod))),
+ );
+ return o.builder.convConst(.unsigned, small_int_val, union_ty);
}
// Sometimes we must make an unnamed struct because LLVM does
// not support bitcasting our payload struct to the true union payload type.
// Instead we use an unnamed struct and every reference to the global
// must pointer cast to the expected type before accessing the union.
- var need_unnamed: bool = layout.most_aligned_field != field_index;
+ var need_unnamed = layout.most_aligned_field != field_index;
const payload = p: {
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const padding_len = @as(c_uint, @intCast(layout.payload_size));
- break :p o.context.intType(8).arrayType(padding_len).getUndef();
+ const padding_len = layout.payload_size;
+ break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8));
}
- const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val });
- need_unnamed = need_unnamed or o.isUnnamedType(field_ty, field);
+ const payload = try o.lowerValue(un.val);
+ const payload_ty = payload.typeOf(&o.builder);
+ if (payload_ty != union_ty.structFields(&o.builder)[
+ @intFromBool(layout.tag_align >= layout.payload_align)
+ ]) need_unnamed = true;
const field_size = field_ty.abiSize(mod);
- if (field_size == layout.payload_size) {
- break :p field;
- }
- const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size));
- const fields: [2]*llvm.Value = .{
- field, o.context.intType(8).arrayType(padding_len).getUndef(),
- };
- break :p o.context.constStruct(&fields, fields.len, .True);
+ if (field_size == layout.payload_size) break :p payload;
+ const padding_len = layout.payload_size - field_size;
+ const padding_ty = try o.builder.arrayType(padding_len, .i8);
+ break :p try o.builder.structConst(
+ try o.builder.structType(.@"packed", &.{ payload_ty, padding_ty }),
+ &.{ payload, try o.builder.undefConst(padding_ty) },
+ );
};
+ const payload_ty = payload.typeOf(&o.builder);
- if (layout.tag_size == 0) {
- const fields: [1]*llvm.Value = .{payload};
- if (need_unnamed) {
- return o.context.constStruct(&fields, fields.len, .False);
- } else {
- return llvm_union_ty.constNamedStruct(&fields, fields.len);
- }
- }
- const llvm_tag_value = try lowerValue(o, .{
- .ty = tv.ty.unionTagTypeSafety(mod).?,
- .val = tag_and_val.tag,
- });
- var fields: [3]*llvm.Value = undefined;
- var fields_len: c_uint = 2;
+ if (layout.tag_size == 0) return o.builder.structConst(if (need_unnamed)
+ try o.builder.structType(union_ty.structKind(&o.builder), &.{payload_ty})
+ else
+ union_ty, &.{payload});
+ const tag = try o.lowerValue(un.tag);
+ const tag_ty = tag.typeOf(&o.builder);
+ var fields: [3]Builder.Type = undefined;
+ var vals: [3]Builder.Constant = undefined;
+ var len: usize = 2;
if (layout.tag_align >= layout.payload_align) {
- fields = .{ llvm_tag_value, payload, undefined };
+ fields = .{ tag_ty, payload_ty, undefined };
+ vals = .{ tag, payload, undefined };
} else {
- fields = .{ payload, llvm_tag_value, undefined };
+ fields = .{ payload_ty, tag_ty, undefined };
+ vals = .{ payload, tag, undefined };
}
if (layout.padding != 0) {
- fields[2] = o.context.intType(8).arrayType(layout.padding).getUndef();
- fields_len = 3;
- }
- if (need_unnamed) {
- return o.context.constStruct(&fields, fields_len, .False);
- } else {
- return llvm_union_ty.constNamedStruct(&fields, fields_len);
+ fields[2] = try o.builder.arrayType(layout.padding, .i8);
+ vals[2] = try o.builder.undefConst(fields[2]);
+ len = 3;
}
+ return o.builder.structConst(if (need_unnamed)
+ try o.builder.structType(union_ty.structKind(&o.builder), fields[0..len])
+ else
+ union_ty, vals[0..len]);
},
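As a rough sketch of the payload/padding arithmetic the .un branch performs (the union type here is only illustrative):

const std = @import("std");

test "union constants pad the payload to the largest field" {
    const U = union(enum) { small: u8, big: u64 };
    const payload_size = @max(@sizeOf(u8), @sizeOf(u64));
    // The union also carries its tag (plus alignment padding), so it is
    // larger than the payload area alone.
    try std.testing.expect(@sizeOf(U) > payload_size);
    // When `small` is the active field, an undef [7 x i8] tail fills the
    // rest of the payload area, mirroring the padding constant built above.
    try std.testing.expectEqual(@as(usize, 7), payload_size - @sizeOf(u8));
}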
.memoized_call => unreachable,
- }
+ };
}
- fn lowerIntAsPtr(o: *Object, val: Value) Error!*llvm.Value {
+ fn lowerIntAsPtr(o: *Object, val: InternPool.Index) Allocator.Error!Builder.Constant {
const mod = o.module;
- switch (mod.intern_pool.indexToKey(val.toIntern())) {
- .undef => return o.context.pointerType(0).getUndef(),
+ switch (mod.intern_pool.indexToKey(val)) {
+ .undef => return o.builder.undefConst(.ptr),
.int => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_space, mod);
- const llvm_int = lowerBigInt(o, Type.usize, bigint);
- return llvm_int.constIntToPtr(o.context.pointerType(0));
+ const bigint = val.toValue().toBigInt(&bigint_space, mod);
+ const llvm_int = try lowerBigInt(o, Type.usize, bigint);
+ return o.builder.castConst(.inttoptr, llvm_int, .ptr);
},
else => unreachable,
}
}
- fn lowerBigInt(o: *Object, ty: Type, bigint: std.math.big.int.Const) *llvm.Value {
+ fn lowerBigInt(
+ o: *Object,
+ ty: Type,
+ bigint: std.math.big.int.Const,
+ ) Allocator.Error!Builder.Constant {
const mod = o.module;
- const int_info = ty.intInfo(mod);
- assert(int_info.bits != 0);
- const llvm_type = o.context.intType(int_info.bits);
-
- const unsigned_val = v: {
- if (bigint.limbs.len == 1) {
- break :v llvm_type.constInt(bigint.limbs[0], .False);
- }
- if (@sizeOf(usize) == @sizeOf(u64)) {
- break :v llvm_type.constIntOfArbitraryPrecision(
- @as(c_uint, @intCast(bigint.limbs.len)),
- bigint.limbs.ptr,
- );
- }
- @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
- };
- if (!bigint.positive) {
- return llvm.constNeg(unsigned_val);
- }
- return unsigned_val;
+ return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint);
}
const ParentPtr = struct {
ty: Type,
- llvm_ptr: *llvm.Value,
+ llvm_ptr: Builder.Value,
};
- fn lowerParentPtrDecl(
- o: *Object,
- ptr_val: Value,
- decl_index: Module.Decl.Index,
- ) Error!*llvm.Value {
+ fn lowerParentPtrDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant {
const mod = o.module;
const decl = mod.declPtr(decl_index);
try mod.markDeclAlive(decl);
const ptr_ty = try mod.singleMutPtrType(decl.ty);
- return try o.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
+ return o.lowerDeclRefValue(ptr_ty, decl_index);
}
- fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
+ fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Allocator.Error!Builder.Constant {
const mod = o.module;
- const target = mod.getTarget();
return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
- .decl => |decl| o.lowerParentPtrDecl(ptr_val, decl),
- .mut_decl => |mut_decl| o.lowerParentPtrDecl(ptr_val, mut_decl.decl),
- .int => |int| o.lowerIntAsPtr(int.toValue()),
+ .decl => |decl| o.lowerParentPtrDecl(decl),
+ .mut_decl => |mut_decl| o.lowerParentPtrDecl(mut_decl.decl),
+ .int => |int| try o.lowerIntAsPtr(int),
.eu_payload => |eu_ptr| {
- const parent_llvm_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true);
+ const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true);
const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
const payload_ty = eu_ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// In this case, we represent pointer to error union the same as pointer
// to the payload.
- return parent_llvm_ptr;
+ return parent_ptr;
}
- const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
- const llvm_u32 = o.context.intType(32);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(payload_offset, .False),
- };
- const eu_llvm_ty = try o.lowerType(eu_ty);
- return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ const index: u32 =
+ if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1;
+ return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
+ try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, index),
+ });
},
.opt_payload => |opt_ptr| {
- const parent_llvm_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true);
+ const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true);
const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
const payload_ty = opt_ty.optionalChild(mod);
@@ -3816,99 +4158,89 @@ pub const Object = struct {
{
// In this case, we represent pointer to optional the same as pointer
// to the payload.
- return parent_llvm_ptr;
+ return parent_ptr;
}
- const llvm_u32 = o.context.intType(32);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(0, .False),
- };
- const opt_llvm_ty = try o.lowerType(opt_ty);
- return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{
+ try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, 0),
+ });
},
.comptime_field => unreachable,
.elem => |elem_ptr| {
- const parent_llvm_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true);
-
- const llvm_usize = try o.lowerType(Type.usize);
- const indices: [1]*llvm.Value = .{
- llvm_usize.constInt(elem_ptr.index, .False),
- };
+ const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true);
const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
- const elem_llvm_ty = try o.lowerType(elem_ty);
- return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+
+ return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
+ try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
+ });
},
.field => |field_ptr| {
- const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
+ const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
- const field_index = @as(u32, @intCast(field_ptr.index));
- const llvm_u32 = o.context.intType(32);
+ const field_index: u32 = @intCast(field_ptr.index);
switch (parent_ty.zigTypeTag(mod)) {
.Union => {
if (parent_ty.containerLayout(mod) == .Packed) {
- return parent_llvm_ptr;
+ return parent_ptr;
}
const layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
// In this case a pointer to the union and a pointer to any
// (void) payload is the same.
- return parent_llvm_ptr;
+ return parent_ptr;
}
- const llvm_pl_index = if (layout.tag_size == 0)
- 0
- else
- @intFromBool(layout.tag_align >= layout.payload_align);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_pl_index, .False),
- };
+
const parent_llvm_ty = try o.lowerType(parent_ty);
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
+ try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, @intFromBool(
+ layout.tag_size > 0 and layout.tag_align >= layout.payload_align,
+ )),
+ });
},
.Struct => {
if (parent_ty.containerLayout(mod) == .Packed) {
- if (!byte_aligned) return parent_llvm_ptr;
- const llvm_usize = o.context.intType(target.ptrBitWidth());
- const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
+ if (!byte_aligned) return parent_ptr;
+ const llvm_usize = try o.lowerType(Type.usize);
+ const base_addr =
+ try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
// count bits of fields before this one
const prev_bits = b: {
var b: usize = 0;
for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- b += @as(usize, @intCast(field.ty.bitSize(mod)));
+ if (field.is_comptime) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ b += @intCast(field.ty.bitSize(mod));
}
break :b b;
};
- const byte_offset = llvm_usize.constInt(prev_bits / 8, .False);
- const field_addr = base_addr.constAdd(byte_offset);
- const final_llvm_ty = o.context.pointerType(0);
- return field_addr.constIntToPtr(final_llvm_ty);
+ const byte_offset = try o.builder.intConst(llvm_usize, prev_bits / 8);
+ const field_addr = try o.builder.binConst(.add, base_addr, byte_offset);
+ return o.builder.castConst(.inttoptr, field_addr, .ptr);
}
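The packed-struct field pointer above is the base address plus (bits of preceding runtime fields) / 8; a plain-Zig check of that arithmetic on an illustrative type:

const std = @import("std");

test "byte offset of a byte-aligned packed field" {
    const P = packed struct { flags: u8, count: u16, tail: u8 };
    // `count` is preceded by 8 bits, so the constant GEP adds 8 / 8 = 1 byte.
    try std.testing.expectEqual(@as(u64, 8), @bitOffsetOf(P, "count"));
    try std.testing.expectEqual(@as(u64, 1), @bitOffsetOf(P, "count") / 8);
}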
- const parent_llvm_ty = try o.lowerType(parent_ty);
- if (llvmField(parent_ty, field_index, mod)) |llvm_field| {
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_field.index, .False),
- };
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- } else {
- const llvm_index = llvm_u32.constInt(@intFromBool(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
- const indices: [1]*llvm.Value = .{llvm_index};
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- }
+ return o.builder.gepConst(
+ .inbounds,
+ try o.lowerType(parent_ty),
+ parent_ptr,
+ null,
+ if (llvmField(parent_ty, field_index, mod)) |llvm_field| &.{
+ try o.builder.intConst(.i32, 0),
+ try o.builder.intConst(.i32, llvm_field.index),
+ } else &.{
+ try o.builder.intConst(.i32, @intFromBool(
+ parent_ty.hasRuntimeBitsIgnoreComptime(mod),
+ )),
+ },
+ );
},
.Pointer => {
assert(parent_ty.isSlice(mod));
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(field_index, .False),
- };
const parent_llvm_ty = try o.lowerType(parent_ty);
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
+ return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
+ try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, field_index),
+ });
},
else => unreachable,
}
@@ -3916,11 +4248,7 @@ pub const Object = struct {
};
}
- fn lowerDeclRefValue(
- o: *Object,
- tv: TypedValue,
- decl_index: Module.Decl.Index,
- ) Error!*llvm.Value {
+ fn lowerDeclRefValue(o: *Object, ty: Type, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant {
const mod = o.module;
// In the case of something like:
@@ -3931,69 +4259,59 @@ pub const Object = struct {
const decl = mod.declPtr(decl_index);
if (decl.val.getFunction(mod)) |func| {
if (func.owner_decl != decl_index) {
- return o.lowerDeclRefValue(tv, func.owner_decl);
+ return o.lowerDeclRefValue(ty, func.owner_decl);
}
} else if (decl.val.getExternFunc(mod)) |func| {
if (func.decl != decl_index) {
- return o.lowerDeclRefValue(tv, func.decl);
+ return o.lowerDeclRefValue(ty, func.decl);
}
}
const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
- (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic))
- {
- return o.lowerPtrToVoid(tv.ty);
- }
+ (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) return o.lowerPtrToVoid(ty);
try mod.markDeclAlive(decl);
- const llvm_decl_val = if (is_fn_body)
- try o.resolveLlvmFunction(decl_index)
+ const llvm_global = if (is_fn_body)
+ (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global
else
- try o.resolveGlobalDecl(decl_index);
+ (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global;
- const target = mod.getTarget();
- const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
- const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
- const llvm_decl_wanted_ptr_ty = o.context.pointerType(llvm_wanted_addrspace);
- break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty);
- } else llvm_decl_val;
-
- const llvm_type = try o.lowerType(tv.ty);
- if (tv.ty.zigTypeTag(mod) == .Int) {
- return llvm_val.constPtrToInt(llvm_type);
- } else {
- return llvm_val.constBitCast(llvm_type);
- }
+ const llvm_val = try o.builder.convConst(
+ .unneeded,
+ llvm_global.toConst(),
+ try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())),
+ );
+
+ return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) {
+ .signed => .signed,
+ .unsigned => .unsigned,
+ } else .unneeded, llvm_val, try o.lowerType(ty));
}
- fn lowerPtrToVoid(o: *Object, ptr_ty: Type) !*llvm.Value {
+ fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant {
const mod = o.module;
// Even though we are pointing at something which has zero bits (e.g. `void`),
// pointers are defined to have bits. So we must return something here.
// The value cannot be undefined, because we use the `nonnull` annotation
// for non-optional pointers. We also need to respect the alignment, even though
// the address will never be dereferenced.
- const llvm_usize = try o.lowerType(Type.usize);
- const llvm_ptr_ty = try o.lowerType(ptr_ty);
- if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| {
- return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty);
- }
- // Note that these 0xaa values are appropriate even in release-optimized builds
- // because we need a well-defined value that is not null, and LLVM does not
- // have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR
- // instruction is followed by a `wrap_optional`, it will return this value
- // verbatim, and the result should test as non-null.
- const target = mod.getTarget();
- const int = switch (target.ptrBitWidth()) {
- 16 => llvm_usize.constInt(0xaaaa, .False),
- 32 => llvm_usize.constInt(0xaaaaaaaa, .False),
- 64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False),
+ const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional() orelse
+ // Note that these 0xaa values are appropriate even in release-optimized builds
+ // because we need a well-defined value that is not null, and LLVM does not
+ // have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR
+ // instruction is followed by a `wrap_optional`, it will return this value
+ // verbatim, and the result should test as non-null.
+ switch (mod.getTarget().ptrBitWidth()) {
+ 16 => 0xaaaa,
+ 32 => 0xaaaaaaaa,
+ 64 => 0xaaaaaaaa_aaaaaaaa,
else => unreachable,
};
- return int.constIntToPtr(llvm_ptr_ty);
+ const llvm_usize = try o.lowerType(Type.usize);
+ const llvm_ptr_ty = try o.lowerType(ptr_ty);
+ return o.builder.castConst(.inttoptr, try o.builder.intConst(llvm_usize, int), llvm_ptr_ty);
}
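A standalone sketch of the fallback address pattern chosen above when the pointer type specifies no alignment; the helper name is hypothetical and only mirrors the switch on pointer width:

const std = @import("std");

fn placeholderAddr(ptr_bits: u16) u64 {
    // Well-defined, non-null filler per pointer width (illustrative only).
    return switch (ptr_bits) {
        16 => 0xaaaa,
        32 => 0xaaaaaaaa,
        64 => 0xaaaaaaaa_aaaaaaaa,
        else => unreachable,
    };
}

test "placeholder addresses are never null" {
    for ([_]u16{ 16, 32, 64 }) |bits| {
        try std.testing.expect(placeholderAddr(bits) != 0);
    }
}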
fn addAttr(o: *Object, val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
@@ -4023,7 +4341,7 @@ pub const Object = struct {
) void {
const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
assert(kind_id != 0);
- const llvm_attr = o.context.createEnumAttribute(kind_id, int);
+ const llvm_attr = o.builder.llvm.context.createEnumAttribute(kind_id, int);
val.addAttributeAtIndex(index, llvm_attr);
}
@@ -4034,11 +4352,11 @@ pub const Object = struct {
name: []const u8,
value: []const u8,
) void {
- const llvm_attr = o.context.createStringAttribute(
+ const llvm_attr = o.builder.llvm.context.createStringAttribute(
name.ptr,
- @as(c_uint, @intCast(name.len)),
+ @intCast(name.len),
value.ptr,
- @as(c_uint, @intCast(value.len)),
+ @intCast(value.len),
);
val.addAttributeAtIndex(index, llvm_attr);
}
@@ -4063,23 +4381,23 @@ pub const Object = struct {
/// widen it before using it and then truncate the result.
/// RMW exchange of floating-point values is bitcasted to same-sized integer
/// types to work around a LLVM deficiency when targeting ARM/AArch64.
- fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
+ fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type {
const mod = o.module;
const int_ty = switch (ty.zigTypeTag(mod)) {
.Int => ty,
.Enum => ty.intTagType(mod),
.Float => {
- if (!is_rmw_xchg) return null;
- return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8)));
+ if (!is_rmw_xchg) return .none;
+ return o.builder.intType(@intCast(ty.abiSize(mod) * 8));
},
- .Bool => return o.context.intType(8),
- else => return null,
+ .Bool => return .i8,
+ else => return .none,
};
const bit_count = int_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
- return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8)));
+ return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8));
} else {
- return null;
+ return .none;
}
}
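The widening condition used by getAtomicAbiType, pulled out as a standalone sketch (the helper name is hypothetical):

const std = @import("std");

fn needsWidening(bit_count: u16) bool {
    return !std.math.isPowerOfTwo(bit_count) or bit_count % 8 != 0;
}

test "integer widths that get widened for atomics" {
    try std.testing.expect(needsWidening(3)); // u3: widened to its ABI size
    try std.testing.expect(needsWidening(24)); // byte multiple, but not a power of two
    try std.testing.expect(!needsWidening(32)); // used as-is
}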
@@ -4120,13 +4438,13 @@ pub const Object = struct {
llvm_arg_i: u32,
alignment: u32,
byval_attr: bool,
- param_llvm_ty: *llvm.Type,
+ param_llvm_ty: Builder.Type,
) void {
o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
o.addArgAttr(llvm_fn, llvm_arg_i, "readonly");
o.addArgAttrInt(llvm_fn, llvm_arg_i, "align", alignment);
if (byval_attr) {
- llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty);
+ llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty.toLlvm(&o.builder));
}
}
};
@@ -4159,20 +4477,26 @@ pub const DeclGen = struct {
_ = try o.resolveLlvmFunction(extern_func.decl);
} else {
const target = mod.getTarget();
- var global = try o.resolveGlobalDecl(decl_index);
- global.setAlignment(decl.getAlignment(mod));
- if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s);
+ const variable = try o.resolveGlobalDecl(decl_index);
+ const global = variable.ptrConst(&o.builder).global;
+ var llvm_global = global.toLlvm(&o.builder);
+ variable.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod));
+ llvm_global.setAlignment(decl.getAlignment(mod));
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| {
+ variable.ptr(&o.builder).section = try o.builder.string(section);
+ llvm_global.setSection(section);
+ }
assert(decl.has_tv);
- const init_val = if (decl.val.getVariable(mod)) |variable| init_val: {
- break :init_val variable.init;
- } else init_val: {
- global.setGlobalConstant(.True);
+ const init_val = if (decl.val.getVariable(mod)) |decl_var| decl_var.init else init_val: {
+ variable.ptr(&o.builder).mutability = .constant;
+ llvm_global.setGlobalConstant(.True);
break :init_val decl.val.toIntern();
};
if (init_val != .none) {
- const llvm_init = try o.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() });
- if (global.globalGetValueType() == llvm_init.typeOf()) {
- global.setInitializer(llvm_init);
+ const llvm_init = try o.lowerValue(init_val);
+ const llvm_init_ty = llvm_init.typeOf(&o.builder);
+ if (global.ptrConst(&o.builder).type == llvm_init_ty) {
+ llvm_global.setInitializer(llvm_init.toLlvm(&o.builder));
} else {
// LLVM does not allow us to change the type of globals. So we must
// create a new global with the correct type, copy all its attributes,
@@ -4189,23 +4513,27 @@ pub const DeclGen = struct {
// Related: https://github.com/ziglang/zig/issues/13265
const llvm_global_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
const new_global = o.llvm_module.addGlobalInAddressSpace(
- llvm_init.typeOf(),
+ llvm_init_ty.toLlvm(&o.builder),
"",
- llvm_global_addrspace,
+ @intFromEnum(llvm_global_addrspace),
);
- new_global.setLinkage(global.getLinkage());
- new_global.setUnnamedAddr(global.getUnnamedAddress());
- new_global.setAlignment(global.getAlignment());
- if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
- new_global.setSection(s);
- new_global.setInitializer(llvm_init);
+ new_global.setLinkage(llvm_global.getLinkage());
+ new_global.setUnnamedAddr(llvm_global.getUnnamedAddress());
+ new_global.setAlignment(llvm_global.getAlignment());
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
+ new_global.setSection(section);
+ new_global.setInitializer(llvm_init.toLlvm(&o.builder));
// TODO: How should this work when the address space of a global changes?
- global.replaceAllUsesWith(new_global);
- o.decl_map.putAssumeCapacity(decl_index, new_global);
- new_global.takeName(global);
- global.deleteGlobal();
- global = new_global;
+ llvm_global.replaceAllUsesWith(new_global);
+ new_global.takeName(llvm_global);
+ o.builder.llvm.globals.items[@intFromEnum(variable.ptrConst(&o.builder).global)] =
+ new_global;
+ llvm_global.deleteGlobal();
+ llvm_global = new_global;
+ variable.ptr(&o.builder).mutability = .global;
+ global.ptr(&o.builder).type = llvm_init_ty;
}
+ variable.ptr(&o.builder).init = llvm_init;
}
if (o.di_builder) |dib| {
@@ -4216,7 +4544,7 @@ pub const DeclGen = struct {
const di_global = dib.createGlobalVariableExpression(
di_file.toScope(),
mod.intern_pool.stringToSlice(decl.name),
- global.getValueName(),
+ llvm_global.getValueName(),
di_file,
line_number,
try o.lowerDebugType(decl.ty, .full),
@@ -4224,7 +4552,7 @@ pub const DeclGen = struct {
);
try o.di_map.put(o.gpa, dg.decl, di_global.getVariable().toNode());
- if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global);
+ if (!is_internal_linkage or decl.isExtern(mod)) llvm_global.attachMetaData(di_global);
}
}
}
@@ -4235,7 +4563,7 @@ pub const FuncGen = struct {
dg: *DeclGen,
air: Air,
liveness: Liveness,
- context: *llvm.Context,
+ wip: Builder.WipFunction,
builder: *llvm.Builder,
di_scope: ?*llvm.DIScope,
di_file: ?*llvm.DIFile,
@@ -4252,43 +4580,44 @@ pub const FuncGen = struct {
/// This stores the LLVM values used in a function, such that they can be referred to
/// in other instructions. This table is cleared before every function is generated.
- func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, *llvm.Value),
+ func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, Builder.Value),
/// If the return type is sret, this is the result pointer. Otherwise null.
/// Note that this can disagree with isByRef for the return type in the case
/// of C ABI functions.
- ret_ptr: ?*llvm.Value,
+ ret_ptr: Builder.Value,
/// Any function that needs to perform Valgrind client requests needs an array alloca
/// instruction, however a maximum of one per function is needed.
- valgrind_client_request_array: ?*llvm.Value = null,
+ valgrind_client_request_array: Builder.Value = .none,
/// These fields are used to refer to the LLVM value of the function parameters
/// in an Arg instruction.
/// This list may be shorter than the list according to the zig type system;
/// it omits 0-bit types. If the function uses sret as the first parameter,
/// this slice does not include it.
- args: []const *llvm.Value,
- arg_index: c_uint,
+ args: []const Builder.Value,
+ arg_index: usize,
- llvm_func: *llvm.Value,
-
- err_ret_trace: ?*llvm.Value = null,
+ err_ret_trace: Builder.Value = .none,
/// This data structure is used to implement breaking to blocks.
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
- parent_bb: *llvm.BasicBlock,
+ parent_bb: Builder.Function.Block.Index,
breaks: *BreakList,
}),
- single_threaded: bool,
+ sync_scope: Builder.SyncScope,
const DbgState = struct { loc: *llvm.DILocation, scope: *llvm.DIScope, base_line: u32 };
- const BreakList = std.MultiArrayList(struct {
- bb: *llvm.BasicBlock,
- val: *llvm.Value,
- });
+ const BreakList = union {
+ list: std.MultiArrayList(struct {
+ bb: Builder.Function.Block.Index,
+ val: Builder.Value,
+ }),
+ len: usize,
+ };
fn deinit(self: *FuncGen) void {
- self.builder.dispose();
+ self.wip.deinit();
self.dbg_inlined.deinit(self.gpa);
self.dbg_block_stack.deinit(self.gpa);
self.func_inst_table.deinit(self.gpa);
@@ -4300,7 +4629,7 @@ pub const FuncGen = struct {
return self.dg.todo(format, args);
}
- fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*llvm.Value {
+ fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !Builder.Value {
const gpa = self.gpa;
const gop = try self.func_inst_table.getOrPut(gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
@@ -4311,14 +4640,14 @@ pub const FuncGen = struct {
.ty = self.typeOf(inst),
.val = (try self.air.value(inst, mod)).?,
});
- gop.value_ptr.* = llvm_val;
- return llvm_val;
+ gop.value_ptr.* = llvm_val.toValue();
+ return llvm_val.toValue();
}
- fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
+ fn resolveValue(self: *FuncGen, tv: TypedValue) Error!Builder.Constant {
const o = self.dg.object;
const mod = o.module;
- const llvm_val = try o.lowerValue(tv);
+ const llvm_val = try o.lowerValue(tv.val.toIntern());
if (!isByRef(tv.ty, mod)) return llvm_val;
// We have an LLVM value but we need to create a global constant and
@@ -4326,17 +4655,50 @@ pub const FuncGen = struct {
const target = mod.getTarget();
const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
- const global = o.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
- global.setInitializer(llvm_val);
- global.setLinkage(.Private);
- global.setGlobalConstant(.True);
- global.setUnnamedAddr(.True);
- global.setAlignment(tv.ty.abiAlignment(mod));
- const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
- global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
- else
- global;
- return addrspace_casted_ptr;
+ const llvm_ty = llvm_val.typeOf(&o.builder);
+ const llvm_alignment = tv.ty.abiAlignment(mod);
+ const llvm_global = o.llvm_module.addGlobalInAddressSpace(llvm_ty.toLlvm(&o.builder), "", @intFromEnum(llvm_actual_addrspace));
+ llvm_global.setInitializer(llvm_val.toLlvm(&o.builder));
+ llvm_global.setLinkage(.Private);
+ llvm_global.setGlobalConstant(.True);
+ llvm_global.setUnnamedAddr(.True);
+ llvm_global.setAlignment(llvm_alignment);
+
+ var global = Builder.Global{
+ .linkage = .private,
+ .unnamed_addr = .unnamed_addr,
+ .addr_space = llvm_actual_addrspace,
+ .type = llvm_ty,
+ .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
+ };
+ var variable = Builder.Variable{
+ .global = @enumFromInt(o.builder.globals.count()),
+ .mutability = .constant,
+ .init = llvm_val,
+ .alignment = Builder.Alignment.fromByteUnits(llvm_alignment),
+ };
+ try o.builder.llvm.globals.append(o.gpa, llvm_global);
+ const global_index = try o.builder.addGlobal(.empty, global);
+ try o.builder.variables.append(o.gpa, variable);
+
+ return o.builder.convConst(
+ .unneeded,
+ global_index.toConst(),
+ try o.builder.ptrType(llvm_wanted_addrspace),
+ );
+ }
+
+ fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant {
+ const o = self.dg.object;
+ const mod = o.module;
+ if (o.null_opt_usize == .no_init) {
+ const ty = try mod.intern(.{ .opt_type = .usize_type });
+ o.null_opt_usize = try self.resolveValue(.{
+ .ty = ty.toType(),
+ .val = (try mod.intern(.{ .opt = .{ .ty = ty, .val = .none } })).toValue(),
+ });
+ }
+ return o.null_opt_usize;
}
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
@@ -4345,10 +4707,9 @@ pub const FuncGen = struct {
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
for (body, 0..) |inst, i| {
- if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
- continue;
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
- const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
+ const val: Builder.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
.add_optimized => try self.airAdd(inst, true),
@@ -4439,15 +4800,15 @@ pub const FuncGen = struct {
.cmp_vector_optimized => try self.airCmpVector(inst, true),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
- .is_non_null => try self.airIsNonNull(inst, false, .NE),
- .is_non_null_ptr => try self.airIsNonNull(inst, true , .NE),
- .is_null => try self.airIsNonNull(inst, false, .EQ),
- .is_null_ptr => try self.airIsNonNull(inst, true , .EQ),
+ .is_non_null => try self.airIsNonNull(inst, false, .ne),
+ .is_non_null_ptr => try self.airIsNonNull(inst, true , .ne),
+ .is_null => try self.airIsNonNull(inst, false, .eq),
+ .is_null_ptr => try self.airIsNonNull(inst, true , .eq),
- .is_non_err => try self.airIsErr(inst, .EQ, false),
- .is_non_err_ptr => try self.airIsErr(inst, .EQ, true),
- .is_err => try self.airIsErr(inst, .NE, false),
- .is_err_ptr => try self.airIsErr(inst, .NE, true),
+ .is_non_err => try self.airIsErr(inst, .eq, false),
+ .is_non_err_ptr => try self.airIsErr(inst, .eq, true),
+ .is_err => try self.airIsErr(inst, .ne, false),
+ .is_err_ptr => try self.airIsErr(inst, .ne, true),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
@@ -4524,10 +4885,10 @@ pub const FuncGen = struct {
.reduce => try self.airReduce(inst, false),
.reduce_optimized => try self.airReduce(inst, true),
- .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
- .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
- .atomic_store_release => try self.airAtomicStore(inst, .Release),
- .atomic_store_seq_cst => try self.airAtomicStore(inst, .SequentiallyConsistent),
+ .atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(body[i..]),
@@ -4569,8 +4930,8 @@ pub const FuncGen = struct {
.inferred_alloc, .inferred_alloc_comptime => unreachable,
- .unreach => self.airUnreach(inst),
- .dbg_stmt => self.airDbgStmt(inst),
+ .unreach => try self.airUnreach(inst),
+ .dbg_stmt => try self.airDbgStmt(inst),
.dbg_inline_begin => try self.airDbgInlineBegin(inst),
.dbg_inline_end => try self.airDbgInlineEnd(inst),
.dbg_block_begin => try self.airDbgBlockBegin(),
@@ -4588,17 +4949,14 @@ pub const FuncGen = struct {
.work_group_id => try self.airWorkGroupId(inst),
// zig fmt: on
};
- if (opt_value) |val| {
- const ref = Air.indexToRef(inst);
- try self.func_inst_table.putNoClobber(self.gpa, ref, val);
- }
+ if (val != .none) try self.func_inst_table.putNoClobber(self.gpa, Air.indexToRef(inst), val);
}
}
- fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value {
+ fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
+ const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const o = self.dg.object;
const mod = o.module;
const ip = &mod.intern_pool;
@@ -4619,19 +4977,21 @@ pub const FuncGen = struct {
const ret_ptr = if (!sret) null else blk: {
const llvm_ret_ty = try o.lowerType(return_type);
- const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
- try llvm_args.append(ret_ptr);
+ const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+ const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment);
+ try llvm_args.append(ret_ptr.toLlvm(&self.wip));
break :blk ret_ptr;
};
const err_return_tracing = return_type.isError(mod) and
o.module.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
- try llvm_args.append(self.err_ret_trace.?);
+ assert(self.err_ret_trace != .none);
+ try llvm_args.append(self.err_ret_trace.toLlvm(&self.wip));
}
var it = iterateParamTypes(o, fn_info);
- while (it.nextCall(self, args)) |lowering| switch (lowering) {
+ while (try it.nextCall(self, args)) |lowering| switch (lowering) {
.no_bits => continue,
.byval => {
const arg = args[it.zig_index - 1];
@@ -4639,12 +4999,11 @@ pub const FuncGen = struct {
const llvm_arg = try self.resolveInst(arg);
const llvm_param_ty = try o.lowerType(param_ty);
if (isByRef(param_ty, mod)) {
- const alignment = param_ty.abiAlignment(mod);
- const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, "");
- load_inst.setAlignment(alignment);
- try llvm_args.append(load_inst);
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, "");
+ try llvm_args.append(loaded.toLlvm(&self.wip));
} else {
- try llvm_args.append(llvm_arg);
+ try llvm_args.append(llvm_arg.toLlvm(&self.wip));
}
},
.byref => {
@@ -4652,14 +5011,13 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
if (isByRef(param_ty, mod)) {
- try llvm_args.append(llvm_arg);
+ try llvm_args.append(llvm_arg.toLlvm(&self.wip));
} else {
- const alignment = param_ty.abiAlignment(mod);
- const param_llvm_ty = llvm_arg.typeOf();
- const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
- const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
- store_inst.setAlignment(alignment);
- try llvm_args.append(arg_ptr);
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const param_llvm_ty = llvm_arg.typeOfWip(&self.wip);
+ const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
+ _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
+ try llvm_args.append(arg_ptr.toLlvm(&self.wip));
}
},
.byref_mut => {
@@ -4667,134 +5025,124 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const alignment = param_ty.abiAlignment(mod);
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const param_llvm_ty = try o.lowerType(param_ty);
- const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
+ const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
if (isByRef(param_ty, mod)) {
- const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
- load_inst.setAlignment(alignment);
-
- const store_inst = self.builder.buildStore(load_inst, arg_ptr);
- store_inst.setAlignment(alignment);
- try llvm_args.append(arg_ptr);
+ const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, "");
+ _ = try self.wip.store(.normal, loaded, arg_ptr, alignment);
} else {
- const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
- store_inst.setAlignment(alignment);
- try llvm_args.append(arg_ptr);
+ _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
}
+ try llvm_args.append(arg_ptr.toLlvm(&self.wip));
},
.abi_sized_int => {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
- const int_llvm_ty = self.context.intType(abi_size * 8);
+ const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8));
if (isByRef(param_ty, mod)) {
- const alignment = param_ty.abiAlignment(mod);
- const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
- load_inst.setAlignment(alignment);
- try llvm_args.append(load_inst);
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, "");
+ try llvm_args.append(loaded.toLlvm(&self.wip));
} else {
// LLVM does not allow bitcasting structs so we must allocate
// a local, store as one type, and then load as another type.
- const alignment = @max(
+ const alignment = Builder.Alignment.fromByteUnits(@max(
param_ty.abiAlignment(mod),
- o.target_data.abiAlignmentOfType(int_llvm_ty),
- );
- const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
- const store_inst = self.builder.buildStore(llvm_arg, int_ptr);
- store_inst.setAlignment(alignment);
- const load_inst = self.builder.buildLoad(int_llvm_ty, int_ptr, "");
- load_inst.setAlignment(alignment);
- try llvm_args.append(load_inst);
+ o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)),
+ ));
+ const int_ptr = try self.buildAlloca(int_llvm_ty, alignment);
+ _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
+ const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
+ try llvm_args.append(loaded.toLlvm(&self.wip));
}
},
.slice => {
const arg = args[it.zig_index - 1];
const llvm_arg = try self.resolveInst(arg);
- const ptr = self.builder.buildExtractValue(llvm_arg, 0, "");
- const len = self.builder.buildExtractValue(llvm_arg, 1, "");
- try llvm_args.ensureUnusedCapacity(2);
- llvm_args.appendAssumeCapacity(ptr);
- llvm_args.appendAssumeCapacity(len);
+ const ptr = try self.wip.extractValue(llvm_arg, &.{0}, "");
+ const len = try self.wip.extractValue(llvm_arg, &.{1}, "");
+ try llvm_args.appendSlice(&.{ ptr.toLlvm(&self.wip), len.toLlvm(&self.wip) });
},
.multiple_llvm_types => {
const arg = args[it.zig_index - 1];
const param_ty = self.typeOf(arg);
- const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
+ const llvm_types = it.types_buffer[0..it.types_len];
const llvm_arg = try self.resolveInst(arg);
const is_by_ref = isByRef(param_ty, mod);
- const arg_ptr = if (is_by_ref) llvm_arg else p: {
- const p = self.buildAlloca(llvm_arg.typeOf(), null);
- const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(param_ty.abiAlignment(mod));
- break :p p;
+ const arg_ptr = if (is_by_ref) llvm_arg else ptr: {
+ const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
+ const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
+ _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
+ break :ptr ptr;
};
- const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False);
- try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
- for (llvm_types, 0..) |field_ty, i_usize| {
- const i = @as(c_uint, @intCast(i_usize));
- const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
- const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
- load_inst.setAlignment(target.ptrBitWidth() / 8);
- llvm_args.appendAssumeCapacity(load_inst);
+ const llvm_ty = try o.builder.structType(.normal, llvm_types);
+ try llvm_args.ensureUnusedCapacity(it.types_len);
+ for (llvm_types, 0..) |field_ty, i| {
+ const alignment =
+ Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
+ const field_ptr = try self.wip.gepStruct(llvm_ty, arg_ptr, i, "");
+ const loaded = try self.wip.load(.normal, field_ty, field_ptr, alignment, "");
+ llvm_args.appendAssumeCapacity(loaded.toLlvm(&self.wip));
}
},
.as_u16 => {
const arg = args[it.zig_index - 1];
const llvm_arg = try self.resolveInst(arg);
- const casted = self.builder.buildBitCast(llvm_arg, self.context.intType(16), "");
- try llvm_args.append(casted);
+ const casted = try self.wip.cast(.bitcast, llvm_arg, .i16, "");
+ try llvm_args.append(casted.toLlvm(&self.wip));
},
.float_array => |count| {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
+ const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
if (!isByRef(arg_ty, mod)) {
- const p = self.buildAlloca(llvm_arg.typeOf(), null);
- const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(arg_ty.abiAlignment(mod));
- llvm_arg = store_inst;
+ const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
+ _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
+ llvm_arg = ptr;
}
const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
- const array_llvm_ty = float_ty.arrayType(count);
+ const array_ty = try o.builder.arrayType(count, float_ty);
- const alignment = arg_ty.abiAlignment(mod);
- const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
- load_inst.setAlignment(alignment);
- try llvm_args.append(load_inst);
+ const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, "");
+ try llvm_args.append(loaded.toLlvm(&self.wip));
},
.i32_array, .i64_array => |arr_len| {
const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
+ const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
if (!isByRef(arg_ty, mod)) {
- const p = self.buildAlloca(llvm_arg.typeOf(), null);
- const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(arg_ty.abiAlignment(mod));
- llvm_arg = store_inst;
+ const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
+ _ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
+ llvm_arg = ptr;
}
- const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);
- const alignment = arg_ty.abiAlignment(mod);
- const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
- load_inst.setAlignment(alignment);
- try llvm_args.append(load_inst);
+ const array_ty =
+ try o.builder.arrayType(arr_len, try o.builder.intType(@intCast(elem_size)));
+ const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, "");
+ try llvm_args.append(loaded.toLlvm(&self.wip));
},
};
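A minimal standalone sketch (not part of the patch) of the `.abi_sized_int` idea in the comment above: since structs cannot be bitcast to integers directly, the bytes go through memory as one type and are read back as the other. The `Pair` type and its values are illustrative only.

    const std = @import("std");

    const Pair = extern struct { a: u16, b: u16 };

    pub fn main() void {
        const p: Pair = .{ .a = 1, .b = 2 };
        var buf: [@sizeOf(Pair)]u8 = undefined;
        @memcpy(&buf, std.mem.asBytes(&p)); // store as the struct type
        const as_int = std.mem.bytesToValue(u32, &buf); // read the same bytes back as an ABI-sized integer
        std.debug.print("0x{x}\n", .{as_int});
    }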
- const call = self.builder.buildCall(
- try o.lowerType(zig_fn_ty),
- llvm_fn,
- llvm_args.items.ptr,
- @as(c_uint, @intCast(llvm_args.items.len)),
- toLlvmCallConv(fn_info.cc, target),
- attr,
- "",
+ const llvm_fn_ty = try o.lowerType(zig_fn_ty);
+ const call = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn.toLlvm(&self.wip),
+ llvm_args.items.ptr,
+ @intCast(llvm_args.items.len),
+ toLlvmCallConv(fn_info.cc, target),
+ attr,
+ "",
+ ),
+ &self.wip,
);
if (callee_ty.zigTypeTag(mod) == .Pointer) {
@@ -4802,12 +5150,12 @@ pub const FuncGen = struct {
it = iterateParamTypes(o, fn_info);
it.llvm_index += @intFromBool(sret);
it.llvm_index += @intFromBool(err_return_tracing);
- while (it.next()) |lowering| switch (lowering) {
+ while (try it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = fn_info.param_types.get(ip)[param_index].toType();
if (!isByRef(param_ty, mod)) {
- o.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
+ o.addByValParamAttrs(call.toLlvm(&self.wip), param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
@@ -4815,10 +5163,10 @@ pub const FuncGen = struct {
const param_ty = fn_info.param_types.get(ip)[param_index].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const alignment = param_ty.abiAlignment(mod);
- o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
+ o.addByRefParamAttrs(call.toLlvm(&self.wip), it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => {
- o.addArgAttr(call, it.llvm_index - 1, "noundef");
+ o.addArgAttr(call.toLlvm(&self.wip), it.llvm_index - 1, "noundef");
},
// No attributes needed for these.
.no_bits,
@@ -4838,41 +5186,40 @@ pub const FuncGen = struct {
if (math.cast(u5, it.zig_index - 1)) |i| {
if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
- o.addArgAttr(call, llvm_arg_i, "noalias");
+ o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "noalias");
}
}
if (param_ty.zigTypeTag(mod) != .Optional) {
- o.addArgAttr(call, llvm_arg_i, "nonnull");
+ o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "nonnull");
}
if (ptr_info.flags.is_const) {
- o.addArgAttr(call, llvm_arg_i, "readonly");
+ o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "readonly");
}
const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1);
- o.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
+ o.addArgAttrInt(call.toLlvm(&self.wip), llvm_arg_i, "align", elem_align);
},
};
}
if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) {
- return null;
+ return .none;
}
if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
- return null;
+ return .none;
}
const llvm_ret_ty = try o.lowerType(return_type);
if (ret_ptr) |rp| {
- call.setCallSret(llvm_ret_ty);
+ call.toLlvm(&self.wip).setCallSret(llvm_ret_ty.toLlvm(&o.builder));
if (isByRef(return_type, mod)) {
return rp;
} else {
// our by-ref status disagrees with sret so we must load.
- const loaded = self.builder.buildLoad(llvm_ret_ty, rp, "");
- loaded.setAlignment(return_type.abiAlignment(mod));
- return loaded;
+ const return_alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+ return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, "");
}
}
@@ -4882,26 +5229,23 @@ pub const FuncGen = struct {
// In this case the function return type is honoring the calling convention by having
// a different LLVM type than the usual one. We solve this here at the callsite
// by using our canonical type, then loading it if necessary.
- const alignment = o.target_data.abiAlignmentOfType(abi_ret_ty);
- const rp = self.buildAlloca(llvm_ret_ty, alignment);
- const store_inst = self.builder.buildStore(call, rp);
- store_inst.setAlignment(alignment);
- if (isByRef(return_type, mod)) {
- return rp;
- } else {
- const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, "");
- load_inst.setAlignment(alignment);
- return load_inst;
- }
+ const alignment = Builder.Alignment.fromByteUnits(
+ o.target_data.abiAlignmentOfType(abi_ret_ty.toLlvm(&o.builder)),
+ );
+ const rp = try self.buildAlloca(llvm_ret_ty, alignment);
+ _ = try self.wip.store(.normal, call, rp, alignment);
+ return if (isByRef(return_type, mod))
+ rp
+ else
+ try self.wip.load(.normal, llvm_ret_ty, rp, alignment, "");
}
if (isByRef(return_type, mod)) {
// our by-ref status disagrees with sret so we must allocate, store,
// and return the allocation pointer.
- const alignment = return_type.abiAlignment(mod);
- const rp = self.buildAlloca(llvm_ret_ty, alignment);
- const store_inst = self.builder.buildStore(call, rp);
- store_inst.setAlignment(alignment);
+ const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
+ const rp = try self.buildAlloca(llvm_ret_ty, alignment);
+ _ = try self.wip.store(.normal, call, rp, alignment);
return rp;
} else {
return call;
@@ -4914,13 +5258,10 @@ pub const FuncGen = struct {
const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?;
const msg_decl = mod.declPtr(msg_decl_index);
const msg_len = msg_decl.ty.childType(mod).arrayLen(mod);
- const msg_ptr = try o.lowerValue(.{
- .ty = msg_decl.ty,
- .val = msg_decl.val,
- });
- const null_opt_addr_global = try o.getNullOptAddr();
+ const msg_ptr = try o.lowerValue(try msg_decl.internValue(mod));
+ const null_opt_addr_global = try fg.resolveNullOptUsize();
const target = mod.getTarget();
- const llvm_usize = fg.context.intType(target.ptrBitWidth());
+ const llvm_usize = try o.lowerType(Type.usize);
// example:
// call fastcc void @test2.panic(
// ptr @builtin.panic_messages.integer_overflow__anon_987, ; msg.ptr
@@ -4929,38 +5270,38 @@ pub const FuncGen = struct {
// ptr @2, ; addr (null ?usize)
// )
const args = [4]*llvm.Value{
- msg_ptr,
- llvm_usize.constInt(msg_len, .False),
- fg.context.pointerType(0).constNull(),
- null_opt_addr_global,
+ msg_ptr.toLlvm(&o.builder),
+ (try o.builder.intConst(llvm_usize, msg_len)).toLlvm(&o.builder),
+ (try o.builder.nullConst(.ptr)).toLlvm(&o.builder),
+ null_opt_addr_global.toLlvm(&o.builder),
};
const panic_func = mod.funcInfo(mod.panic_func_index);
const panic_decl = mod.declPtr(panic_func.owner_decl);
const fn_info = mod.typeToFunc(panic_decl.ty).?;
const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl);
- _ = fg.builder.buildCall(
- try o.lowerType(panic_decl.ty),
- panic_global,
+ _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildCall(
+ (try o.lowerType(panic_decl.ty)).toLlvm(&o.builder),
+ panic_global.toLlvm(&o.builder),
&args,
args.len,
toLlvmCallConv(fn_info.cc, target),
.Auto,
"",
- );
- _ = fg.builder.buildUnreachable();
+ ), &fg.wip);
+ _ = try fg.wip.@"unreachable"();
}
- fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airRet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ret_ty = self.typeOf(un_op);
- if (self.ret_ptr) |ret_ptr| {
+ if (self.ret_ptr != .none) {
const operand = try self.resolveInst(un_op);
const ptr_ty = try mod.singleMutPtrType(ret_ty);
- try self.store(ret_ptr, ptr_ty, operand, .NotAtomic);
- _ = self.builder.buildRetVoid();
- return null;
+ try self.store(self.ret_ptr, ptr_ty, operand, .none);
+ _ = try self.wip.retVoid();
+ return .none;
}
const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -4968,43 +5309,37 @@ pub const FuncGen = struct {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
- const err_int = try o.lowerType(Type.anyerror);
- _ = self.builder.buildRet(err_int.constInt(0, .False));
+ _ = try self.wip.ret(try o.builder.intValue(Builder.Type.err_int, 0));
} else {
- _ = self.builder.buildRetVoid();
+ _ = try self.wip.retVoid();
}
- return null;
+ return .none;
}
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
const operand = try self.resolveInst(un_op);
- const alignment = ret_ty.abiAlignment(mod);
+ const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
if (isByRef(ret_ty, mod)) {
// operand is a pointer however self.ret_ptr is null so that means
// we need to return a value.
- const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
- load_inst.setAlignment(alignment);
- _ = self.builder.buildRet(load_inst);
- return null;
+ _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, ""));
+ return .none;
}
- const llvm_ret_ty = operand.typeOf();
+ const llvm_ret_ty = operand.typeOfWip(&self.wip);
if (abi_ret_ty == llvm_ret_ty) {
- _ = self.builder.buildRet(operand);
- return null;
+ _ = try self.wip.ret(operand);
+ return .none;
}
- const rp = self.buildAlloca(llvm_ret_ty, alignment);
- const store_inst = self.builder.buildStore(operand, rp);
- store_inst.setAlignment(alignment);
- const load_inst = self.builder.buildLoad(abi_ret_ty, rp, "");
- load_inst.setAlignment(alignment);
- _ = self.builder.buildRet(load_inst);
- return null;
+ const rp = try self.buildAlloca(llvm_ret_ty, alignment);
+ _ = try self.wip.store(.normal, operand, rp, alignment);
+ _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, rp, alignment, ""));
+ return .none;
}
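A small user-level illustration (an assumption for clarity, not taken from the patch) of the comment about empty error sets: because such a function is still emitted to return a zero error code, its pointer can be used where an `anyerror`-returning function is expected.

    const std = @import("std");

    fn cannotFail() error{}!void {}

    pub fn main() !void {
        // works because the callee still returns an error code of 0 at the ABI level
        const f: *const fn () anyerror!void = &cannotFail;
        try f();
        std.debug.print("ok\n", .{});
    }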
- fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -5016,36 +5351,34 @@ pub const FuncGen = struct {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
- const err_int = try o.lowerType(Type.anyerror);
- _ = self.builder.buildRet(err_int.constInt(0, .False));
+ _ = try self.wip.ret(try o.builder.intValue(Builder.Type.err_int, 0));
} else {
- _ = self.builder.buildRetVoid();
+ _ = try self.wip.retVoid();
}
- return null;
+ return .none;
}
- if (self.ret_ptr != null) {
- _ = self.builder.buildRetVoid();
- return null;
+ if (self.ret_ptr != .none) {
+ _ = try self.wip.retVoid();
+ return .none;
}
const ptr = try self.resolveInst(un_op);
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
- const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
- loaded.setAlignment(ret_ty.abiAlignment(mod));
- _ = self.builder.buildRet(loaded);
- return null;
+ const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
+ _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, ""));
+ return .none;
}
- fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const list = try self.resolveInst(ty_op.operand);
const arg_ty = self.air.getRefType(ty_op.ty);
const llvm_arg_ty = try o.lowerType(arg_ty);
- return self.builder.buildVAArg(list, llvm_arg_ty, "");
+ return self.wip.vaArg(list, llvm_arg_ty, "");
}
- fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_list = try self.resolveInst(ty_op.operand);
@@ -5053,75 +5386,86 @@ pub const FuncGen = struct {
const llvm_va_list_ty = try o.lowerType(va_list_ty);
const mod = o.module;
- const result_alignment = va_list_ty.abiAlignment(mod);
- const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);
+ const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod));
+ const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
const llvm_fn_name = "llvm.va_copy";
- const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const param_types = [_]*llvm.Type{
- self.context.pointerType(0),
- self.context.pointerType(0),
- };
- const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
- break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type);
- };
+ const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .ptr }, .normal);
+ const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse
+ o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder));
- const args: [2]*llvm.Value = .{ dest_list, src_list };
- _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ const args: [2]*llvm.Value = .{ dest_list.toLlvm(&self.wip), src_list.toLlvm(&self.wip) };
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
- if (isByRef(va_list_ty, mod)) {
- return dest_list;
- } else {
- const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
- loaded.setAlignment(result_alignment);
- return loaded;
- }
+ return if (isByRef(va_list_ty, mod))
+ dest_list
+ else
+ try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, "");
}
- fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const list = try self.resolveInst(un_op);
const llvm_fn_name = "llvm.va_end";
- const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const param_types = [_]*llvm.Type{self.context.pointerType(0)};
- const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
- break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type);
- };
- const args: [1]*llvm.Value = .{list};
- _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
- return null;
+ const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal);
+ const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse
+ o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder));
+
+ const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)};
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
+ return .none;
}
- fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const va_list_ty = self.typeOfIndex(inst);
const llvm_va_list_ty = try o.lowerType(va_list_ty);
- const result_alignment = va_list_ty.abiAlignment(mod);
- const list = self.buildAlloca(llvm_va_list_ty, result_alignment);
+ const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod));
+ const list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
const llvm_fn_name = "llvm.va_start";
- const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const param_types = [_]*llvm.Type{self.context.pointerType(0)};
- const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
- break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type);
- };
- const args: [1]*llvm.Value = .{list};
- _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal);
+ const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse
+ o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder));
- if (isByRef(va_list_ty, mod)) {
- return list;
- } else {
- const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
- loaded.setAlignment(result_alignment);
- return loaded;
- }
+ const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)};
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
+
+ return if (isByRef(va_list_ty, mod))
+ list
+ else
+ try self.wip.load(.normal, llvm_va_list_ty, list, result_alignment, "");
}
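A standalone sketch of the Zig feature the `airCVa*` lowerings above implement, assuming a target with C varargs support: `@cVaStart`, `@cVaArg`, and `@cVaEnd` become the `llvm.va_start` / `llvm.va_copy` / `llvm.va_end` calls plus the `va_list` alloca built here.

    const std = @import("std");

    fn sum(count: c_int, ...) callconv(.C) c_int {
        var ap = @cVaStart(); // lowered by airCVaStart
        defer @cVaEnd(&ap); // lowered by airCVaEnd
        var total: c_int = 0;
        var i: c_int = 0;
        while (i < count) : (i += 1) total += @cVaArg(&ap, c_int); // lowered by airCVaArg
        return total;
    }

    pub fn main() void {
        std.debug.print("{d}\n", .{sum(3, @as(c_int, 1), @as(c_int, 2), @as(c_int, 3))});
    }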
- fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value {
+ fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5132,7 +5476,7 @@ pub const FuncGen = struct {
return self.cmp(lhs, rhs, operand_ty, op);
}
- fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5146,21 +5490,30 @@ pub const FuncGen = struct {
return self.cmp(lhs, rhs, vec_ty, cmp_op);
}
- fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+ const o = self.dg.object;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const llvm_fn = try self.getCmpLtErrorsLenFunction();
- const args: [1]*llvm.Value = .{operand};
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ const args: [1]*llvm.Value = .{operand.toLlvm(&self.wip)};
+ return (try self.wip.unimplemented(.i1, "")).finish(self.builder.buildCall(
+ llvm_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
}
fn cmp(
self: *FuncGen,
- lhs: *llvm.Value,
- rhs: *llvm.Value,
+ lhs: Builder.Value,
+ rhs: Builder.Value,
operand_ty: Type,
op: math.CompareOperator,
- ) Allocator.Error!*llvm.Value {
+ ) Allocator.Error!Builder.Value {
const o = self.dg.object;
const mod = o.module;
const scalar_ty = operand_ty.scalarType(mod);
@@ -5178,46 +5531,47 @@ pub const FuncGen = struct {
// of optionals that are not pointers.
const is_by_ref = isByRef(scalar_ty, mod);
const opt_llvm_ty = try o.lowerType(scalar_ty);
- const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
- const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
- const llvm_i2 = self.context.intType(2);
- const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2, "");
- const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2, "");
- const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, llvm_i2.constInt(1, .False), "");
- const lhs_rhs_ored = self.builder.buildOr(lhs_shifted, rhs_non_null_i2, "");
- const both_null_block = self.context.appendBasicBlock(self.llvm_func, "BothNull");
- const mixed_block = self.context.appendBasicBlock(self.llvm_func, "Mixed");
- const both_pl_block = self.context.appendBasicBlock(self.llvm_func, "BothNonNull");
- const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
- const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block, 2);
- const llvm_i2_00 = llvm_i2.constInt(0b00, .False);
- const llvm_i2_11 = llvm_i2.constInt(0b11, .False);
- llvm_switch.addCase(llvm_i2_00, both_null_block);
- llvm_switch.addCase(llvm_i2_11, both_pl_block);
-
- self.builder.positionBuilderAtEnd(both_null_block);
- _ = self.builder.buildBr(end_block);
-
- self.builder.positionBuilderAtEnd(mixed_block);
- _ = self.builder.buildBr(end_block);
-
- self.builder.positionBuilderAtEnd(both_pl_block);
+ const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref);
+ const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref);
+ const llvm_i2 = try o.builder.intType(2);
+ const lhs_non_null_i2 = try self.wip.cast(.zext, lhs_non_null, llvm_i2, "");
+ const rhs_non_null_i2 = try self.wip.cast(.zext, rhs_non_null, llvm_i2, "");
+ const lhs_shifted = try self.wip.bin(.shl, lhs_non_null_i2, try o.builder.intValue(llvm_i2, 1), "");
+ const lhs_rhs_ored = try self.wip.bin(.@"or", lhs_shifted, rhs_non_null_i2, "");
+ const both_null_block = try self.wip.block(1, "BothNull");
+ const mixed_block = try self.wip.block(1, "Mixed");
+ const both_pl_block = try self.wip.block(1, "BothNonNull");
+ const end_block = try self.wip.block(3, "End");
+ var wip_switch = try self.wip.@"switch"(lhs_rhs_ored, mixed_block, 2);
+ defer wip_switch.finish(&self.wip);
+ try wip_switch.addCase(
+ try o.builder.intConst(llvm_i2, 0b00),
+ both_null_block,
+ &self.wip,
+ );
+ try wip_switch.addCase(
+ try o.builder.intConst(llvm_i2, 0b11),
+ both_pl_block,
+ &self.wip,
+ );
+
+ self.wip.cursor = .{ .block = both_null_block };
+ _ = try self.wip.br(end_block);
+
+ self.wip.cursor = .{ .block = mixed_block };
+ _ = try self.wip.br(end_block);
+
+ self.wip.cursor = .{ .block = both_pl_block };
const lhs_payload = try self.optPayloadHandle(opt_llvm_ty, lhs, scalar_ty, true);
const rhs_payload = try self.optPayloadHandle(opt_llvm_ty, rhs, scalar_ty, true);
const payload_cmp = try self.cmp(lhs_payload, rhs_payload, payload_ty, op);
- _ = self.builder.buildBr(end_block);
- const both_pl_block_end = self.builder.getInsertBlock();
+ _ = try self.wip.br(end_block);
+ const both_pl_block_end = self.wip.cursor.block;
- self.builder.positionBuilderAtEnd(end_block);
- const incoming_blocks: [3]*llvm.BasicBlock = .{
- both_null_block,
- mixed_block,
- both_pl_block_end,
- };
- const llvm_i1 = self.context.intType(1);
- const llvm_i1_0 = llvm_i1.constInt(0, .False);
- const llvm_i1_1 = llvm_i1.constInt(1, .False);
- const incoming_values: [3]*llvm.Value = .{
+ self.wip.cursor = .{ .block = end_block };
+ const llvm_i1_0 = try o.builder.intValue(.i1, 0);
+ const llvm_i1_1 = try o.builder.intValue(.i1, 1);
+ const incoming_values: [3]Builder.Value = .{
switch (op) {
.eq => llvm_i1_1,
.neq => llvm_i1_0,
@@ -5231,47 +5585,48 @@ pub const FuncGen = struct {
payload_cmp,
};
- const phi_node = self.builder.buildPhi(llvm_i1, "");
- comptime assert(incoming_values.len == incoming_blocks.len);
- phi_node.addIncoming(
+ const phi = try self.wip.phi(.i1, "");
+ try phi.finish(
&incoming_values,
- &incoming_blocks,
- incoming_values.len,
+ &.{ both_null_block, mixed_block, both_pl_block_end },
+ &self.wip,
);
- return phi_node;
+ return phi.toValue();
},
.Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }),
else => unreachable,
};
const is_signed = int_ty.isSignedInt(mod);
- const operation: llvm.IntPredicate = switch (op) {
- .eq => .EQ,
- .neq => .NE,
- .lt => if (is_signed) llvm.IntPredicate.SLT else .ULT,
- .lte => if (is_signed) llvm.IntPredicate.SLE else .ULE,
- .gt => if (is_signed) llvm.IntPredicate.SGT else .UGT,
- .gte => if (is_signed) llvm.IntPredicate.SGE else .UGE,
+ const cond: Builder.IntegerCondition = switch (op) {
+ .eq => .eq,
+ .neq => .ne,
+ .lt => if (is_signed) .slt else .ult,
+ .lte => if (is_signed) .sle else .ule,
+ .gt => if (is_signed) .sgt else .ugt,
+ .gte => if (is_signed) .sge else .uge,
};
- return self.builder.buildICmp(operation, lhs, rhs, "");
+ return self.wip.icmp(cond, lhs, rhs, "");
}
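A standalone sketch of the dispatch scheme used for the optional comparison above: the two "is non-null" bits are packed into a 2-bit value, only 0b00 (both null) and 0b11 (both payloads) get dedicated blocks, and everything else falls into the mixed case.

    const std = @import("std");

    fn compareState(lhs: ?u32, rhs: ?u32) []const u8 {
        const lhs_non_null: u2 = @intFromBool(lhs != null);
        const rhs_non_null: u2 = @intFromBool(rhs != null);
        const packed_bits = (lhs_non_null << 1) | rhs_non_null;
        return switch (packed_bits) {
            0b00 => "both null", // BothNull block
            0b11 => "compare payloads", // BothNonNull block
            else => "mixed: not equal", // Mixed block
        };
    }

    pub fn main() void {
        std.debug.print("{s}\n", .{compareState(null, 5)});
    }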
- fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const inst_ty = self.typeOfIndex(inst);
- const parent_bb = self.context.createBasicBlock("Block");
if (inst_ty.isNoReturn(mod)) {
try self.genBody(body);
- return null;
+ return .none;
}
- var breaks: BreakList = .{};
- defer breaks.deinit(self.gpa);
+ const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod);
+
+ var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 };
+ defer if (have_block_result) breaks.list.deinit(self.gpa);
+ const parent_bb = try self.wip.block(0, "Block");
try self.blocks.putNoClobber(self.gpa, inst, .{
.parent_bb = parent_bb,
.breaks = &breaks,
@@ -5280,36 +5635,33 @@ pub const FuncGen = struct {
try self.genBody(body);
- self.llvm_func.appendExistingBasicBlock(parent_bb);
- self.builder.positionBuilderAtEnd(parent_bb);
+ self.wip.cursor = .{ .block = parent_bb };
// Create a phi node only if the block returns a value.
- const is_body = inst_ty.zigTypeTag(mod) == .Fn;
- if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
-
- const raw_llvm_ty = try o.lowerType(inst_ty);
-
- const llvm_ty = ty: {
- // If the zig tag type is a function, this represents an actual function body; not
- // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
- // of function pointers, however the phi makes it a runtime value and therefore
- // the LLVM type has to be wrapped in a pointer.
- if (is_body or isByRef(inst_ty, mod)) {
- break :ty self.context.pointerType(0);
- }
- break :ty raw_llvm_ty;
- };
+ if (have_block_result) {
+ const raw_llvm_ty = try o.lowerType(inst_ty);
+ const llvm_ty: Builder.Type = ty: {
+ // If the zig tag type is a function, this represents an actual function body; not
+ // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
+ // of function pointers, however the phi makes it a runtime value and therefore
+ // the LLVM type has to be wrapped in a pointer.
+ if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) {
+ break :ty .ptr;
+ }
+ break :ty raw_llvm_ty;
+ };
- const phi_node = self.builder.buildPhi(llvm_ty, "");
- phi_node.addIncoming(
- breaks.items(.val).ptr,
- breaks.items(.bb).ptr,
- @as(c_uint, @intCast(breaks.len)),
- );
- return phi_node;
+ parent_bb.ptr(&self.wip).incoming = @intCast(breaks.list.len);
+ const phi = try self.wip.phi(llvm_ty, "");
+ try phi.finish(breaks.list.items(.val), breaks.list.items(.bb), &self.wip);
+ return phi.toValue();
+ } else {
+ parent_bb.ptr(&self.wip).incoming = @intCast(breaks.len);
+ return .none;
+ }
}
- fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const branch = self.air.instructions.items(.data)[inst].br;
const block = self.blocks.get(branch.block_inst).?;
@@ -5317,42 +5669,39 @@ pub const FuncGen = struct {
// Add the values to the lists only if the break provides a value.
const operand_ty = self.typeOf(branch.operand);
const mod = o.module;
- if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
// break instructions.
- try block.breaks.append(self.gpa, .{
- .bb = self.builder.getInsertBlock(),
- .val = val,
- });
- }
- _ = self.builder.buildBr(block.parent_bb);
- return null;
+ try block.breaks.list.append(self.gpa, .{ .bb = self.wip.cursor.block, .val = val });
+ } else block.breaks.len += 1;
+ _ = try self.wip.br(block.parent_bb);
+ return .none;
}
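A standalone example of the source construct that airBlock and airBr lower: each `break :blk value` contributes one incoming (basic block, value) edge to the phi created in the "Block" basic block.

    const std = @import("std");

    pub fn main() void {
        const x: u32 = 7;
        const result = blk: {
            if (x > 5) break :blk x * 2; // one incoming edge
            break :blk x + 1; // another incoming edge
        };
        std.debug.print("{d}\n", .{result});
    }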
- fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
- const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
- const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- _ = self.builder.buildCondBr(cond, then_block, else_block);
+ const then_block = try self.wip.block(1, "Then");
+ const else_block = try self.wip.block(1, "Else");
+ _ = try self.wip.brCond(cond, then_block, else_block);
- self.builder.positionBuilderAtEnd(then_block);
+ self.wip.cursor = .{ .block = then_block };
try self.genBody(then_body);
- self.builder.positionBuilderAtEnd(else_block);
+ self.wip.cursor = .{ .block = else_block };
try self.genBody(else_body);
// No need to reset the insert cursor since this instruction is noreturn.
- return null;
+ return .none;
}
- fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -5367,7 +5716,7 @@ pub const FuncGen = struct {
return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
}
- fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5381,13 +5730,13 @@ pub const FuncGen = struct {
fn lowerTry(
fg: *FuncGen,
- err_union: *llvm.Value,
+ err_union: Builder.Value,
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
can_elide_load: bool,
is_unused: bool,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = fg.dg.object;
const mod = o.module;
const payload_ty = err_union_ty.errorUnionPayload(mod);
@@ -5395,122 +5744,135 @@ pub const FuncGen = struct {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
- const is_err = err: {
- const err_set_ty = try o.lowerType(Type.anyerror);
- const zero = err_set_ty.constNull();
+ const loaded = loaded: {
if (!payload_has_bits) {
// TODO add alignment to this load
- const loaded = if (operand_is_ptr)
- fg.builder.buildLoad(err_set_ty, err_union, "")
+ break :loaded if (operand_is_ptr)
+ try fg.wip.load(.normal, Builder.Type.err_int, err_union, .default, "")
else
err_union;
- break :err fg.builder.buildICmp(.NE, loaded, zero, "");
}
const err_field_index = errUnionErrorOffset(payload_ty, mod);
if (operand_is_ptr or isByRef(err_union_ty, mod)) {
- const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, "");
+ const err_field_ptr =
+ try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, "");
// TODO add alignment to this load
- const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, "");
- break :err fg.builder.buildICmp(.NE, loaded, zero, "");
+ break :loaded try fg.wip.load(
+ .normal,
+ Builder.Type.err_int,
+ err_field_ptr,
+ .default,
+ "",
+ );
}
- const loaded = fg.builder.buildExtractValue(err_union, err_field_index, "");
- break :err fg.builder.buildICmp(.NE, loaded, zero, "");
+ break :loaded try fg.wip.extractValue(err_union, &.{err_field_index}, "");
};
+ const zero = try o.builder.intValue(Builder.Type.err_int, 0);
+ const is_err = try fg.wip.icmp(.ne, loaded, zero, "");
- const return_block = fg.context.appendBasicBlock(fg.llvm_func, "TryRet");
- const continue_block = fg.context.appendBasicBlock(fg.llvm_func, "TryCont");
- _ = fg.builder.buildCondBr(is_err, return_block, continue_block);
+ const return_block = try fg.wip.block(1, "TryRet");
+ const continue_block = try fg.wip.block(1, "TryCont");
+ _ = try fg.wip.brCond(is_err, return_block, continue_block);
- fg.builder.positionBuilderAtEnd(return_block);
+ fg.wip.cursor = .{ .block = return_block };
try fg.genBody(body);
- fg.builder.positionBuilderAtEnd(continue_block);
- }
- if (is_unused) {
- return null;
- }
- if (!payload_has_bits) {
- return if (operand_is_ptr) err_union else null;
+ fg.wip.cursor = .{ .block = continue_block };
}
+ if (is_unused) return .none;
+ if (!payload_has_bits) return if (operand_is_ptr) err_union else .none;
const offset = errUnionPayloadOffset(payload_ty, mod);
if (operand_is_ptr) {
- return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
+ return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
} else if (isByRef(err_union_ty, mod)) {
- const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
+ const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
+ const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
- return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
+ return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false);
}
- const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
- load_inst.setAlignment(payload_ty.abiAlignment(mod));
- return load_inst;
+ const load_ty = err_union_llvm_ty.structFields(&o.builder)[offset];
+ return fg.wip.load(.normal, load_ty, payload_ptr, payload_alignment, "");
}
- return fg.builder.buildExtractValue(err_union, offset, "");
+ return fg.wip.extractValue(err_union, &.{offset}, "");
}
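A standalone reminder (not from the patch) of what lowerTry implements: `try` is the "error code != 0" branch built above, returning on the "TryRet" path and continuing with the payload on "TryCont".

    const std = @import("std");

    fn parseAndIncrement(s: []const u8) !u8 {
        // branch: return the error, or fall through with the payload
        const v = try std.fmt.parseInt(u8, s, 10);
        return v + 1;
    }

    pub fn main() !void {
        std.debug.print("{d}\n", .{try parseAndIncrement("41")});
    }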
- fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
- const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- const target = mod.getTarget();
- const llvm_usize = self.context.intType(target.ptrBitWidth());
- const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
- self.builder.buildPtrToInt(cond, llvm_usize, "")
+ const else_block = try self.wip.block(1, "Default");
+ const llvm_usize = try o.lowerType(Type.usize);
+ const cond_int = if (cond.typeOfWip(&self.wip).isPointer(&o.builder))
+ try self.wip.cast(.ptrtoint, cond, llvm_usize, "")
else
cond;
- const llvm_switch = self.builder.buildSwitch(cond_int, else_block, switch_br.data.cases_len);
var extra_index: usize = switch_br.end;
var case_i: u32 = 0;
+ var llvm_cases_len: u32 = 0;
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items: []const Air.Inst.Ref =
+ @ptrCast(self.air.extra[case.end..][0..case.data.items_len]);
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + case.data.items_len + case_body.len;
+
+ llvm_cases_len += @intCast(items.len);
+ }
+
+ var wip_switch = try self.wip.@"switch"(cond_int, else_block, llvm_cases_len);
+ defer wip_switch.finish(&self.wip);
+ extra_index = switch_br.end;
+ case_i = 0;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
- const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
+ const items: []const Air.Inst.Ref =
+ @ptrCast(self.air.extra[case.end..][0..case.data.items_len]);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
- const case_block = self.context.appendBasicBlock(self.llvm_func, "Case");
+ const case_block = try self.wip.block(@intCast(items.len), "Case");
for (items) |item| {
- const llvm_item = try self.resolveInst(item);
- const llvm_int_item = if (llvm_item.typeOf().getTypeKind() == .Pointer)
- llvm_item.constPtrToInt(llvm_usize)
+ const llvm_item = (try self.resolveInst(item)).toConst().?;
+ const llvm_int_item = if (llvm_item.typeOf(&o.builder).isPointer(&o.builder))
+ try o.builder.castConst(.ptrtoint, llvm_item, llvm_usize)
else
llvm_item;
- llvm_switch.addCase(llvm_int_item, case_block);
+ try wip_switch.addCase(llvm_int_item, case_block, &self.wip);
}
- self.builder.positionBuilderAtEnd(case_block);
+ self.wip.cursor = .{ .block = case_block };
try self.genBody(case_body);
}
- self.builder.positionBuilderAtEnd(else_block);
+ self.wip.cursor = .{ .block = else_block };
const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
if (else_body.len != 0) {
try self.genBody(else_body);
} else {
- _ = self.builder.buildUnreachable();
+ _ = try self.wip.@"unreachable"();
}
// No need to reset the insert cursor since this instruction is noreturn.
- return null;
+ return .none;
}
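A standalone sketch of the two-pass pattern introduced above, where the number of switch items is counted before the instruction is created so it can be sized exactly; the collection types here are illustrative only.

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const allocator = gpa.allocator();

        const cases = [_][]const u32{ &.{ 1, 2 }, &.{3}, &.{ 4, 5, 6 } };

        var total: usize = 0;
        for (cases) |items| total += items.len; // first pass: count every case item

        var flat = try std.ArrayList(u32).initCapacity(allocator, total);
        defer flat.deinit();
        for (cases) |items| flat.appendSliceAssumeCapacity(items); // second pass: fill without reallocating

        std.debug.print("{d} items\n", .{flat.items.len});
    }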
- fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop");
- _ = self.builder.buildBr(loop_block);
+ const loop_block = try self.wip.block(2, "Loop");
+ _ = try self.wip.br(loop_block);
- self.builder.positionBuilderAtEnd(loop_block);
+ self.wip.cursor = .{ .block = loop_block };
try self.genBody(body);
// TODO instead of this logic, change AIR to have the property that
@@ -5520,35 +5882,30 @@ pub const FuncGen = struct {
// be while(true) instead of for(body), which will eliminate 1 branch on
// a hot path.
if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) {
- _ = self.builder.buildBr(loop_block);
+ _ = try self.wip.br(loop_block);
}
- return null;
+ return .none;
}
- fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const array_ty = operand_ty.childType(mod);
const llvm_usize = try o.lowerType(Type.usize);
- const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False);
+ const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod));
const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
const operand = try self.resolveInst(ty_op.operand);
- if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
- return self.builder.buildInsertValue(partial, len, 1, "");
- }
- const indices: [2]*llvm.Value = .{
- llvm_usize.constNull(), llvm_usize.constNull(),
- };
- const array_llvm_ty = try o.lowerType(array_ty);
- const ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indices, indices.len, "");
- const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, "");
- return self.builder.buildInsertValue(partial, len, 1, "");
+ if (!array_ty.hasRuntimeBitsIgnoreComptime(mod))
+ return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, "");
+ const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{
+ try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0),
+ }, "");
+ return self.wip.buildAggregate(slice_llvm_ty, &.{ ptr, len }, "");
}
- fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -5562,51 +5919,53 @@ pub const FuncGen = struct {
const dest_llvm_ty = try o.lowerType(dest_ty);
const target = mod.getTarget();
- if (intrinsicsAllowed(dest_scalar_ty, target)) {
- if (operand_scalar_ty.isSignedInt(mod)) {
- return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
- } else {
- return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
- }
- }
+ if (intrinsicsAllowed(dest_scalar_ty, target)) return self.wip.conv(
+ if (operand_scalar_ty.isSignedInt(mod)) .signed else .unsigned,
+ operand,
+ dest_llvm_ty,
+ "",
+ );
- const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod)));
- const rt_int_bits = compilerRtIntBits(operand_bits);
- const rt_int_ty = self.context.intType(rt_int_bits);
- var extended = e: {
- if (operand_scalar_ty.isSignedInt(mod)) {
- break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
- } else {
- break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, "");
- }
- };
+ const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod)));
+ const rt_int_ty = try o.builder.intType(rt_int_bits);
+ var extended = try self.wip.conv(
+ if (operand_scalar_ty.isSignedInt(mod)) .signed else .unsigned,
+ operand,
+ rt_int_ty,
+ "",
+ );
const dest_bits = dest_scalar_ty.floatBits(target);
const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits);
const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un";
- var fn_name_buf: [64]u8 = undefined;
- const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{
+ const fn_name = try o.builder.fmt("__float{s}{s}i{s}f", .{
sign_prefix,
compiler_rt_operand_abbrev,
compiler_rt_dest_abbrev,
- }) catch unreachable;
+ });
- var param_types = [1]*llvm.Type{rt_int_ty};
+ var param_type = rt_int_ty;
if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
// i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have.
- const v2i64 = self.context.intType(64).vectorType(2);
- extended = self.builder.buildBitCast(extended, v2i64, "");
- param_types = [1]*llvm.Type{v2i64};
+ param_type = try o.builder.vectorType(.normal, 2, .i64);
+ extended = try self.wip.cast(.bitcast, extended, param_type, "");
}
- const libc_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
- const params = [1]*llvm.Value{extended};
-
- return self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, "");
+ const libc_fn = try self.getLibcFunction(fn_name, &.{param_type}, dest_llvm_ty);
+ const params = [1]*llvm.Value{extended.toLlvm(&self.wip)};
+ return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall(
+ libc_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ libc_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
}
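A standalone sketch of the compiler-rt naming scheme used above, under the assumption that "un" is the unsigned prefix, "t" abbreviates a 128-bit integer, and "s" abbreviates f32; a u128 to f32 conversion outside the intrinsic path would then call __floatuntisf.

    const std = @import("std");

    pub fn main() !void {
        var buf: [64]u8 = undefined;
        // sign prefix, integer abbreviation, float abbreviation
        const name = try std.fmt.bufPrint(&buf, "__float{s}{s}i{s}f", .{ "un", "t", "s" });
        std.debug.print("{s}\n", .{name}); // __floatuntisf
    }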
- fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -5624,19 +5983,20 @@ pub const FuncGen = struct {
if (intrinsicsAllowed(operand_scalar_ty, target)) {
// TODO set fast math flag
- if (dest_scalar_ty.isSignedInt(mod)) {
- return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
- } else {
- return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
- }
+ return self.wip.conv(
+ if (dest_scalar_ty.isSignedInt(mod)) .signed else .unsigned,
+ operand,
+ dest_llvm_ty,
+ "",
+ );
}
- const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod))));
- const ret_ty = self.context.intType(rt_int_bits);
+ const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod)));
+ const ret_ty = try o.builder.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
// i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have.
- break :b self.context.intType(64).vectorType(2);
+ break :b try o.builder.vectorType(.normal, 2, .i64);
} else ret_ty;
const operand_bits = operand_scalar_ty.floatBits(target);
@@ -5645,66 +6005,66 @@ pub const FuncGen = struct {
const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";
- var fn_name_buf: [64]u8 = undefined;
- const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{
+ const fn_name = try o.builder.fmt("__fix{s}{s}f{s}i", .{
sign_prefix,
compiler_rt_operand_abbrev,
compiler_rt_dest_abbrev,
- }) catch unreachable;
+ });
const operand_llvm_ty = try o.lowerType(operand_ty);
- const param_types = [1]*llvm.Type{operand_llvm_ty};
- const libc_fn = self.getLibcFunction(fn_name, &param_types, libc_ret_ty);
- const params = [1]*llvm.Value{operand};
-
- var result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, "");
+ const libc_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, libc_ret_ty);
+ const params = [1]*llvm.Value{operand.toLlvm(&self.wip)};
+ var result = (try self.wip.unimplemented(libc_ret_ty, "")).finish(self.builder.buildCall(
+ libc_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ libc_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
- if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty, "");
- if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty, "");
+ if (libc_ret_ty != ret_ty) result = try self.wip.cast(.bitcast, result, ret_ty, "");
+ if (ret_ty != dest_llvm_ty) result = try self.wip.cast(.trunc, result, dest_llvm_ty, "");
return result;
}
- fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
+ fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
const o = fg.dg.object;
const mod = o.module;
- if (ty.isSlice(mod)) {
- return fg.builder.buildExtractValue(ptr, 0, "");
- } else {
- return ptr;
- }
+ return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr;
}
- fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
+ fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value {
const o = fg.dg.object;
const mod = o.module;
- const target = mod.getTarget();
- const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
+ const llvm_usize = try o.lowerType(Type.usize);
switch (ty.ptrSize(mod)) {
.Slice => {
- const len = fg.builder.buildExtractValue(ptr, 1, "");
+ const len = try fg.wip.extractValue(ptr, &.{1}, "");
const elem_ty = ty.childType(mod);
const abi_size = elem_ty.abiSize(mod);
if (abi_size == 1) return len;
- const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
- return fg.builder.buildMul(len, abi_size_llvm_val, "");
+ const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size);
+ return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, "");
},
.One => {
const array_ty = ty.childType(mod);
const elem_ty = array_ty.childType(mod);
const abi_size = elem_ty.abiSize(mod);
- return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False);
+ return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size);
},
.Many, .C => unreachable,
}
}
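A standalone illustration of the byte-length rule implemented above: a slice multiplies its runtime length by the element ABI size, while the pointer-to-array case folds to a constant.

    const std = @import("std");

    pub fn main() void {
        const array: [5]u32 = .{ 1, 2, 3, 4, 5 };
        const slice: []const u32 = &array;
        const slice_bytes = slice.len * @sizeOf(u32); // runtime length times element size = 20
        const array_bytes = array.len * @sizeOf(u32); // known at compile time: 5 * 4 = 20
        std.debug.print("{d} {d}\n", .{ slice_bytes, array_bytes });
    }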
- fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
+ fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: u32) !Builder.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return self.builder.buildExtractValue(operand, index, "");
+ return self.wip.extractValue(operand, &.{index}, "");
}
- fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
+ fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -5712,10 +6072,10 @@ pub const FuncGen = struct {
const slice_ptr_ty = self.typeOf(ty_op.operand);
const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(mod));
- return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, "");
+ return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, "");
}
- fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -5725,20 +6085,20 @@ pub const FuncGen = struct {
const index = try self.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType(mod);
const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
- const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*llvm.Value = .{index};
- const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
+ const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
+ const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
if (isByRef(elem_ty, mod)) {
if (self.canElideLoad(body_tail))
return ptr;
- return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
+ const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+ return self.loadByRef(ptr, elem_ty, elem_alignment, false);
}
return self.load(ptr, slice_ty);
}
- fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5748,12 +6108,11 @@ pub const FuncGen = struct {
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(mod));
- const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*llvm.Value = .{index};
- return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
+ const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
+ return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
}
- fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -5765,13 +6124,15 @@ pub const FuncGen = struct {
const array_llvm_ty = try o.lowerType(array_ty);
const elem_ty = array_ty.childType(mod);
if (isByRef(array_ty, mod)) {
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
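+ // By-ref arrays are addressed through a pointer, so the GEP needs a leading zero index before the element index.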
+ const indices: [2]Builder.Value = .{
+ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs,
+ };
if (isByRef(elem_ty, mod)) {
- const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
- if (canElideLoad(self, body_tail))
- return elem_ptr;
-
- return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false);
+ const elem_ptr =
+ try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
+ if (canElideLoad(self, body_tail)) return elem_ptr;
+ const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+ return self.loadByRef(elem_ptr, elem_ty, elem_alignment, false);
} else {
const elem_llvm_ty = try o.lowerType(elem_ty);
if (Air.refToIndex(bin_op.lhs)) |lhs_index| {
@@ -5781,26 +6142,38 @@ pub const FuncGen = struct {
if (Air.refToIndex(load_ptr)) |load_ptr_index| {
const load_ptr_tag = self.air.instructions.items(.tag)[load_ptr_index];
switch (load_ptr_tag) {
- .struct_field_ptr, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3 => {
+ .struct_field_ptr,
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ => {
const load_ptr_inst = try self.resolveInst(load_ptr);
- const gep = self.builder.buildInBoundsGEP(array_llvm_ty, load_ptr_inst, &indices, indices.len, "");
- return self.builder.buildLoad(elem_llvm_ty, gep, "");
+ const gep = try self.wip.gep(
+ .inbounds,
+ array_llvm_ty,
+ load_ptr_inst,
+ &indices,
+ "",
+ );
+ return self.wip.load(.normal, elem_llvm_ty, gep, .default, "");
},
else => {},
}
}
}
}
- const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
- return self.builder.buildLoad(elem_llvm_ty, elem_ptr, "");
+ const elem_ptr =
+ try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
+ return self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, "");
}
}
// This branch can be reached for vectors, which are always by-value.
- return self.builder.buildExtractElement(array_llvm_val, rhs, "");
+ return self.wip.extractElement(array_llvm_val, rhs, "");
}
- fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -5811,32 +6184,28 @@ pub const FuncGen = struct {
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
// TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch
- const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: {
+ const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod))
// If this is a single-item pointer to an array, we need another index in the GEP.
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
- break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
- } else ptr: {
- const indices: [1]*llvm.Value = .{rhs};
- break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
- };
+ &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
+ else
+ &.{rhs}, "");
if (isByRef(elem_ty, mod)) {
- if (self.canElideLoad(body_tail))
- return ptr;
-
- return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
+ if (self.canElideLoad(body_tail)) return ptr;
+ const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+ return self.loadByRef(ptr, elem_ty, elem_alignment, false);
}
return self.load(ptr, ptr_ty);
}
- fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType(mod);
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return (try o.lowerPtrToVoid(ptr_ty)).toValue();
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -5845,17 +6214,14 @@ pub const FuncGen = struct {
if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr;
const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty);
- if (ptr_ty.isSinglePointer(mod)) {
+ return try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod))
// If this is a single-item pointer to an array, we need another index in the GEP.
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
- return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
- } else {
- const indices: [1]*llvm.Value = .{rhs};
- return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
- }
+ &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs }
+ else
+ &.{rhs}, "");
}
- fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ptr = try self.resolveInst(struct_field.struct_operand);
@@ -5867,14 +6233,14 @@ pub const FuncGen = struct {
self: *FuncGen,
inst: Air.Inst.Index,
field_index: u32,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try self.resolveInst(ty_op.operand);
const struct_ptr_ty = self.typeOf(ty_op.operand);
return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
}
- fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -5884,9 +6250,7 @@ pub const FuncGen = struct {
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return null;
- }
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
if (!isByRef(struct_ty, mod)) {
assert(!isByRef(field_ty, mod));
@@ -5896,25 +6260,26 @@ pub const FuncGen = struct {
const struct_obj = mod.typeToStruct(struct_ty).?;
const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
const containing_int = struct_llvm_val;
- const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
- const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
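+ // Packed structs are lowered as one backing integer: shift the field's bits down, then narrow to the field type.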
+ const shift_amt =
+ try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
+ const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
- return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const truncated_int =
+ try self.wip.cast(.trunc, shifted_value, same_size_int, "");
+ return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(mod)) {
- const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
- return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const truncated_int =
+ try self.wip.cast(.trunc, shifted_value, same_size_int, "");
+ return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
}
- return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
+ return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, "");
},
else => {
const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index;
- return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
+ return self.wip.extractValue(struct_llvm_val, &.{llvm_field_index}, "");
},
},
.Union => {
@@ -5922,17 +6287,17 @@ pub const FuncGen = struct {
const containing_int = struct_llvm_val;
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
- const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
- return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const truncated_int =
+ try self.wip.cast(.trunc, containing_int, same_size_int, "");
+ return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(mod)) {
- const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
- return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
+ const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
+ const truncated_int =
+ try self.wip.cast(.trunc, containing_int, same_size_int, "");
+ return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
}
- return self.builder.buildTrunc(containing_int, elem_llvm_ty, "");
+ return self.wip.cast(.trunc, containing_int, elem_llvm_ty, "");
},
else => unreachable,
}
@@ -5943,7 +6308,8 @@ pub const FuncGen = struct {
assert(struct_ty.containerLayout(mod) != .Packed);
const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try o.lowerType(struct_ty);
- const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
+ const field_ptr =
+ try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.child = llvm_field.ty.toIntern(),
.flags = .{
@@ -5955,7 +6321,8 @@ pub const FuncGen = struct {
return field_ptr;
assert(llvm_field.alignment != 0);
- return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false);
+ const field_alignment = Builder.Alignment.fromByteUnits(llvm_field.alignment);
+ return self.loadByRef(field_ptr, field_ty, field_alignment, false);
} else {
return self.load(field_ptr, field_ptr_ty);
}
@@ -5964,22 +6331,22 @@ pub const FuncGen = struct {
const union_llvm_ty = try o.lowerType(struct_ty);
const layout = struct_ty.unionGetLayout(mod);
const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
- const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
+ const field_ptr =
+ try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
const llvm_field_ty = try o.lowerType(field_ty);
+ const payload_alignment = Builder.Alignment.fromByteUnits(layout.payload_align);
if (isByRef(field_ty, mod)) {
- if (canElideLoad(self, body_tail))
- return field_ptr;
-
- return self.loadByRef(field_ptr, field_ty, layout.payload_align, false);
+ if (canElideLoad(self, body_tail)) return field_ptr;
+ return self.loadByRef(field_ptr, field_ty, payload_alignment, false);
} else {
- return self.builder.buildLoad(llvm_field_ty, field_ptr, "");
+ return self.wip.load(.normal, llvm_field_ty, field_ptr, payload_alignment, "");
}
},
else => unreachable,
}
}
- fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5987,50 +6354,52 @@ pub const FuncGen = struct {
const field_ptr = try self.resolveInst(extra.field_ptr);
- const target = o.module.getTarget();
const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod);
const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
+ if (field_offset == 0) return field_ptr;
const res_ty = try o.lowerType(self.air.getRefType(ty_pl.ty));
- if (field_offset == 0) {
- return field_ptr;
- }
- const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
+ const llvm_usize = try o.lowerType(Type.usize);
- const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, "");
- const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), "");
- return self.builder.buildIntToPtr(base_ptr_int, res_ty, "");
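+ // Recover the parent pointer by subtracting the field's byte offset in the integer domain (ptrtoint, sub, inttoptr).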
+ const field_ptr_int = try self.wip.cast(.ptrtoint, field_ptr, llvm_usize, "");
+ const base_ptr_int = try self.wip.bin(
+ .@"sub nuw",
+ field_ptr_int,
+ try o.builder.intValue(llvm_usize, field_offset),
+ "",
+ );
+ return self.wip.cast(.inttoptr, base_ptr_int, res_ty, "");
}
- fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airNot(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- return self.builder.buildNot(operand, "");
+ return self.wip.not(operand, "");
}
- fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value {
+ fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
- _ = self.builder.buildUnreachable();
- return null;
+ _ = try self.wip.@"unreachable"();
+ return .none;
}
- fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value {
- const di_scope = self.di_scope orelse return null;
+ fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+ const di_scope = self.di_scope orelse return .none;
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1));
- self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1));
+ self.prev_dbg_line = @intCast(self.base_line + dbg_stmt.line + 1);
+ self.prev_dbg_column = @intCast(dbg_stmt.column + 1);
const inlined_at = if (self.dbg_inlined.items.len > 0)
self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc
else
null;
self.builder.setCurrentDebugLocation(self.prev_dbg_line, self.prev_dbg_column, di_scope, inlined_at);
- return null;
+ return .none;
}
- fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const dib = o.di_builder orelse return null;
+ const dib = o.di_builder orelse return .none;
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = o.module;
@@ -6083,12 +6452,12 @@ pub const FuncGen = struct {
const lexical_block = dib.createLexicalBlock(subprogram.toScope(), di_file, line_number, 1);
self.di_scope = lexical_block.toScope();
self.base_line = decl.src_line;
- return null;
+ return .none;
}
- fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- if (o.di_builder == null) return null;
+ if (o.di_builder == null) return .none;
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = o.module;
@@ -6098,30 +6467,30 @@ pub const FuncGen = struct {
const old = self.dbg_inlined.pop();
self.di_scope = old.scope;
self.base_line = old.base_line;
- return null;
+ return .none;
}
- fn airDbgBlockBegin(self: *FuncGen) !?*llvm.Value {
+ fn airDbgBlockBegin(self: *FuncGen) !Builder.Value {
const o = self.dg.object;
- const dib = o.di_builder orelse return null;
+ const dib = o.di_builder orelse return .none;
const old_scope = self.di_scope.?;
try self.dbg_block_stack.append(self.gpa, old_scope);
const lexical_block = dib.createLexicalBlock(old_scope, self.di_file.?, self.prev_dbg_line, self.prev_dbg_column);
self.di_scope = lexical_block.toScope();
- return null;
+ return .none;
}
- fn airDbgBlockEnd(self: *FuncGen) !?*llvm.Value {
+ fn airDbgBlockEnd(self: *FuncGen) !Builder.Value {
const o = self.dg.object;
- if (o.di_builder == null) return null;
+ if (o.di_builder == null) return .none;
self.di_scope = self.dbg_block_stack.pop();
- return null;
+ return .none;
}
- fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
- const dib = o.di_builder orelse return null;
+ const dib = o.di_builder orelse return .none;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
@@ -6141,22 +6510,20 @@ pub const FuncGen = struct {
else
null;
const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
- const insert_block = self.builder.getInsertBlock();
- _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
- return null;
+ const insert_block = self.wip.cursor.block.toLlvm(&self.wip);
+ _ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
+ return .none;
}
- fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const dib = o.di_builder orelse return null;
+ const dib = o.di_builder orelse return .none;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const operand_ty = self.typeOf(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
- if (needDbgVarWorkaround(o)) {
- return null;
- }
+ if (needDbgVarWorkaround(o)) return .none;
const di_local_var = dib.createAutoVariable(
self.di_scope.?,
@@ -6172,23 +6539,22 @@ pub const FuncGen = struct {
else
null;
const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
- const insert_block = self.builder.getInsertBlock();
+ const insert_block = self.wip.cursor.block.toLlvm(&self.wip);
const mod = o.module;
if (isByRef(operand_ty, mod)) {
- _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
+ _ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else if (o.module.comp.bin_file.options.optimize_mode == .Debug) {
- const alignment = operand_ty.abiAlignment(mod);
- const alloca = self.buildAlloca(operand.typeOf(), alignment);
- const store_inst = self.builder.buildStore(operand, alloca);
- store_inst.setAlignment(alignment);
- _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block);
+ const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod));
+ const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
+ _ = try self.wip.store(.normal, operand, alloca, alignment);
+ _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else {
- _ = dib.insertDbgValueIntrinsicAtEnd(operand, di_local_var, debug_loc, insert_block);
+ _ = dib.insertDbgValueIntrinsicAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
}
- return null;
+ return .none;
}
- fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
// Eventually, the Zig compiler needs to be reworked to have inline
// assembly go through the same parsing code regardless of backend, and
// have LLVM-flavored inline assembly be *output* from that assembler.
@@ -6199,12 +6565,12 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
- const clobbers_len = @as(u31, @truncate(extra.data.flags));
+ const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
- const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
@@ -6217,15 +6583,15 @@ pub const FuncGen = struct {
// The exact number of return / parameter values depends on which output values
// are passed by reference as indirect outputs (determined below).
const max_return_count = outputs.len;
- const llvm_ret_types = try arena.alloc(*llvm.Type, max_return_count);
+ const llvm_ret_types = try arena.alloc(Builder.Type, max_return_count);
const llvm_ret_indirect = try arena.alloc(bool, max_return_count);
const max_param_count = inputs.len + outputs.len;
- const llvm_param_types = try arena.alloc(*llvm.Type, max_param_count);
+ const llvm_param_types = try arena.alloc(Builder.Type, max_param_count);
const llvm_param_values = try arena.alloc(*llvm.Value, max_param_count);
// This stores whether we need to add an elementtype attribute and
// if so, the element type itself.
- const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
+ const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count);
const mod = o.module;
const target = mod.getTarget();
@@ -6262,8 +6628,8 @@ pub const FuncGen = struct {
// Pass the result by reference as an indirect output (e.g. "=*m")
llvm_constraints.appendAssumeCapacity('*');
- llvm_param_values[llvm_param_i] = output_inst;
- llvm_param_types[llvm_param_i] = output_inst.typeOf();
+ llvm_param_values[llvm_param_i] = output_inst.toLlvm(&self.wip);
+ llvm_param_types[llvm_param_i] = output_inst.typeOfWip(&self.wip);
llvm_param_attrs[llvm_param_i] = elem_llvm_ty;
llvm_param_i += 1;
} else {
@@ -6308,31 +6674,30 @@ pub const FuncGen = struct {
const arg_llvm_value = try self.resolveInst(input);
const arg_ty = self.typeOf(input);
- var llvm_elem_ty: ?*llvm.Type = null;
+ var llvm_elem_ty: Builder.Type = .none;
if (isByRef(arg_ty, mod)) {
llvm_elem_ty = try o.lowerPtrElemTy(arg_ty);
if (constraintAllowsMemory(constraint)) {
- llvm_param_values[llvm_param_i] = arg_llvm_value;
- llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
+ llvm_param_values[llvm_param_i] = arg_llvm_value.toLlvm(&self.wip);
+ llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
- const alignment = arg_ty.abiAlignment(mod);
+ const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
const arg_llvm_ty = try o.lowerType(arg_ty);
- const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, "");
- load_inst.setAlignment(alignment);
- llvm_param_values[llvm_param_i] = load_inst;
+ const load_inst =
+ try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, "");
+ llvm_param_values[llvm_param_i] = load_inst.toLlvm(&self.wip);
llvm_param_types[llvm_param_i] = arg_llvm_ty;
}
} else {
if (constraintAllowsRegister(constraint)) {
- llvm_param_values[llvm_param_i] = arg_llvm_value;
- llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
+ llvm_param_values[llvm_param_i] = arg_llvm_value.toLlvm(&self.wip);
+ llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
- const alignment = arg_ty.abiAlignment(mod);
- const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment);
- const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr);
- store_inst.setAlignment(alignment);
- llvm_param_values[llvm_param_i] = arg_ptr;
- llvm_param_types[llvm_param_i] = arg_ptr.typeOf();
+ const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
+ const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment);
+ _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment);
+ llvm_param_values[llvm_param_i] = arg_ptr.toLlvm(&self.wip);
+ llvm_param_types[llvm_param_i] = arg_ptr.typeOfWip(&self.wip);
}
}
@@ -6356,10 +6721,12 @@ pub const FuncGen = struct {
// In the case of indirect inputs, LLVM requires the callsite to have
// an elementtype(<ty>) attribute.
if (constraint[0] == '*') {
- llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse
+ llvm_param_attrs[llvm_param_i] = if (llvm_elem_ty != .none)
+ llvm_elem_ty
+ else
try o.lowerPtrElemTy(arg_ty.childType(mod));
} else {
- llvm_param_attrs[llvm_param_i] = null;
+ llvm_param_attrs[llvm_param_i] = .none;
}
llvm_param_i += 1;
@@ -6477,23 +6844,14 @@ pub const FuncGen = struct {
}
const ret_llvm_ty = switch (return_count) {
- 0 => self.context.voidType(),
+ 0 => .void,
1 => llvm_ret_types[0],
- else => self.context.structType(
- llvm_ret_types.ptr,
- @as(c_uint, @intCast(return_count)),
- .False,
- ),
+ else => try o.builder.structType(.normal, llvm_ret_types),
};
- const llvm_fn_ty = llvm.functionType(
- ret_llvm_ty,
- llvm_param_types.ptr,
- @as(c_uint, @intCast(param_count)),
- .False,
- );
+ const llvm_fn_ty = try o.builder.fnType(ret_llvm_ty, llvm_param_types[0..param_count], .normal);
const asm_fn = llvm.getInlineAsm(
- llvm_fn_ty,
+ llvm_fn_ty.toLlvm(&o.builder),
rendered_template.items.ptr,
rendered_template.items.len,
llvm_constraints.items.ptr,
@@ -6503,18 +6861,18 @@ pub const FuncGen = struct {
.ATT,
.False,
);
- const call = self.builder.buildCall(
- llvm_fn_ty,
+ const call = (try self.wip.unimplemented(ret_llvm_ty, "")).finish(self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
asm_fn,
llvm_param_values.ptr,
- @as(c_uint, @intCast(param_count)),
+ @intCast(param_count),
.C,
.Auto,
"",
- );
+ ), &self.wip);
for (llvm_param_attrs[0..param_count], 0..) |llvm_elem_ty, i| {
- if (llvm_elem_ty) |llvm_ty| {
- llvm.setCallElemTypeAttr(call, i, llvm_ty);
+ if (llvm_elem_ty != .none) {
+ llvm.setCallElemTypeAttr(call.toLlvm(&self.wip), i, llvm_elem_ty.toLlvm(&o.builder));
}
}
@@ -6523,16 +6881,17 @@ pub const FuncGen = struct {
for (outputs, 0..) |output, i| {
if (llvm_ret_indirect[i]) continue;
- const output_value = if (return_count > 1) b: {
- break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), "");
- } else call;
+ const output_value = if (return_count > 1)
+ try self.wip.extractValue(call, &[_]u32{@intCast(llvm_ret_i)}, "")
+ else
+ call;
if (output != .none) {
const output_ptr = try self.resolveInst(output);
const output_ptr_ty = self.typeOf(output);
- const store_inst = self.builder.buildStore(output_value, output_ptr);
- store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
+ const alignment = Builder.Alignment.fromByteUnits(output_ptr_ty.ptrAlignment(mod));
+ _ = try self.wip.store(.normal, output_value, output_ptr, alignment);
} else {
ret_val = output_value;
}
@@ -6546,8 +6905,8 @@ pub const FuncGen = struct {
self: *FuncGen,
inst: Air.Inst.Index,
operand_is_ptr: bool,
- pred: llvm.IntPredicate,
- ) !?*llvm.Value {
+ cond: Builder.IntegerCondition,
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -6558,43 +6917,40 @@ pub const FuncGen = struct {
const payload_ty = optional_ty.optionalChild(mod);
if (optional_ty.optionalReprIsPayload(mod)) {
const loaded = if (operand_is_ptr)
- self.builder.buildLoad(optional_llvm_ty, operand, "")
+ try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
else
operand;
if (payload_ty.isSlice(mod)) {
- const slice_ptr = self.builder.buildExtractValue(loaded, 0, "");
- const ptr_ty = try o.lowerType(payload_ty.slicePtrFieldType(mod));
- return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), "");
+ const slice_ptr = try self.wip.extractValue(loaded, &.{0}, "");
+ const ptr_ty = try o.builder.ptrType(toLlvmAddressSpace(
+ payload_ty.ptrAddressSpace(mod),
+ mod.getTarget(),
+ ));
+ return self.wip.icmp(cond, slice_ptr, try o.builder.nullValue(ptr_ty), "");
}
- return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), "");
+ return self.wip.icmp(cond, loaded, try o.builder.zeroInitValue(optional_llvm_ty), "");
}
comptime assert(optional_layout_version == 3);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const loaded = if (operand_is_ptr)
- self.builder.buildLoad(optional_llvm_ty, operand, "")
+ try self.wip.load(.normal, optional_llvm_ty, operand, .default, "")
else
operand;
- const llvm_i8 = self.context.intType(8);
- return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
+ return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), "");
}
const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
- const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref);
- if (pred == .EQ) {
- return self.builder.buildNot(non_null_bit, "");
- } else {
- return non_null_bit;
- }
+ return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref);
}
fn airIsErr(
self: *FuncGen,
inst: Air.Inst.Index,
- op: llvm.IntPredicate,
+ cond: Builder.IntegerCondition,
operand_is_ptr: bool,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -6602,40 +6958,37 @@ pub const FuncGen = struct {
const operand_ty = self.typeOf(un_op);
const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
const payload_ty = err_union_ty.errorUnionPayload(mod);
- const err_set_ty = try o.lowerType(Type.anyerror);
- const zero = err_set_ty.constNull();
+ const zero = try o.builder.intValue(Builder.Type.err_int, 0);
if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
- const llvm_i1 = self.context.intType(1);
- switch (op) {
- .EQ => return llvm_i1.constInt(1, .False), // 0 == 0
- .NE => return llvm_i1.constInt(0, .False), // 0 != 0
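+ // With an empty error set the error value is always zero, so the comparison folds to a constant.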
+ const val: Builder.Constant = switch (cond) {
+ .eq => .true, // 0 == 0
+ .ne => .false, // 0 != 0
else => unreachable,
- }
+ };
+ return val.toValue();
}
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const loaded = if (operand_is_ptr)
- self.builder.buildLoad(try o.lowerType(err_union_ty), operand, "")
+ try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "")
else
operand;
- return self.builder.buildICmp(op, loaded, zero, "");
+ return self.wip.icmp(cond, loaded, zero, "");
}
const err_field_index = errUnionErrorOffset(payload_ty, mod);
- if (operand_is_ptr or isByRef(err_union_ty, mod)) {
+ const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
- const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, "");
- const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, "");
- return self.builder.buildICmp(op, loaded, zero, "");
- }
-
- const loaded = self.builder.buildExtractValue(operand, err_field_index, "");
- return self.builder.buildICmp(op, loaded, zero, "");
+ const err_field_ptr =
+ try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, "");
+ break :loaded try self.wip.load(.normal, Builder.Type.err_int, err_field_ptr, .default, "");
+ } else try self.wip.extractValue(operand, &.{err_field_index}, "");
+ return self.wip.icmp(cond, loaded, zero, "");
}
- fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6651,11 +7004,10 @@ pub const FuncGen = struct {
// The payload and the optional are the same value.
return operand;
}
- const optional_llvm_ty = try o.lowerType(optional_ty);
- return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, "");
+ return self.wip.gepStruct(try o.lowerType(optional_ty), operand, 0, "");
}
- fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
comptime assert(optional_layout_version == 3);
const o = self.dg.object;
@@ -6664,10 +7016,10 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand).childType(mod);
const payload_ty = optional_ty.optionalChild(mod);
- const non_null_bit = self.context.intType(8).constInt(1, .False);
+ const non_null_bit = try o.builder.intValue(.i8, 1);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
- _ = self.builder.buildStore(non_null_bit, operand);
+ _ = try self.wip.store(.normal, non_null_bit, operand, .default);
return operand;
}
if (optional_ty.optionalReprIsPayload(mod)) {
@@ -6678,18 +7030,17 @@ pub const FuncGen = struct {
// First set the non-null bit.
const optional_llvm_ty = try o.lowerType(optional_ty);
- const non_null_ptr = self.builder.buildStructGEP(optional_llvm_ty, operand, 1, "");
+ const non_null_ptr = try self.wip.gepStruct(optional_llvm_ty, operand, 1, "");
// TODO set alignment on this store
- _ = self.builder.buildStore(non_null_bit, non_null_ptr);
+ _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default);
// Then return the payload pointer (only if it's used).
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return .none;
- return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, "");
+ return self.wip.gepStruct(optional_llvm_ty, operand, 0, "");
}
- fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -6697,7 +7048,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand);
const payload_ty = self.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
if (optional_ty.optionalReprIsPayload(mod)) {
// Payload value is the same as the optional value.
@@ -6713,7 +7064,7 @@ pub const FuncGen = struct {
self: *FuncGen,
body_tail: []const Air.Inst.Index,
operand_is_ptr: bool,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -6725,32 +7076,30 @@ pub const FuncGen = struct {
const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty;
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return if (operand_is_ptr) operand else null;
+ return if (operand_is_ptr) operand else .none;
}
const offset = errUnionPayloadOffset(payload_ty, mod);
const err_union_llvm_ty = try o.lowerType(err_union_ty);
if (operand_is_ptr) {
- return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
+ return self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
} else if (isByRef(err_union_ty, mod)) {
- const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
+ const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
+ const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
if (isByRef(payload_ty, mod)) {
- if (self.canElideLoad(body_tail))
- return payload_ptr;
-
- return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
+ if (self.canElideLoad(body_tail)) return payload_ptr;
+ return self.loadByRef(payload_ptr, payload_ty, payload_alignment, false);
}
- const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
- load_inst.setAlignment(payload_ty.abiAlignment(mod));
- return load_inst;
+ const payload_llvm_ty = err_union_llvm_ty.structFields(&o.builder)[offset];
+ return self.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, "");
}
- return self.builder.buildExtractValue(operand, offset, "");
+ return self.wip.extractValue(operand, &.{offset}, "");
}
fn airErrUnionErr(
self: *FuncGen,
inst: Air.Inst.Index,
operand_is_ptr: bool,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6758,34 +7107,31 @@ pub const FuncGen = struct {
const operand_ty = self.typeOf(ty_op.operand);
const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
- const err_llvm_ty = try o.lowerType(Type.anyerror);
if (operand_is_ptr) {
return operand;
} else {
- return err_llvm_ty.constInt(0, .False);
+ return o.builder.intValue(Builder.Type.err_int, 0);
}
}
- const err_set_llvm_ty = try o.lowerType(Type.anyerror);
-
const payload_ty = err_union_ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!operand_is_ptr) return operand;
- return self.builder.buildLoad(err_set_llvm_ty, operand, "");
+ return self.wip.load(.normal, Builder.Type.err_int, operand, .default, "");
}
const offset = errUnionErrorOffset(payload_ty, mod);
if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_union_llvm_ty = try o.lowerType(err_union_ty);
- const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
- return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, "");
+ const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
+ return self.wip.load(.normal, Builder.Type.err_int, err_field_ptr, .default, "");
}
- return self.builder.buildExtractValue(operand, offset, "");
+ return self.wip.extractValue(operand, &.{offset}, "");
}
- fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6793,49 +7139,49 @@ pub const FuncGen = struct {
const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
const payload_ty = err_union_ty.errorUnionPayload(mod);
- const non_error_val = try o.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) });
+ const non_error_val = try o.builder.intValue(Builder.Type.err_int, 0);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- _ = self.builder.buildStore(non_error_val, operand);
+ _ = try self.wip.store(.normal, non_error_val, operand, .default);
return operand;
}
const err_union_llvm_ty = try o.lowerType(err_union_ty);
{
+ const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
const error_offset = errUnionErrorOffset(payload_ty, mod);
// First set the non-error value.
- const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, "");
- const store_inst = self.builder.buildStore(non_error_val, non_null_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
+ const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
+ _ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment);
}
// Then return the payload pointer (only if it is used).
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return .none;
const payload_offset = errUnionPayloadOffset(payload_ty, mod);
- return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, "");
+ return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, "");
}
- fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !?*llvm.Value {
- return self.err_ret_trace.?;
+ fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !Builder.Value {
+ assert(self.err_ret_trace != .none);
+ return self.err_ret_trace;
}
- fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const operand = try self.resolveInst(un_op);
- self.err_ret_trace = operand;
- return null;
+ self.err_ret_trace = try self.resolveInst(un_op);
+ return .none;
}
- fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- //const struct_ty = try self.resolveInst(ty_pl.ty);
const struct_ty = self.air.getRefType(ty_pl.ty);
const field_index = ty_pl.payload;
const mod = o.module;
const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try o.lowerType(struct_ty);
- const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, "");
+ assert(self.err_ret_trace != .none);
+ const field_ptr =
+ try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.child = llvm_field.ty.toIntern(),
.flags = .{
@@ -6845,34 +7191,32 @@ pub const FuncGen = struct {
return self.load(field_ptr, field_ptr_ty);
}
- fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.typeOf(ty_op.operand);
- const non_null_bit = self.context.intType(8).constInt(1, .False);
+ const non_null_bit = try o.builder.intValue(.i8, 1);
comptime assert(optional_layout_version == 3);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload(mod)) {
- return operand;
- }
+ if (optional_ty.optionalReprIsPayload(mod)) return operand;
const llvm_optional_ty = try o.lowerType(optional_ty);
if (isByRef(optional_ty, mod)) {
- const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod));
- const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, "");
+ const alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod));
+ const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment);
+ const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, "");
const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
- try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
- const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, "");
- _ = self.builder.buildStore(non_null_bit, non_null_ptr);
+ try self.store(payload_ptr, payload_ptr_ty, operand, .none);
+ const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, "");
+ _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default);
return optional_ptr;
}
- const partial = self.builder.buildInsertValue(llvm_optional_ty.getUndef(), operand, 0, "");
- return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
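+ // By-value optionals are a { payload, i8 non-null } aggregate; build it in one step.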
+ return self.wip.buildAggregate(llvm_optional_ty, &.{ operand, non_null_bit }, "");
}
- fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6882,46 +7226,47 @@ pub const FuncGen = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return operand;
}
- const ok_err_code = (try o.lowerType(Type.anyerror)).constNull();
+ const ok_err_code = try o.builder.intValue(Builder.Type.err_int, 0);
const err_un_llvm_ty = try o.lowerType(err_un_ty);
const payload_offset = errUnionPayloadOffset(payload_ty, mod);
const error_offset = errUnionErrorOffset(payload_ty, mod);
if (isByRef(err_un_ty, mod)) {
- const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
- const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
- const store_inst = self.builder.buildStore(ok_err_code, err_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
- const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
+ const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod));
+ const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
+ const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
+ const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
+ _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment);
+ const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
- try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
+ try self.store(payload_ptr, payload_ptr_ty, operand, .none);
return result_ptr;
}
-
- const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, error_offset, "");
- return self.builder.buildInsertValue(partial, operand, payload_offset, "");
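+ // Payload and error field order follows the computed offsets, so fill the aggregate by index.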
+ var fields: [2]Builder.Value = undefined;
+ fields[payload_offset] = operand;
+ fields[error_offset] = ok_err_code;
+ return self.wip.buildAggregate(err_un_llvm_ty, &fields, "");
}
- fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return operand;
- }
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand;
const err_un_llvm_ty = try o.lowerType(err_un_ty);
const payload_offset = errUnionPayloadOffset(payload_ty, mod);
const error_offset = errUnionErrorOffset(payload_ty, mod);
if (isByRef(err_un_ty, mod)) {
- const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
- const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
- const store_inst = self.builder.buildStore(operand, err_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
- const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
+ const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod));
+ const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
+ const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
+ const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
+ _ = try self.wip.store(.normal, operand, err_ptr, error_alignment);
+ const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
// TODO store undef to payload_ptr
_ = payload_ptr;
@@ -6929,34 +7274,52 @@ pub const FuncGen = struct {
return result_ptr;
}
- const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, error_offset, "");
// TODO set payload bytes to undef
- return partial;
+ const undef = try o.builder.undefValue(err_un_llvm_ty);
+ return self.wip.insertValue(undef, operand, &.{error_offset}, "");
}
- fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+ const o = self.dg.object;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const index = pl_op.payload;
- const llvm_u32 = self.context.intType(32);
- const llvm_fn = self.getIntrinsic("llvm.wasm.memory.size", &.{llvm_u32});
- const args: [1]*llvm.Value = .{llvm_u32.constInt(index, .False)};
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.size", &.{.i32});
+ const args: [1]*llvm.Value = .{
+ (try o.builder.intConst(.i32, index)).toLlvm(&o.builder),
+ };
+ return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(.i32, &.{.i32}, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
}
- fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+ const o = self.dg.object;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const index = pl_op.payload;
const operand = try self.resolveInst(pl_op.operand);
- const llvm_u32 = self.context.intType(32);
- const llvm_fn = self.getIntrinsic("llvm.wasm.memory.grow", &.{llvm_u32});
+ const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.grow", &.{.i32});
const args: [2]*llvm.Value = .{
- llvm_u32.constInt(index, .False),
- operand,
+ (try o.builder.intConst(.i32, index)).toLlvm(&o.builder),
+ operand.toLlvm(&self.wip),
};
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(.i32, &.{ .i32, .i32 }, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
}
- fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const data = self.air.instructions.items(.data)[inst].vector_store_elem;
@@ -6967,19 +7330,20 @@ pub const FuncGen = struct {
const index = try self.resolveInst(extra.lhs);
const operand = try self.resolveInst(extra.rhs);
- const loaded_vector = blk: {
- const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
- const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, "");
- load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod));
- load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod)));
- break :blk load_inst;
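+ // Read-modify-write: load the whole vector, insert the element, then store it back (volatile if the pointer is volatile).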
+ const kind: Builder.MemoryAccessKind = switch (vector_ptr_ty.isVolatilePtr(mod)) {
+ false => .normal,
+ true => .@"volatile",
};
- const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, "");
- try self.store(vector_ptr, vector_ptr_ty, modified_vector, .NotAtomic);
- return null;
+ const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
+ const alignment = Builder.Alignment.fromByteUnits(vector_ptr_ty.ptrAlignment(mod));
+ const loaded = try self.wip.load(kind, elem_llvm_ty, vector_ptr, alignment, "");
+
+ const new_vector = try self.wip.insertElement(loaded, operand, index, "");
+ _ = try self.store(vector_ptr, vector_ptr_ty, new_vector, .none);
+ return .none;
}
- fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -6988,11 +7352,13 @@ pub const FuncGen = struct {
const scalar_ty = self.typeOfIndex(inst).scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, "");
- return self.builder.buildUMin(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .@"llvm.smin."
+ else
+ .@"llvm.umin.", lhs, rhs, "");
}
- fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7001,26 +7367,23 @@ pub const FuncGen = struct {
const scalar_ty = self.typeOfIndex(inst).scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, "");
- return self.builder.buildUMax(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .@"llvm.smax."
+ else
+ .@"llvm.umax.", lhs, rhs, "");
}
- fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
- const llvm_slice_ty = try o.lowerType(inst_ty);
-
- // In case of slicing a global, the result type looks something like `{ i8*, i64 }`
- // but `ptr` is pointing to the global directly.
- const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, "");
- return self.builder.buildInsertValue(partial, len, 1, "");
+ return self.wip.buildAggregate(try o.lowerType(inst_ty), &.{ ptr, len }, "");
}
- fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7032,8 +7395,7 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, "");
- return self.builder.buildNUWAdd(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"add nsw" else .@"add nuw", lhs, rhs, "");
}
fn airSafeArithmetic(
@@ -7041,7 +7403,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = fg.dg.object;
const mod = o.module;
@@ -7057,42 +7419,50 @@ pub const FuncGen = struct {
false => unsigned_intrinsic,
};
const llvm_inst_ty = try o.lowerType(inst_ty);
- const llvm_fn = fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty});
- const result_struct = fg.builder.buildCall(
- llvm_fn.globalGetValueType(),
+ const llvm_ret_ty = try o.builder.structType(.normal, &.{
+ llvm_inst_ty,
+ try llvm_inst_ty.changeScalar(.i1, &o.builder),
+ });
+ const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_inst_ty, llvm_inst_ty }, .normal);
+ const llvm_fn = try fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty});
+ const result_struct = (try fg.wip.unimplemented(llvm_ret_ty, "")).finish(fg.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
llvm_fn,
- &[_]*llvm.Value{ lhs, rhs },
+ &[_]*llvm.Value{ lhs.toLlvm(&fg.wip), rhs.toLlvm(&fg.wip) },
2,
.Fast,
.Auto,
"",
- );
- const overflow_bit = fg.builder.buildExtractValue(result_struct, 1, "");
+ ), &fg.wip);
+ const overflow_bit = try fg.wip.extractValue(result_struct, &.{1}, "");
const scalar_overflow_bit = switch (is_scalar) {
true => overflow_bit,
- false => fg.builder.buildOrReduce(overflow_bit),
+ false => (try fg.wip.unimplemented(.i1, "")).finish(
+ fg.builder.buildOrReduce(overflow_bit.toLlvm(&fg.wip)),
+ &fg.wip,
+ ),
};
- const fail_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowFail");
- const ok_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowOk");
- _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block, ok_block);
+ const fail_block = try fg.wip.block(1, "OverflowFail");
+ const ok_block = try fg.wip.block(1, "OverflowOk");
+ _ = try fg.wip.brCond(scalar_overflow_bit, fail_block, ok_block);
- fg.builder.positionBuilderAtEnd(fail_block);
+ fg.wip.cursor = .{ .block = fail_block };
try fg.buildSimplePanic(.integer_overflow);
- fg.builder.positionBuilderAtEnd(ok_block);
- return fg.builder.buildExtractValue(result_struct, 0, "");
+ fg.wip.cursor = .{ .block = ok_block };
+ return fg.wip.extractValue(result_struct, &.{0}, "");
}
- fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- return self.builder.buildAdd(lhs, rhs, "");
+ return self.wip.bin(.add, lhs, rhs, "");
}
- fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7102,12 +7472,13 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, "");
-
- return self.builder.buildUAddSat(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .@"llvm.sadd.sat."
+ else
+ .@"llvm.uadd.sat.", lhs, rhs, "");
}
- fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7119,19 +7490,18 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, "");
- return self.builder.buildNUWSub(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"sub nsw" else .@"sub nuw", lhs, rhs, "");
}
- fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- return self.builder.buildSub(lhs, rhs, "");
+ return self.wip.bin(.sub, lhs, rhs, "");
}
- fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7141,11 +7511,13 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, "");
- return self.builder.buildUSubSat(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .@"llvm.ssub.sat."
+ else
+ .@"llvm.usub.sat.", lhs, rhs, "");
}
- fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7157,19 +7529,18 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, "");
- return self.builder.buildNUWMul(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"mul nsw" else .@"mul nuw", lhs, rhs, "");
}
- fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- return self.builder.buildMul(lhs, rhs, "");
+ return self.wip.bin(.mul, lhs, rhs, "");
}
- fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7179,11 +7550,13 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, "");
- return self.builder.buildUMulFixSat(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .@"llvm.smul.fix.sat."
+ else
+ .@"llvm.umul.fix.sat.", lhs, rhs, "");
}
- fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7194,7 +7567,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
}
- fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7209,11 +7582,10 @@ pub const FuncGen = struct {
const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.trunc, inst_ty, 1, .{result});
}
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, "");
- return self.builder.buildUDiv(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .sdiv else .udiv, lhs, rhs, "");
}
- fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7230,31 +7602,24 @@ pub const FuncGen = struct {
}
if (scalar_ty.isSignedInt(mod)) {
const inst_llvm_ty = try o.lowerType(inst_ty);
- const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
- const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
- const vec_len = inst_ty.vectorLen(mod);
- const scalar_llvm_ty = try o.lowerType(scalar_ty);
-
- const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
- defer self.gpa.free(shifts);
-
- @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False));
- break :const_vector llvm.constVector(shifts.ptr, vec_len);
- } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False);
+ const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst(
+ inst_llvm_ty.scalarType(&o.builder),
+ inst_llvm_ty.scalarBits(&o.builder) - 1,
+ ));
- const div = self.builder.buildSDiv(lhs, rhs, "");
- const rem = self.builder.buildSRem(lhs, rhs, "");
- const div_sign = self.builder.buildXor(lhs, rhs, "");
- const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, "");
- const zero = inst_llvm_ty.constNull();
- const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, "");
- const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero, "");
- return self.builder.buildNSWAdd(div, correction, "");
+ const div = try self.wip.bin(.sdiv, lhs, rhs, "");
+ const rem = try self.wip.bin(.srem, lhs, rhs, "");
+ const div_sign = try self.wip.bin(.xor, lhs, rhs, "");
+ const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, "");
+ const zero = try o.builder.zeroInitValue(inst_llvm_ty);
+ const rem_nonzero = try self.wip.icmp(.ne, rem, zero, "");
+ const correction = try self.wip.select(rem_nonzero, div_sign_mask, zero, "");
+ return self.wip.bin(.@"add nsw", div, correction, "");
}
- return self.builder.buildUDiv(lhs, rhs, "");
+ return self.wip.bin(.udiv, lhs, rhs, "");
}
- fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7266,11 +7631,13 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, "");
- return self.builder.buildExactUDiv(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .@"sdiv exact"
+ else
+ .@"udiv exact", lhs, rhs, "");
}
- fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7282,11 +7649,13 @@ pub const FuncGen = struct {
const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, "");
- return self.builder.buildURem(lhs, rhs, "");
+ return self.wip.bin(if (scalar_ty.isSignedInt(mod))
+ .srem
+ else
+ .urem, lhs, rhs, "");
}
- fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
@@ -7302,36 +7671,29 @@ pub const FuncGen = struct {
const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
const b = try self.buildFloatOp(.add, inst_ty, 2, .{ a, rhs });
const c = try self.buildFloatOp(.fmod, inst_ty, 2, .{ b, rhs });
- const zero = inst_llvm_ty.constNull();
+ const zero = try o.builder.zeroInitValue(inst_llvm_ty);
const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero });
- return self.builder.buildSelect(ltz, c, a, "");
+ return self.wip.select(ltz, c, a, "");
}
if (scalar_ty.isSignedInt(mod)) {
- const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
- const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
- const vec_len = inst_ty.vectorLen(mod);
- const scalar_llvm_ty = try o.lowerType(scalar_ty);
-
- const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
- defer self.gpa.free(shifts);
+ const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst(
+ inst_llvm_ty.scalarType(&o.builder),
+ inst_llvm_ty.scalarBits(&o.builder) - 1,
+ ));
- @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False));
- break :const_vector llvm.constVector(shifts.ptr, vec_len);
- } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False);
-
- const rem = self.builder.buildSRem(lhs, rhs, "");
- const div_sign = self.builder.buildXor(lhs, rhs, "");
- const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, "");
- const rhs_masked = self.builder.buildAnd(rhs, div_sign_mask, "");
- const zero = inst_llvm_ty.constNull();
- const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, "");
- const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero, "");
- return self.builder.buildNSWAdd(rem, correction, "");
+ const rem = try self.wip.bin(.srem, lhs, rhs, "");
+ const div_sign = try self.wip.bin(.xor, lhs, rhs, "");
+ const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, "");
+ const rhs_masked = try self.wip.bin(.@"and", rhs, div_sign_mask, "");
+ const zero = try o.builder.zeroInitValue(inst_llvm_ty);
+ const rem_nonzero = try self.wip.icmp(.ne, rem, zero, "");
+ const correction = try self.wip.select(rem_nonzero, rhs_masked, zero, "");
+ return self.wip.bin(.@"add nsw", rem, correction, "");
}
- return self.builder.buildURem(lhs, rhs, "");
+ return self.wip.bin(.urem, lhs, rhs, "");
}
- fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -7341,49 +7703,37 @@ pub const FuncGen = struct {
const ptr_ty = self.typeOf(bin_op.lhs);
const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod));
switch (ptr_ty.ptrSize(mod)) {
- .One => {
- // It's a pointer to an array, so according to LLVM we need an extra GEP index.
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset };
- return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
- },
- .C, .Many => {
- const indices: [1]*llvm.Value = .{offset};
- return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
- },
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{
+ try o.builder.intValue(try o.lowerType(Type.usize), 0), offset,
+ }, ""),
+ .C, .Many => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{offset}, ""),
.Slice => {
- const base = self.builder.buildExtractValue(ptr, 0, "");
- const indices: [1]*llvm.Value = .{offset};
- return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, "");
+ const base = try self.wip.extractValue(ptr, &.{0}, "");
+ return self.wip.gep(.inbounds, llvm_elem_ty, base, &.{offset}, "");
},
}
}
- fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
- const negative_offset = self.builder.buildNeg(offset, "");
+ const negative_offset = try self.wip.neg(offset, "");
const ptr_ty = self.typeOf(bin_op.lhs);
const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod));
switch (ptr_ty.ptrSize(mod)) {
- .One => {
- // It's a pointer to an array, so according to LLVM we need an extra GEP index.
- const indices: [2]*llvm.Value = .{
- self.context.intType(32).constNull(), negative_offset,
- };
- return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
- },
- .C, .Many => {
- const indices: [1]*llvm.Value = .{negative_offset};
- return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
- },
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{
+ try o.builder.intValue(try o.lowerType(Type.usize), 0), negative_offset,
+ }, ""),
+ .C, .Many => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{negative_offset}, ""),
.Slice => {
- const base = self.builder.buildExtractValue(ptr, 0, "");
- const indices: [1]*llvm.Value = .{negative_offset};
- return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, "");
+ const base = try self.wip.extractValue(ptr, &.{0}, "");
+ return self.wip.gep(.inbounds, llvm_elem_ty, base, &.{negative_offset}, "");
},
}
}
@@ -7393,7 +7743,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -7408,81 +7758,123 @@ pub const FuncGen = struct {
const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
- const llvm_lhs_ty = try o.lowerType(lhs_ty);
const llvm_dest_ty = try o.lowerType(dest_ty);
+ const llvm_lhs_ty = try o.lowerType(lhs_ty);
- const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
- const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
+ const llvm_fn = try self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
+ const llvm_ret_ty = try o.builder.structType(
+ .normal,
+ &.{ llvm_lhs_ty, try llvm_lhs_ty.changeScalar(.i1, &o.builder) },
+ );
+ const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_lhs_ty, llvm_lhs_ty }, .normal);
+ const result_struct = (try self.wip.unimplemented(llvm_ret_ty, "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn,
+ &[_]*llvm.Value{ lhs.toLlvm(&self.wip), rhs.toLlvm(&self.wip) },
+ 2,
+ .Fast,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
- const result = self.builder.buildExtractValue(result_struct, 0, "");
- const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
+ const result = try self.wip.extractValue(result_struct, &.{0}, "");
+ const overflow_bit = try self.wip.extractValue(result_struct, &.{1}, "");
const result_index = llvmField(dest_ty, 0, mod).?.index;
const overflow_index = llvmField(dest_ty, 1, mod).?.index;
if (isByRef(dest_ty, mod)) {
- const result_alignment = dest_ty.abiAlignment(mod);
- const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
+ const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod));
+ const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment);
{
- const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
- const store_inst = self.builder.buildStore(result, field_ptr);
- store_inst.setAlignment(result_alignment);
+ const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
+ _ = try self.wip.store(.normal, result, field_ptr, result_alignment);
}
{
- const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, overflow_index, "");
- const store_inst = self.builder.buildStore(overflow_bit, field_ptr);
- store_inst.setAlignment(1);
+ const overflow_alignment = comptime Builder.Alignment.fromByteUnits(1);
+ const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, "");
+ _ = try self.wip.store(.normal, overflow_bit, field_ptr, overflow_alignment);
}
return alloca_inst;
}
- const partial = self.builder.buildInsertValue(llvm_dest_ty.getUndef(), result, result_index, "");
- return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, "");
+ var fields: [2]Builder.Value = undefined;
+ fields[result_index] = result;
+ fields[overflow_index] = overflow_bit;
+ return self.wip.buildAggregate(llvm_dest_ty, &fields, "");
}
fn buildElementwiseCall(
self: *FuncGen,
- llvm_fn: *llvm.Value,
- args_vectors: []const *llvm.Value,
- result_vector: *llvm.Value,
+ llvm_fn: Builder.Function.Index,
+ args_vectors: []const Builder.Value,
+ result_vector: Builder.Value,
vector_len: usize,
- ) !*llvm.Value {
- const args_len = @as(c_uint, @intCast(args_vectors.len));
- const llvm_i32 = self.context.intType(32);
- assert(args_len <= 3);
+ ) !Builder.Value {
+ const o = self.dg.object;
+ assert(args_vectors.len <= 3);
+
+ const llvm_fn_ty = llvm_fn.typeOf(&o.builder);
+ const llvm_scalar_ty = llvm_fn_ty.functionReturn(&o.builder);
var i: usize = 0;
var result = result_vector;
while (i < vector_len) : (i += 1) {
- const index_i32 = llvm_i32.constInt(i, .False);
+ const index_i32 = try o.builder.intValue(.i32, i);
var args: [3]*llvm.Value = undefined;
- for (args_vectors, 0..) |arg_vector, k| {
- args[k] = self.builder.buildExtractElement(arg_vector, index_i32, "");
+ for (args[0..args_vectors.len], args_vectors) |*arg_elem, arg_vector| {
+ arg_elem.* = (try self.wip.extractElement(arg_vector, index_i32, "")).toLlvm(&self.wip);
}
- const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, "");
- result = self.builder.buildInsertElement(result, result_elem, index_i32, "");
+ const result_elem = (try self.wip.unimplemented(llvm_scalar_ty, "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &args,
+ @intCast(args_vectors.len),
+ .C,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
+ result = try self.wip.insertElement(result, result_elem, index_i32, "");
}
return result;
}
fn getLibcFunction(
self: *FuncGen,
- fn_name: [:0]const u8,
- param_types: []const *llvm.Type,
- return_type: *llvm.Type,
- ) *llvm.Value {
- const o = self.dg.object;
- return o.llvm_module.getNamedFunction(fn_name.ptr) orelse b: {
- const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len);
- break :b if (alias) |a| a.getAliasee() else null;
- } orelse b: {
- const params_len = @as(c_uint, @intCast(param_types.len));
- const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False);
- const f = o.llvm_module.addFunction(fn_name, fn_type);
- break :b f;
+ fn_name: Builder.String,
+ param_types: []const Builder.Type,
+ return_type: Builder.Type,
+ ) Allocator.Error!Builder.Function.Index {
+ const o = self.dg.object;
+ if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) {
+ .alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function,
+ .function => |function| function,
+ else => unreachable,
+ };
+
+ const fn_type = try o.builder.fnType(return_type, param_types, .normal);
+ const f = o.llvm_module.addFunction(fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder));
+
+ var global = Builder.Global{
+ .type = fn_type,
+ .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) },
+ };
+ var function = Builder.Function{
+ .global = @enumFromInt(o.builder.globals.count()),
};
+
+ try o.builder.llvm.globals.append(self.gpa, f);
+ _ = try o.builder.addGlobal(fn_name, global);
+ try o.builder.functions.append(self.gpa, function);
+ return global.kind.function;
}
/// Creates a floating point comparison by lowering to the appropriate
@@ -7491,8 +7883,8 @@ pub const FuncGen = struct {
self: *FuncGen,
pred: math.CompareOperator,
ty: Type,
- params: [2]*llvm.Value,
- ) !*llvm.Value {
+ params: [2]Builder.Value,
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const target = o.module.getTarget();
@@ -7500,20 +7892,19 @@ pub const FuncGen = struct {
const scalar_llvm_ty = try o.lowerType(scalar_ty);
if (intrinsicsAllowed(scalar_ty, target)) {
- const llvm_predicate: llvm.RealPredicate = switch (pred) {
- .eq => .OEQ,
- .neq => .UNE,
- .lt => .OLT,
- .lte => .OLE,
- .gt => .OGT,
- .gte => .OGE,
+ const cond: Builder.FloatCondition = switch (pred) {
+ .eq => .oeq,
+ .neq => .une,
+ .lt => .olt,
+ .lte => .ole,
+ .gt => .ogt,
+ .gte => .oge,
};
- return self.builder.buildFCmp(llvm_predicate, params[0], params[1], "");
+ return self.wip.fcmp(cond, params[0], params[1], "");
}
const float_bits = scalar_ty.floatBits(target);
const compiler_rt_float_abbrev = compilerRtFloatAbbrev(float_bits);
- var fn_name_buf: [64]u8 = undefined;
const fn_base_name = switch (pred) {
.neq => "ne",
.eq => "eq",
@@ -7522,37 +7913,50 @@ pub const FuncGen = struct {
.gt => "gt",
.gte => "ge",
};
- const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__{s}{s}f2", .{
- fn_base_name, compiler_rt_float_abbrev,
- }) catch unreachable;
-
- const param_types = [2]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty };
- const llvm_i32 = self.context.intType(32);
- const libc_fn = self.getLibcFunction(fn_name, param_types[0..], llvm_i32);
-
- const zero = llvm_i32.constInt(0, .False);
- const int_pred: llvm.IntPredicate = switch (pred) {
- .eq => .EQ,
- .neq => .NE,
- .lt => .SLT,
- .lte => .SLE,
- .gt => .SGT,
- .gte => .SGE,
+ const fn_name = try o.builder.fmt("__{s}{s}f2", .{ fn_base_name, compiler_rt_float_abbrev });
+
+ const libc_fn = try self.getLibcFunction(
+ fn_name,
+ ([1]Builder.Type{scalar_llvm_ty} ** 2)[0..],
+ .i32,
+ );
+
+ const zero = try o.builder.intConst(.i32, 0);
+ const int_cond: Builder.IntegerCondition = switch (pred) {
+ .eq => .eq,
+ .neq => .ne,
+ .lt => .slt,
+ .lte => .sle,
+ .gt => .sgt,
+ .gte => .sge,
};
if (ty.zigTypeTag(mod) == .Vector) {
const vec_len = ty.vectorLen(mod);
- const vector_result_ty = llvm_i32.vectorType(vec_len);
+ const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32);
- var result = vector_result_ty.getUndef();
- result = try self.buildElementwiseCall(libc_fn, &params, result, vec_len);
+ const init = try o.builder.poisonValue(vector_result_ty);
+ const result = try self.buildElementwiseCall(libc_fn, &params, init, vec_len);
- const zero_vector = self.builder.buildVectorSplat(vec_len, zero, "");
- return self.builder.buildICmp(int_pred, result, zero_vector, "");
+ const zero_vector = try o.builder.splatValue(vector_result_ty, zero);
+ return self.wip.icmp(int_cond, result, zero_vector, "");
}
- const result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, "");
- return self.builder.buildICmp(int_pred, result, zero, "");
+ const llvm_fn_ty = libc_fn.typeOf(&o.builder);
+ const llvm_params = [2]*llvm.Value{ params[0].toLlvm(&self.wip), params[1].toLlvm(&self.wip) };
+ const result = (try self.wip.unimplemented(
+ llvm_fn_ty.functionReturn(&o.builder),
+ "",
+ )).finish(self.builder.buildCall(
+ libc_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ libc_fn.toLlvm(&o.builder),
+ &llvm_params,
+ llvm_params.len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
+ return self.wip.icmp(int_cond, result, zero.toValue(), "");
}
const FloatOp = enum {
@@ -7583,7 +7987,7 @@ pub const FuncGen = struct {
const FloatOpStrat = union(enum) {
intrinsic: []const u8,
- libc: [:0]const u8,
+ libc: Builder.String,
};
/// Creates a floating point operation (add, sub, fma, sqrt, exp, etc.)
@@ -7594,27 +7998,25 @@ pub const FuncGen = struct {
comptime op: FloatOp,
ty: Type,
comptime params_len: usize,
- params: [params_len]*llvm.Value,
- ) !*llvm.Value {
+ params: [params_len]Builder.Value,
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const target = mod.getTarget();
const scalar_ty = ty.scalarType(mod);
const llvm_ty = try o.lowerType(ty);
- const scalar_llvm_ty = try o.lowerType(scalar_ty);
const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target);
- var fn_name_buf: [64]u8 = undefined;
const strat: FloatOpStrat = if (intrinsics_allowed) switch (op) {
// Some operations are dedicated LLVM instructions, not available as intrinsics
- .neg => return self.builder.buildFNeg(params[0], ""),
- .add => return self.builder.buildFAdd(params[0], params[1], ""),
- .sub => return self.builder.buildFSub(params[0], params[1], ""),
- .mul => return self.builder.buildFMul(params[0], params[1], ""),
- .div => return self.builder.buildFDiv(params[0], params[1], ""),
- .fmod => return self.builder.buildFRem(params[0], params[1], ""),
- .fmax => return self.builder.buildMaxNum(params[0], params[1], ""),
- .fmin => return self.builder.buildMinNum(params[0], params[1], ""),
+ .neg => return self.wip.un(.fneg, params[0], ""),
+ .add => return self.wip.bin(.fadd, params[0], params[1], ""),
+ .sub => return self.wip.bin(.fsub, params[0], params[1], ""),
+ .mul => return self.wip.bin(.fmul, params[0], params[1], ""),
+ .div => return self.wip.bin(.fdiv, params[0], params[1], ""),
+ .fmod => return self.wip.bin(.frem, params[0], params[1], ""),
+ .fmax => return self.wip.bin(.@"llvm.maxnum.", params[0], params[1], ""),
+ .fmin => return self.wip.bin(.@"llvm.minnum.", params[0], params[1], ""),
else => .{ .intrinsic = "llvm." ++ @tagName(op) },
} else b: {
const float_bits = scalar_ty.floatBits(target);
@@ -7622,26 +8024,19 @@ pub const FuncGen = struct {
.neg => {
// In this case we can generate a softfloat negation by XORing the
// bits with a constant.
- const int_llvm_ty = self.context.intType(float_bits);
- const one = int_llvm_ty.constInt(1, .False);
- const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
- const sign_mask = one.constShl(shift_amt);
- const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
- const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, "");
- const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod));
- const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
- break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, "");
- } else blk: {
- const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, "");
- break :blk self.builder.buildXor(bitcasted_operand, sign_mask, "");
- };
- return self.builder.buildBitCast(result, llvm_ty, "");
- },
- .add, .sub, .div, .mul => FloatOpStrat{
- .libc = std.fmt.bufPrintZ(&fn_name_buf, "__{s}{s}f3", .{
- @tagName(op), compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
+ const int_ty = try o.builder.intType(@intCast(float_bits));
+ const cast_ty = try llvm_ty.changeScalar(int_ty, &o.builder);
+ const sign_mask = try o.builder.splatValue(
+ cast_ty,
+ try o.builder.intConst(int_ty, @as(u128, 1) << @intCast(float_bits - 1)),
+ );
+ const bitcasted_operand = try self.wip.cast(.bitcast, params[0], cast_ty, "");
+ const result = try self.wip.bin(.xor, bitcasted_operand, sign_mask, "");
+ return self.wip.cast(.bitcast, result, llvm_ty, "");
},
+ .add, .sub, .div, .mul => .{ .libc = try o.builder.fmt("__{s}{s}f3", .{
+ @tagName(op), compilerRtFloatAbbrev(float_bits),
+ }) },
.ceil,
.cos,
.exp,
@@ -7660,31 +8055,48 @@ pub const FuncGen = struct {
.sqrt,
.tan,
.trunc,
- => FloatOpStrat{
- .libc = std.fmt.bufPrintZ(&fn_name_buf, "{s}{s}{s}", .{
- libcFloatPrefix(float_bits), @tagName(op), libcFloatSuffix(float_bits),
- }) catch unreachable,
- },
+ => .{ .libc = try o.builder.fmt("{s}{s}{s}", .{
+ libcFloatPrefix(float_bits), @tagName(op), libcFloatSuffix(float_bits),
+ }) },
};
};
- const llvm_fn: *llvm.Value = switch (strat) {
- .intrinsic => |fn_name| self.getIntrinsic(fn_name, &.{llvm_ty}),
+ const llvm_fn = switch (strat) {
+ .intrinsic => |fn_name| try self.getIntrinsic(fn_name, &.{llvm_ty}),
.libc => |fn_name| b: {
- const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
- const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
+ const scalar_llvm_ty = llvm_ty.scalarType(&o.builder);
+ const libc_fn = try self.getLibcFunction(
+ fn_name,
+ ([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len],
+ scalar_llvm_ty,
+ );
if (ty.zigTypeTag(mod) == .Vector) {
- const result = llvm_ty.getUndef();
+ const result = try o.builder.poisonValue(llvm_ty);
return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod));
}
- break :b libc_fn;
+ break :b libc_fn.toLlvm(&o.builder);
},
};
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params_len, .C, .Auto, "");
+ const llvm_fn_ty = try o.builder.fnType(
+ llvm_ty,
+ ([1]Builder.Type{llvm_ty} ** 3)[0..params.len],
+ .normal,
+ );
+ var llvm_params: [params_len]*llvm.Value = undefined;
+ for (&llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(&self.wip);
+ return (try self.wip.unimplemented(llvm_ty, "")).finish(self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn,
+ &llvm_params,
+ params_len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
}
- fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7696,7 +8108,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend });
}
- fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -7706,72 +8118,67 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.typeOf(extra.lhs);
- const rhs_ty = self.typeOf(extra.rhs);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
- const rhs_scalar_ty = rhs_ty.scalarType(mod);
const dest_ty = self.typeOfIndex(inst);
const llvm_dest_ty = try o.lowerType(dest_ty);
- const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
- self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "")
- else
- rhs;
+ const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
- const result = self.builder.buildShl(lhs, casted_rhs, "");
- const reconstructed = if (lhs_scalar_ty.isSignedInt(mod))
- self.builder.buildAShr(result, casted_rhs, "")
+ const result = try self.wip.bin(.shl, lhs, casted_rhs, "");
+ const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+ .ashr
else
- self.builder.buildLShr(result, casted_rhs, "");
+ .lshr, result, casted_rhs, "");
- const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
+ const overflow_bit = try self.wip.icmp(.ne, lhs, reconstructed, "");
const result_index = llvmField(dest_ty, 0, mod).?.index;
const overflow_index = llvmField(dest_ty, 1, mod).?.index;
if (isByRef(dest_ty, mod)) {
- const result_alignment = dest_ty.abiAlignment(mod);
- const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
+ const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod));
+ const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment);
{
- const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
- const store_inst = self.builder.buildStore(result, field_ptr);
- store_inst.setAlignment(result_alignment);
+ const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
+ _ = try self.wip.store(.normal, result, field_ptr, result_alignment);
}
{
- const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, overflow_index, "");
- const store_inst = self.builder.buildStore(overflow_bit, field_ptr);
- store_inst.setAlignment(1);
+ const field_alignment = comptime Builder.Alignment.fromByteUnits(1);
+ const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, "");
+ _ = try self.wip.store(.normal, overflow_bit, field_ptr, field_alignment);
}
-
return alloca_inst;
}
- const partial = self.builder.buildInsertValue(llvm_dest_ty.getUndef(), result, result_index, "");
- return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, "");
+ var fields: [2]Builder.Value = undefined;
+ fields[result_index] = result;
+ fields[overflow_index] = overflow_bit;
+ return self.wip.buildAggregate(llvm_dest_ty, &fields, "");
}
- fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- return self.builder.buildAnd(lhs, rhs, "");
+ return self.wip.bin(.@"and", lhs, rhs, "");
}
- fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airOr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- return self.builder.buildOr(lhs, rhs, "");
+ return self.wip.bin(.@"or", lhs, rhs, "");
}
- fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airXor(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- return self.builder.buildXor(lhs, rhs, "");
+ return self.wip.bin(.xor, lhs, rhs, "");
}
- fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7780,39 +8187,29 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
- const rhs_ty = self.typeOf(bin_op.rhs);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
- const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
- self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "")
+ const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
+ return self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+ .@"shl nsw"
else
- rhs;
- if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, "");
- return self.builder.buildNUWShl(lhs, casted_rhs, "");
+ .@"shl nuw", lhs, casted_rhs, "");
}
- fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_type = self.typeOf(bin_op.lhs);
- const rhs_type = self.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_type.scalarType(mod);
- const rhs_scalar_ty = rhs_type.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
- self.builder.buildZExt(rhs, try o.lowerType(lhs_type), "")
- else
- rhs;
- return self.builder.buildShl(lhs, casted_rhs, "");
+ const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_type), "");
+ return self.wip.bin(.shl, lhs, casted_rhs, "");
}
- fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7821,42 +8218,36 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
- const rhs_ty = self.typeOf(bin_op.rhs);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
- const rhs_scalar_ty = rhs_ty.scalarType(mod);
const lhs_bits = lhs_scalar_ty.bitSize(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits)
- self.builder.buildZExt(rhs, lhs.typeOf(), "")
- else
- rhs;
+ const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
- const result = if (lhs_scalar_ty.isSignedInt(mod))
- self.builder.buildSShlSat(lhs, casted_rhs, "")
+ const result = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod))
+ .@"llvm.sshl.sat."
else
- self.builder.buildUShlSat(lhs, casted_rhs, "");
+ .@"llvm.ushl.sat.", lhs, casted_rhs, "");
// LLVM langref says "If b is (statically or dynamically) equal to or
// larger than the integer bit width of the arguments, the result is a
// poison value."
// However Zig semantics says that saturating shift left can never produce
// undefined; instead it saturates.
- const lhs_scalar_llvm_ty = try o.lowerType(lhs_scalar_ty);
- const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
- const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
- if (rhs_ty.zigTypeTag(mod) == .Vector) {
- const vec_len = rhs_ty.vectorLen(mod);
- const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
- const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
- const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, "");
- return self.builder.buildSelect(in_range, result, lhs_max_vec, "");
- } else {
- const in_range = self.builder.buildICmp(.ULT, rhs, bits, "");
- return self.builder.buildSelect(in_range, result, lhs_max, "");
- }
+ const lhs_llvm_ty = try o.lowerType(lhs_ty);
+ const lhs_scalar_llvm_ty = lhs_llvm_ty.scalarType(&o.builder);
+ const bits = try o.builder.splatValue(
+ lhs_llvm_ty,
+ try o.builder.intConst(lhs_scalar_llvm_ty, lhs_bits),
+ );
+ const lhs_max = try o.builder.splatValue(
+ lhs_llvm_ty,
+ try o.builder.intConst(lhs_scalar_llvm_ty, -1),
+ );
+ const in_range = try self.wip.icmp(.ult, rhs, bits, "");
+ return self.wip.select(in_range, result, lhs_max, "");
}
- fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
+ fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7865,63 +8256,41 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
- const rhs_ty = self.typeOf(bin_op.rhs);
const lhs_scalar_ty = lhs_ty.scalarType(mod);
- const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
- self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "")
- else
- rhs;
+ const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), "");
const is_signed_int = lhs_scalar_ty.isSignedInt(mod);
- if (is_exact) {
- if (is_signed_int) {
- return self.builder.buildAShrExact(lhs, casted_rhs, "");
- } else {
- return self.builder.buildLShrExact(lhs, casted_rhs, "");
- }
- } else {
- if (is_signed_int) {
- return self.builder.buildAShr(lhs, casted_rhs, "");
- } else {
- return self.builder.buildLShr(lhs, casted_rhs, "");
- }
- }
+ return self.wip.bin(if (is_exact)
+ if (is_signed_int) .@"ashr exact" else .@"lshr exact"
+ else if (is_signed_int) .ashr else .lshr, lhs, casted_rhs, "");
}
- fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.typeOfIndex(inst);
- const dest_info = dest_ty.intInfo(mod);
const dest_llvm_ty = try o.lowerType(dest_ty);
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
const operand_info = operand_ty.intInfo(mod);
- if (operand_info.bits < dest_info.bits) {
- switch (operand_info.signedness) {
- .signed => return self.builder.buildSExt(operand, dest_llvm_ty, ""),
- .unsigned => return self.builder.buildZExt(operand, dest_llvm_ty, ""),
- }
- } else if (operand_info.bits > dest_info.bits) {
- return self.builder.buildTrunc(operand, dest_llvm_ty, "");
- } else {
- return operand;
- }
+ return self.wip.conv(switch (operand_info.signedness) {
+ .signed => .signed,
+ .unsigned => .unsigned,
+ }, operand, dest_llvm_ty, "");
}
- fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
- return self.builder.buildTrunc(operand, dest_llvm_ty, "");
+ return self.wip.cast(.trunc, operand, dest_llvm_ty, "");
}
- fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -7933,26 +8302,30 @@ pub const FuncGen = struct {
const src_bits = operand_ty.floatBits(target);
if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
- const dest_llvm_ty = try o.lowerType(dest_ty);
- return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
+ return self.wip.cast(.fptrunc, operand, try o.lowerType(dest_ty), "");
} else {
const operand_llvm_ty = try o.lowerType(operand_ty);
const dest_llvm_ty = try o.lowerType(dest_ty);
- var fn_name_buf: [64]u8 = undefined;
- const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__trunc{s}f{s}f2", .{
+ const fn_name = try o.builder.fmt("__trunc{s}f{s}f2", .{
compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
- }) catch unreachable;
-
- const params = [1]*llvm.Value{operand};
- const param_types = [1]*llvm.Type{operand_llvm_ty};
- const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
+ });
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, "");
+ const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty);
+ const params = [1]*llvm.Value{operand.toLlvm(&self.wip)};
+ return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall(
+ llvm_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
}
}
- fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -7964,36 +8337,40 @@ pub const FuncGen = struct {
const src_bits = operand_ty.floatBits(target);
if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
- const dest_llvm_ty = try o.lowerType(dest_ty);
- return self.builder.buildFPExt(operand, dest_llvm_ty, "");
+ return self.wip.cast(.fpext, operand, try o.lowerType(dest_ty), "");
} else {
const operand_llvm_ty = try o.lowerType(operand_ty);
const dest_llvm_ty = try o.lowerType(dest_ty);
- var fn_name_buf: [64]u8 = undefined;
- const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__extend{s}f{s}f2", .{
+ const fn_name = try o.builder.fmt("__extend{s}f{s}f2", .{
compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
- }) catch unreachable;
-
- const params = [1]*llvm.Value{operand};
- const param_types = [1]*llvm.Type{operand_llvm_ty};
- const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
+ });
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, "");
+ const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty);
+ const params = [1]*llvm.Value{operand.toLlvm(&self.wip)};
+ return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall(
+ llvm_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
}
}
- fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty);
+ const operand_ptr = try self.sliceOrArrayPtr(operand, ptr_ty);
const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
- return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
+ return self.wip.cast(.ptrtoint, operand_ptr, dest_llvm_ty, "");
}
- fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value {
+ fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const inst_ty = self.typeOfIndex(inst);
@@ -8001,7 +8378,7 @@ pub const FuncGen = struct {
return self.bitCast(operand, operand_ty, inst_ty);
}
- fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value {
+ fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const operand_is_ref = isByRef(operand_ty, mod);
@@ -8013,14 +8390,14 @@ pub const FuncGen = struct {
return operand;
}
- if (llvm_dest_ty.getTypeKind() == .Integer and
- operand.typeOf().getTypeKind() == .Integer)
+ if (llvm_dest_ty.isInteger(&o.builder) and
+ operand.typeOfWip(&self.wip).isInteger(&o.builder))
{
- return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, "");
+ return self.wip.conv(.unsigned, operand, llvm_dest_ty, "");
}
if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
- return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
+ return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
}
if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
@@ -8028,104 +8405,97 @@ pub const FuncGen = struct {
if (!result_is_ref) {
return self.dg.todo("implement bitcast vector to non-ref array", .{});
}
- const array_ptr = self.buildAlloca(llvm_dest_ty, null);
+ const array_ptr = try self.buildAlloca(llvm_dest_ty, .default);
const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
- const llvm_store = self.builder.buildStore(operand, array_ptr);
- llvm_store.setAlignment(inst_ty.abiAlignment(mod));
+ const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
+ _ = try self.wip.store(.normal, operand, array_ptr, alignment);
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
// a simple bitcast will not work, and we fall back to extractelement.
const llvm_usize = try o.lowerType(Type.usize);
- const llvm_u32 = self.context.intType(32);
- const zero = llvm_usize.constNull();
+ const usize_zero = try o.builder.intValue(llvm_usize, 0);
const vector_len = operand_ty.arrayLen(mod);
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
- const index_usize = llvm_usize.constInt(i, .False);
- const index_u32 = llvm_u32.constInt(i, .False);
- const indexes: [2]*llvm.Value = .{ zero, index_usize };
- const elem_ptr = self.builder.buildInBoundsGEP(llvm_dest_ty, array_ptr, &indexes, indexes.len, "");
- const elem = self.builder.buildExtractElement(operand, index_u32, "");
- _ = self.builder.buildStore(elem, elem_ptr);
+ const elem_ptr = try self.wip.gep(.inbounds, llvm_dest_ty, array_ptr, &.{
+ usize_zero, try o.builder.intValue(llvm_usize, i),
+ }, "");
+ const elem =
+ try self.wip.extractElement(operand, try o.builder.intValue(.i32, i), "");
+ _ = try self.wip.store(.normal, elem, elem_ptr, .default);
}
}
return array_ptr;
} else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = operand_ty.childType(mod);
const llvm_vector_ty = try o.lowerType(inst_ty);
- if (!operand_is_ref) {
- return self.dg.todo("implement bitcast non-ref array to vector", .{});
- }
+ if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{});
const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
- const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
- vector.setAlignment(elem_ty.abiAlignment(mod));
- return vector;
+ const alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+ return self.wip.load(.normal, llvm_vector_ty, operand, alignment, "");
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
// a simple bitcast will not work, and we fall back to extractelement.
const array_llvm_ty = try o.lowerType(operand_ty);
const elem_llvm_ty = try o.lowerType(elem_ty);
const llvm_usize = try o.lowerType(Type.usize);
- const llvm_u32 = self.context.intType(32);
- const zero = llvm_usize.constNull();
+ const usize_zero = try o.builder.intValue(llvm_usize, 0);
const vector_len = operand_ty.arrayLen(mod);
- var vector = llvm_vector_ty.getUndef();
+ var vector = try o.builder.poisonValue(llvm_vector_ty);
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
- const index_usize = llvm_usize.constInt(i, .False);
- const index_u32 = llvm_u32.constInt(i, .False);
- const indexes: [2]*llvm.Value = .{ zero, index_usize };
- const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indexes, indexes.len, "");
- const elem = self.builder.buildLoad(elem_llvm_ty, elem_ptr, "");
- vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
+ const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, operand, &.{
+ usize_zero, try o.builder.intValue(llvm_usize, i),
+ }, "");
+ const elem = try self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, "");
+ vector =
+ try self.wip.insertElement(vector, elem, try o.builder.intValue(.i32, i), "");
}
-
return vector;
}
}
if (operand_is_ref) {
- const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
- load_inst.setAlignment(operand_ty.abiAlignment(mod));
- return load_inst;
+ const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod));
+ return self.wip.load(.normal, llvm_dest_ty, operand, alignment, "");
}
if (result_is_ref) {
- const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
- const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
- const store_inst = self.builder.buildStore(operand, result_ptr);
- store_inst.setAlignment(alignment);
+ const alignment = Builder.Alignment.fromByteUnits(
+ @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)),
+ );
+ const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
+ _ = try self.wip.store(.normal, operand, result_ptr, alignment);
return result_ptr;
}
- if (llvm_dest_ty.getTypeKind() == .Struct) {
+ if (llvm_dest_ty.isStruct(&o.builder)) {
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values.
// Therefore, we store operand to alloca, then load for result.
- const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
- const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
- const store_inst = self.builder.buildStore(operand, result_ptr);
- store_inst.setAlignment(alignment);
- const load_inst = self.builder.buildLoad(llvm_dest_ty, result_ptr, "");
- load_inst.setAlignment(alignment);
- return load_inst;
+ const alignment = Builder.Alignment.fromByteUnits(
+ @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)),
+ );
+ const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
+ _ = try self.wip.store(.normal, operand, result_ptr, alignment);
+ return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
}
- return self.builder.buildBitCast(operand, llvm_dest_ty, "");
+ return self.wip.cast(.bitcast, operand, llvm_dest_ty, "");
}
- fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
return operand;
}
- fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const arg_val = self.args[self.arg_index];
@@ -8133,9 +8503,7 @@ pub const FuncGen = struct {
const inst_ty = self.typeOfIndex(inst);
if (o.di_builder) |dib| {
- if (needDbgVarWorkaround(o)) {
- return arg_val;
- }
+ if (needDbgVarWorkaround(o)) return arg_val;
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
const func_index = self.dg.decl.getOwnedFunctionIndex();
@@ -8150,61 +8518,64 @@ pub const FuncGen = struct {
try o.lowerDebugType(inst_ty, .full),
true, // always preserve
0, // flags
- self.arg_index, // includes +1 because 0 is return type
+ @intCast(self.arg_index), // includes +1 because 0 is return type
);
const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null);
- const insert_block = self.builder.getInsertBlock();
+ const insert_block = self.wip.cursor.block.toLlvm(&self.wip);
if (isByRef(inst_ty, mod)) {
- _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block);
+ _ = dib.insertDeclareAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else if (o.module.comp.bin_file.options.optimize_mode == .Debug) {
- const alignment = inst_ty.abiAlignment(mod);
- const alloca = self.buildAlloca(arg_val.typeOf(), alignment);
- const store_inst = self.builder.buildStore(arg_val, alloca);
- store_inst.setAlignment(alignment);
- _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block);
+ const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
+ const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
+ _ = try self.wip.store(.normal, arg_val, alloca, alignment);
+ _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else {
- _ = dib.insertDbgValueIntrinsicAtEnd(arg_val, di_local_var, debug_loc, insert_block);
+ _ = dib.insertDbgValueIntrinsicAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
}
}
return arg_val;
}
- fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ptr_ty = self.typeOfIndex(inst);
const pointee_type = ptr_ty.childType(mod);
if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod))
- return o.lowerPtrToVoid(ptr_ty);
+ return (try o.lowerPtrToVoid(ptr_ty)).toValue();
const pointee_llvm_ty = try o.lowerType(pointee_type);
- const alignment = ptr_ty.ptrAlignment(mod);
+ const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
return self.buildAlloca(pointee_llvm_ty, alignment);
}
- fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ptr_ty = self.typeOfIndex(inst);
const ret_ty = ptr_ty.childType(mod);
- if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty);
- if (self.ret_ptr) |ret_ptr| return ret_ptr;
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
+ return (try o.lowerPtrToVoid(ptr_ty)).toValue();
+ if (self.ret_ptr != .none) return self.ret_ptr;
const ret_llvm_ty = try o.lowerType(ret_ty);
- return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod));
+ const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+ return self.buildAlloca(ret_llvm_ty, alignment);
}
/// Use this instead of builder.buildAlloca, because this function makes sure to
/// put the alloca instruction at the top of the function!
- fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) *llvm.Value {
- const o = self.dg.object;
- const mod = o.module;
- const target = mod.getTarget();
- return buildAllocaInner(self.context, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target);
+ fn buildAlloca(
+ self: *FuncGen,
+ llvm_ty: Builder.Type,
+ alignment: Builder.Alignment,
+ ) Allocator.Error!Builder.Value {
+ const target = self.dg.object.module.getTarget();
+ return buildAllocaInner(&self.wip, self.di_scope != null, llvm_ty, alignment, target);
}
- fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+ fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -8217,25 +8588,30 @@ pub const FuncGen = struct {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
- const u8_llvm_ty = self.context.intType(8);
const fill_byte = if (safety)
- u8_llvm_ty.constInt(0xaa, .False)
+ try o.builder.intConst(.i8, 0xaa)
else
- u8_llvm_ty.getUndef();
+ try o.builder.undefConst(.i8);
const operand_size = operand_ty.abiSize(mod);
- const usize_llvm_ty = try o.lowerType(Type.usize);
- const len = usize_llvm_ty.constInt(operand_size, .False);
- const dest_ptr_align = ptr_ty.ptrAlignment(mod);
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod));
+ const usize_ty = try o.lowerType(Type.usize);
+ const len = try o.builder.intValue(usize_ty, operand_size);
+ const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
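+        // Note: memset has no native WipFunction instruction yet; it is built with the
+        // legacy LLVM C API builder and the returned value is wrapped via
+        // unimplemented(...).finish(...) so the self-hosted Builder still tracks it.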
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet(
+ dest_ptr.toLlvm(&self.wip),
+ fill_byte.toLlvm(&o.builder),
+ len.toLlvm(&self.wip),
+ @intCast(dest_ptr_align.toByteUnits() orelse 0),
+ ptr_ty.isVolatilePtr(mod),
+ ), &self.wip);
if (safety and mod.comp.bin_file.options.valgrind) {
- self.valgrindMarkUndef(dest_ptr, len);
+ try self.valgrindMarkUndef(dest_ptr, len);
}
- return null;
+ return .none;
}
const src_operand = try self.resolveInst(bin_op.rhs);
- try self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic);
- return null;
+ try self.store(dest_ptr, ptr_ty, src_operand, .none);
+ return .none;
}
/// As an optimization, we want to avoid unnecessary copies of isByRef=true
@@ -8260,7 +8636,7 @@ pub const FuncGen = struct {
return false;
}
- fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
const o = fg.dg.object;
const mod = o.module;
const inst = body_tail[0];
@@ -8277,22 +8653,40 @@ pub const FuncGen = struct {
return fg.load(ptr, ptr_ty);
}
- fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
- const llvm_fn = self.getIntrinsic("llvm.trap", &.{});
- _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, "");
- _ = self.builder.buildUnreachable();
- return null;
+ const o = self.dg.object;
+ const llvm_fn = try self.getIntrinsic("llvm.trap", &.{});
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ undefined,
+ 0,
+ .Cold,
+ .Auto,
+ "",
+ ), &self.wip);
+ _ = try self.wip.@"unreachable"();
+ return .none;
}
- fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
- const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
- _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .C, .Auto, "");
- return null;
+ const o = self.dg.object;
+ const llvm_fn = try self.getIntrinsic("llvm.debugtrap", &.{});
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ undefined,
+ 0,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
+ return .none;
}
- fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
const o = self.dg.object;
const mod = o.module;
@@ -8300,43 +8694,61 @@ pub const FuncGen = struct {
const target = mod.getTarget();
if (!target_util.supportsReturnAddress(target)) {
// https://github.com/ziglang/zig/issues/11946
- return llvm_usize.constNull();
+ return o.builder.intValue(llvm_usize, 0);
}
- const llvm_i32 = self.context.intType(32);
- const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{});
- const params = [_]*llvm.Value{llvm_i32.constNull()};
- const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
- return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
+ const llvm_fn = try self.getIntrinsic("llvm.returnaddress", &.{});
+ const params = [_]*llvm.Value{
+ (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder),
+ };
+ const ptr_val = (try self.wip.unimplemented(.ptr, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(.ptr, &.{.i32}, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ &params,
+ params.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
+ return self.wip.cast(.ptrtoint, ptr_val, llvm_usize, "");
}
- fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
_ = inst;
const o = self.dg.object;
- const llvm_i32 = self.context.intType(32);
const llvm_fn_name = "llvm.frameaddress.p0";
const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- const llvm_p0i8 = self.context.pointerType(0);
- const param_types = [_]*llvm.Type{llvm_i32};
- const fn_type = llvm.functionType(llvm_p0i8, &param_types, param_types.len, .False);
- break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type);
+ const fn_type = try o.builder.fnType(.ptr, &.{.i32}, .normal);
+ break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder));
};
+ const llvm_fn_ty = try o.builder.fnType(.ptr, &.{.i32}, .normal);
- const params = [_]*llvm.Value{llvm_i32.constNull()};
- const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
- const llvm_usize = try o.lowerType(Type.usize);
- return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
+ const params = [_]*llvm.Value{
+ (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder),
+ };
+ const ptr_val = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn,
+ &params,
+ params.len,
+ .Fast,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
+ return self.wip.cast(.ptrtoint, ptr_val, try o.lowerType(Type.usize), "");
}
- fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airFence(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const atomic_order = self.air.instructions.items(.data)[inst].fence;
- const llvm_memory_order = toLlvmAtomicOrdering(atomic_order);
- const single_threaded = llvm.Bool.fromBool(self.single_threaded);
- _ = self.builder.buildFence(llvm_memory_order, single_threaded, "");
- return null;
+ const ordering = toLlvmAtomicOrdering(atomic_order);
+ _ = try self.wip.fence(self.sync_scope, ordering);
+ return .none;
}
- fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
+ fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -8345,46 +8757,51 @@ pub const FuncGen = struct {
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
const operand_ty = self.typeOf(extra.ptr).childType(mod);
- const opt_abi_ty = o.getAtomicAbiType(operand_ty, false);
- if (opt_abi_ty) |abi_ty| {
+ const llvm_operand_ty = try o.lowerType(operand_ty);
+ const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
+ if (llvm_abi_ty != .none) {
// operand needs widening and truncating
- if (operand_ty.isSignedInt(mod)) {
- expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
- new_value = self.builder.buildSExt(new_value, abi_ty, "");
- } else {
- expected_value = self.builder.buildZExt(expected_value, abi_ty, "");
- new_value = self.builder.buildZExt(new_value, abi_ty, "");
- }
+ const signedness: Builder.Function.Instruction.Cast.Signedness =
+ if (operand_ty.isSignedInt(mod)) .signed else .unsigned;
+ expected_value = try self.wip.conv(signedness, expected_value, llvm_abi_ty, "");
+ new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, "");
}
- const result = self.builder.buildAtomicCmpXchg(
- ptr,
- expected_value,
- new_value,
- toLlvmAtomicOrdering(extra.successOrder()),
- toLlvmAtomicOrdering(extra.failureOrder()),
- llvm.Bool.fromBool(self.single_threaded),
+
+ const llvm_result_ty = try o.builder.structType(.normal, &.{
+ if (llvm_abi_ty != .none) llvm_abi_ty else llvm_operand_ty,
+ .i1,
+ });
+ const result = (try self.wip.unimplemented(llvm_result_ty, "")).finish(
+ self.builder.buildAtomicCmpXchg(
+ ptr.toLlvm(&self.wip),
+ expected_value.toLlvm(&self.wip),
+ new_value.toLlvm(&self.wip),
+ @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.successOrder()))),
+ @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.failureOrder()))),
+ llvm.Bool.fromBool(self.sync_scope == .singlethread),
+ ),
+ &self.wip,
);
- result.setWeak(llvm.Bool.fromBool(is_weak));
+ result.toLlvm(&self.wip).setWeak(llvm.Bool.fromBool(is_weak));
const optional_ty = self.typeOfIndex(inst);
- var payload = self.builder.buildExtractValue(result, 0, "");
- if (opt_abi_ty != null) {
- payload = self.builder.buildTrunc(payload, try o.lowerType(operand_ty), "");
- }
- const success_bit = self.builder.buildExtractValue(result, 1, "");
+ var payload = try self.wip.extractValue(result, &.{0}, "");
+ if (llvm_abi_ty != .none) payload = try self.wip.cast(.trunc, payload, llvm_operand_ty, "");
+ const success_bit = try self.wip.extractValue(result, &.{1}, "");
if (optional_ty.optionalReprIsPayload(mod)) {
- return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
+ const zero = try o.builder.zeroInitValue(payload.typeOfWip(&self.wip));
+ return self.wip.select(success_bit, zero, payload, "");
}
comptime assert(optional_layout_version == 3);
- const non_null_bit = self.builder.buildNot(success_bit, "");
+ const non_null_bit = try self.wip.not(success_bit, "");
return buildOptional(self, optional_ty, payload, non_null_bit);
}
- fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
@@ -8397,120 +8814,146 @@ pub const FuncGen = struct {
const is_float = operand_ty.isRuntimeFloat();
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
const ordering = toLlvmAtomicOrdering(extra.ordering());
- const single_threaded = llvm.Bool.fromBool(self.single_threaded);
- const opt_abi_ty = o.getAtomicAbiType(operand_ty, op == .Xchg);
- if (opt_abi_ty) |abi_ty| {
+ const single_threaded = llvm.Bool.fromBool(self.sync_scope == .singlethread);
+ const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg);
+ const llvm_operand_ty = try o.lowerType(operand_ty);
+ if (llvm_abi_ty != .none) {
// operand needs widening and truncating or bitcasting.
- const casted_operand = if (is_float)
- self.builder.buildBitCast(operand, abi_ty, "")
- else if (is_signed_int)
- self.builder.buildSExt(operand, abi_ty, "")
- else
- self.builder.buildZExt(operand, abi_ty, "");
+ const casted_operand = try self.wip.cast(
+ if (is_float) .bitcast else if (is_signed_int) .sext else .zext,
+ @enumFromInt(@intFromEnum(operand)),
+ llvm_abi_ty,
+ "",
+ );
- const uncasted_result = self.builder.buildAtomicRmw(
- op,
- ptr,
- casted_operand,
- ordering,
- single_threaded,
+ const uncasted_result = (try self.wip.unimplemented(llvm_abi_ty, "")).finish(
+ self.builder.buildAtomicRmw(
+ op,
+ ptr.toLlvm(&self.wip),
+ casted_operand.toLlvm(&self.wip),
+ @enumFromInt(@intFromEnum(ordering)),
+ single_threaded,
+ ),
+ &self.wip,
);
- const operand_llvm_ty = try o.lowerType(operand_ty);
+
if (is_float) {
- return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, "");
+ return self.wip.cast(.bitcast, uncasted_result, llvm_operand_ty, "");
} else {
- return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, "");
+ return self.wip.cast(.trunc, uncasted_result, llvm_operand_ty, "");
}
}
- if (operand.typeOf().getTypeKind() != .Pointer) {
- return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded);
+ if (!llvm_operand_ty.isPointer(&o.builder)) {
+ return (try self.wip.unimplemented(llvm_operand_ty, "")).finish(
+ self.builder.buildAtomicRmw(
+ op,
+ ptr.toLlvm(&self.wip),
+ operand.toLlvm(&self.wip),
+ @enumFromInt(@intFromEnum(ordering)),
+ single_threaded,
+ ),
+ &self.wip,
+ );
}
// It's a pointer but we need to treat it as an int.
- const usize_llvm_ty = try o.lowerType(Type.usize);
- const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, "");
- const uncasted_result = self.builder.buildAtomicRmw(
- op,
- ptr,
- casted_operand,
- ordering,
- single_threaded,
+ const llvm_usize = try o.lowerType(Type.usize);
+ const casted_operand = try self.wip.cast(.ptrtoint, operand, llvm_usize, "");
+ const uncasted_result = (try self.wip.unimplemented(llvm_usize, "")).finish(
+ self.builder.buildAtomicRmw(
+ op,
+ ptr.toLlvm(&self.wip),
+ casted_operand.toLlvm(&self.wip),
+ @enumFromInt(@intFromEnum(ordering)),
+ single_threaded,
+ ),
+ &self.wip,
);
- const operand_llvm_ty = try o.lowerType(operand_ty);
- return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, "");
+ return self.wip.cast(.inttoptr, uncasted_result, llvm_operand_ty, "");
}
- fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.typeOf(atomic_load.ptr);
- const ptr_info = ptr_ty.ptrInfo(mod);
- const elem_ty = ptr_info.child.toType();
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
- return null;
+ const info = ptr_ty.ptrInfo(mod);
+ const elem_ty = info.child.toType();
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
- const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false);
- const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse
- ptr_info.child.toType().abiAlignment(mod)));
- const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile);
+ const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
+ const ptr_alignment = Builder.Alignment.fromByteUnits(
+ info.flags.alignment.toByteUnitsOptional() orelse info.child.toType().abiAlignment(mod),
+ );
+ const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) {
+ false => .normal,
+ true => .@"volatile",
+ };
const elem_llvm_ty = try o.lowerType(elem_ty);
- if (opt_abi_llvm_ty) |abi_llvm_ty| {
+ if (llvm_abi_ty != .none) {
// operand needs widening and truncating
- const load_inst = self.builder.buildLoad(abi_llvm_ty, ptr, "");
- load_inst.setAlignment(ptr_alignment);
- load_inst.setVolatile(ptr_volatile);
- load_inst.setOrdering(ordering);
- return self.builder.buildTrunc(load_inst, elem_llvm_ty, "");
+ const loaded = try self.wip.loadAtomic(
+ ptr_kind,
+ llvm_abi_ty,
+ ptr,
+ self.sync_scope,
+ ordering,
+ ptr_alignment,
+ "",
+ );
+ return self.wip.cast(.trunc, loaded, elem_llvm_ty, "");
}
- const load_inst = self.builder.buildLoad(elem_llvm_ty, ptr, "");
- load_inst.setAlignment(ptr_alignment);
- load_inst.setVolatile(ptr_volatile);
- load_inst.setOrdering(ordering);
- return load_inst;
+ return self.wip.loadAtomic(
+ ptr_kind,
+ elem_llvm_ty,
+ ptr,
+ self.sync_scope,
+ ordering,
+ ptr_alignment,
+ "",
+ );
}
fn airAtomicStore(
self: *FuncGen,
inst: Air.Inst.Index,
- ordering: llvm.AtomicOrdering,
- ) !?*llvm.Value {
+ ordering: Builder.AtomicOrdering,
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType(mod);
- if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
+ if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
- const opt_abi_ty = o.getAtomicAbiType(operand_ty, false);
+ const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false);
- if (opt_abi_ty) |abi_ty| {
+ if (llvm_abi_ty != .none) {
// operand needs widening
- if (operand_ty.isSignedInt(mod)) {
- element = self.builder.buildSExt(element, abi_ty, "");
- } else {
- element = self.builder.buildZExt(element, abi_ty, "");
- }
+ element = try self.wip.conv(
+ if (operand_ty.isSignedInt(mod)) .signed else .unsigned,
+ element,
+ llvm_abi_ty,
+ "",
+ );
}
try self.store(ptr, ptr_ty, element, ordering);
- return null;
+ return .none;
}
- fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+ fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOf(bin_op.rhs);
- const target = mod.getTarget();
- const dest_ptr_align = ptr_ty.ptrAlignment(mod);
- const u8_llvm_ty = self.context.intType(8);
- const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
+ const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+ const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty);
const is_volatile = ptr_ty.isVolatilePtr(mod);
// Any WebAssembly runtime will trap when the destination pointer is out-of-bounds, regardless
@@ -8527,20 +8970,26 @@ pub const FuncGen = struct {
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
const fill_byte = if (safety)
- u8_llvm_ty.constInt(0xaa, .False)
+ try o.builder.intValue(.i8, 0xaa)
else
- u8_llvm_ty.getUndef();
- const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
+ try o.builder.undefValue(.i8);
+ const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
if (intrinsic_len0_traps) {
try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
} else {
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet(
+ dest_ptr.toLlvm(&self.wip),
+ fill_byte.toLlvm(&self.wip),
+ len.toLlvm(&self.wip),
+ @intCast(dest_ptr_align.toByteUnits() orelse 0),
+ is_volatile,
+ ), &self.wip);
}
if (safety and mod.comp.bin_file.options.valgrind) {
- self.valgrindMarkUndef(dest_ptr, len);
+ try self.valgrindMarkUndef(dest_ptr, len);
}
- return null;
+ return .none;
}
// Test if the element value is compile-time known to be a
@@ -8548,18 +8997,21 @@ pub const FuncGen = struct {
// repeating byte pattern of 0 bytes. In such case, the memset
// intrinsic can be used.
if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
- const fill_byte = try self.resolveValue(.{
- .ty = Type.u8,
- .val = byte_val,
- });
- const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
+ const fill_byte = try self.resolveValue(.{ .ty = Type.u8, .val = byte_val });
+ const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
if (intrinsic_len0_traps) {
- try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
+ try self.safeWasmMemset(dest_ptr, fill_byte.toValue(), len, dest_ptr_align, is_volatile);
} else {
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet(
+ dest_ptr.toLlvm(&self.wip),
+ fill_byte.toLlvm(&o.builder),
+ len.toLlvm(&self.wip),
+ @intCast(dest_ptr_align.toByteUnits() orelse 0),
+ is_volatile,
+ ), &self.wip);
}
- return null;
+ return .none;
}
}
@@ -8569,14 +9021,20 @@ pub const FuncGen = struct {
if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
const fill_byte = try self.bitCast(value, elem_ty, Type.u8);
- const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
+ const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
if (intrinsic_len0_traps) {
try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
} else {
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet(
+ dest_ptr.toLlvm(&self.wip),
+ fill_byte.toLlvm(&self.wip),
+ len.toLlvm(&self.wip),
+ @intCast(dest_ptr_align.toByteUnits() orelse 0),
+ is_volatile,
+ ), &self.wip);
}
- return null;
+ return .none;
}
// non-byte-sized element. lower with a loop. something like this:
@@ -8584,88 +9042,92 @@ pub const FuncGen = struct {
// entry:
// ...
// %end_ptr = getelementptr %ptr, %len
- // br loop
+ // br %loop
// loop:
// %it_ptr = phi body %next_ptr, entry %ptr
    //   %end = cmp ne %it_ptr, %end_ptr
- // cond_br %end body, end
+ // br %end, %body, %end
// body:
// store %it_ptr, %value
// %next_ptr = getelementptr %it_ptr, 1
- // br loop
+ // br %loop
// end:
// ...
- const entry_block = self.builder.getInsertBlock();
- const loop_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetLoop");
- const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody");
- const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");
+ const entry_block = self.wip.cursor.block;
+ const loop_block = try self.wip.block(2, "InlineMemsetLoop");
+ const body_block = try self.wip.block(1, "InlineMemsetBody");
+ const end_block = try self.wip.block(1, "InlineMemsetEnd");
- const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
+ const usize_ty = try o.lowerType(Type.usize);
const len = switch (ptr_ty.ptrSize(mod)) {
- .Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
- .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False),
+ .Slice => try self.wip.extractValue(dest_slice, &.{1}, ""),
+ .One => try o.builder.intValue(usize_ty, ptr_ty.childType(mod).arrayLen(mod)),
.Many, .C => unreachable,
};
const elem_llvm_ty = try o.lowerType(elem_ty);
- const len_gep = [_]*llvm.Value{len};
- const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, "");
- _ = self.builder.buildBr(loop_block);
+ const end_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, dest_ptr, &.{len}, "");
+ _ = try self.wip.br(loop_block);
- self.builder.positionBuilderAtEnd(loop_block);
- const it_ptr = self.builder.buildPhi(self.context.pointerType(0), "");
- const end = self.builder.buildICmp(.NE, it_ptr, end_ptr, "");
- _ = self.builder.buildCondBr(end, body_block, end_block);
+ self.wip.cursor = .{ .block = loop_block };
+ const it_ptr = try self.wip.phi(.ptr, "");
+ const end = try self.wip.icmp(.ne, it_ptr.toValue(), end_ptr, "");
+ _ = try self.wip.brCond(end, body_block, end_block);
- self.builder.positionBuilderAtEnd(body_block);
+ self.wip.cursor = .{ .block = body_block };
const elem_abi_alignment = elem_ty.abiAlignment(mod);
- const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align);
+ const it_ptr_alignment = Builder.Alignment.fromByteUnits(
+ @min(elem_abi_alignment, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)),
+ );
if (isByRef(elem_ty, mod)) {
- _ = self.builder.buildMemCpy(
- it_ptr,
- it_ptr_alignment,
- value,
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy(
+ it_ptr.toValue().toLlvm(&self.wip),
+ @intCast(it_ptr_alignment.toByteUnits() orelse 0),
+ value.toLlvm(&self.wip),
elem_abi_alignment,
- llvm_usize_ty.constInt(elem_abi_size, .False),
+ (try o.builder.intConst(usize_ty, elem_abi_size)).toLlvm(&o.builder),
is_volatile,
- );
- } else {
- const store_inst = self.builder.buildStore(value, it_ptr);
- store_inst.setAlignment(it_ptr_alignment);
- store_inst.setVolatile(llvm.Bool.fromBool(is_volatile));
- }
- const one_gep = [_]*llvm.Value{llvm_usize_ty.constInt(1, .False)};
- const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, "");
- _ = self.builder.buildBr(loop_block);
+ ), &self.wip);
+ } else _ = try self.wip.store(switch (is_volatile) {
+ false => .normal,
+ true => .@"volatile",
+ }, value, it_ptr.toValue(), it_ptr_alignment);
+ const next_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, it_ptr.toValue(), &.{
+ try o.builder.intValue(usize_ty, 1),
+ }, "");
+ _ = try self.wip.br(loop_block);
- self.builder.positionBuilderAtEnd(end_block);
-
- const incoming_values: [2]*llvm.Value = .{ next_ptr, dest_ptr };
- const incoming_blocks: [2]*llvm.BasicBlock = .{ body_block, entry_block };
- it_ptr.addIncoming(&incoming_values, &incoming_blocks, 2);
-
- return null;
+ self.wip.cursor = .{ .block = end_block };
+ try it_ptr.finish(&.{ next_ptr, dest_ptr }, &.{ body_block, entry_block }, &self.wip);
+ return .none;
}
fn safeWasmMemset(
self: *FuncGen,
- dest_ptr: *llvm.Value,
- fill_byte: *llvm.Value,
- len: *llvm.Value,
- dest_ptr_align: u32,
+ dest_ptr: Builder.Value,
+ fill_byte: Builder.Value,
+ len: Builder.Value,
+ dest_ptr_align: Builder.Alignment,
is_volatile: bool,
) !void {
- const llvm_usize_ty = self.context.intType(self.dg.object.target.ptrBitWidth());
- const cond = try self.cmp(len, llvm_usize_ty.constInt(0, .False), Type.usize, .neq);
- const memset_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapSkip");
- const end_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapEnd");
- _ = self.builder.buildCondBr(cond, memset_block, end_block);
- self.builder.positionBuilderAtEnd(memset_block);
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
- _ = self.builder.buildBr(end_block);
- self.builder.positionBuilderAtEnd(end_block);
+ const o = self.dg.object;
+ const llvm_usize_ty = try o.lowerType(Type.usize);
+ const cond = try self.cmp(len, try o.builder.intValue(llvm_usize_ty, 0), Type.usize, .neq);
+ const memset_block = try self.wip.block(1, "MemsetTrapSkip");
+ const end_block = try self.wip.block(2, "MemsetTrapEnd");
+ _ = try self.wip.brCond(cond, memset_block, end_block);
+ self.wip.cursor = .{ .block = memset_block };
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet(
+ dest_ptr.toLlvm(&self.wip),
+ fill_byte.toLlvm(&self.wip),
+ len.toLlvm(&self.wip),
+ @intCast(dest_ptr_align.toByteUnits() orelse 0),
+ is_volatile,
+ ), &self.wip);
+ _ = try self.wip.br(end_block);
+ self.wip.cursor = .{ .block = end_block };
}
- fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -8673,9 +9135,9 @@ pub const FuncGen = struct {
const dest_ptr_ty = self.typeOf(bin_op.lhs);
const src_slice = try self.resolveInst(bin_op.rhs);
const src_ptr_ty = self.typeOf(bin_op.rhs);
- const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
- const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
- const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
+ const src_ptr = try self.sliceOrArrayPtr(src_slice, src_ptr_ty);
+ const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
+ const dest_ptr = try self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod);
// When bulk-memory is enabled, this will be lowered to WebAssembly's memory.copy instruction.
@@ -8687,84 +9149,81 @@ pub const FuncGen = struct {
std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and
dest_ptr_ty.isSlice(mod))
{
- const llvm_usize_ty = self.context.intType(self.dg.object.target.ptrBitWidth());
- const cond = try self.cmp(len, llvm_usize_ty.constInt(0, .False), Type.usize, .neq);
- const memcpy_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapSkip");
- const end_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapEnd");
- _ = self.builder.buildCondBr(cond, memcpy_block, end_block);
- self.builder.positionBuilderAtEnd(memcpy_block);
- _ = self.builder.buildMemCpy(
- dest_ptr,
+ const zero_usize = try o.builder.intValue(try o.lowerType(Type.usize), 0);
+ const cond = try self.cmp(len, zero_usize, Type.usize, .neq);
+ const memcpy_block = try self.wip.block(1, "MemcpyTrapSkip");
+ const end_block = try self.wip.block(2, "MemcpyTrapEnd");
+ _ = try self.wip.brCond(cond, memcpy_block, end_block);
+ self.wip.cursor = .{ .block = memcpy_block };
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy(
+ dest_ptr.toLlvm(&self.wip),
dest_ptr_ty.ptrAlignment(mod),
- src_ptr,
+ src_ptr.toLlvm(&self.wip),
src_ptr_ty.ptrAlignment(mod),
- len,
+ len.toLlvm(&self.wip),
is_volatile,
- );
- _ = self.builder.buildBr(end_block);
- self.builder.positionBuilderAtEnd(end_block);
- return null;
+ ), &self.wip);
+ _ = try self.wip.br(end_block);
+ self.wip.cursor = .{ .block = end_block };
+ return .none;
}
- _ = self.builder.buildMemCpy(
- dest_ptr,
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy(
+ dest_ptr.toLlvm(&self.wip),
dest_ptr_ty.ptrAlignment(mod),
- src_ptr,
+ src_ptr.toLlvm(&self.wip),
src_ptr_ty.ptrAlignment(mod),
- len,
+ len.toLlvm(&self.wip),
is_volatile,
- );
- return null;
+ ), &self.wip);
+ return .none;
}
- fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const un_ty = self.typeOf(bin_op.lhs).childType(mod);
const layout = un_ty.unionGetLayout(mod);
- if (layout.tag_size == 0) return null;
+ if (layout.tag_size == 0) return .none;
const union_ptr = try self.resolveInst(bin_op.lhs);
const new_tag = try self.resolveInst(bin_op.rhs);
if (layout.payload_size == 0) {
// TODO alignment on this store
- _ = self.builder.buildStore(new_tag, union_ptr);
- return null;
+ _ = try self.wip.store(.normal, new_tag, union_ptr, .default);
+ return .none;
}
- const un_llvm_ty = try o.lowerType(un_ty);
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
- const tag_field_ptr = self.builder.buildStructGEP(un_llvm_ty, union_ptr, tag_index, "");
+ const tag_field_ptr = try self.wip.gepStruct(try o.lowerType(un_ty), union_ptr, tag_index, "");
// TODO alignment on this store
- _ = self.builder.buildStore(new_tag, tag_field_ptr);
- return null;
+ _ = try self.wip.store(.normal, new_tag, tag_field_ptr, .default);
+ return .none;
}
- fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.typeOf(ty_op.operand);
const layout = un_ty.unionGetLayout(mod);
- if (layout.tag_size == 0) return null;
+ if (layout.tag_size == 0) return .none;
const union_handle = try self.resolveInst(ty_op.operand);
if (isByRef(un_ty, mod)) {
const llvm_un_ty = try o.lowerType(un_ty);
- if (layout.payload_size == 0) {
- return self.builder.buildLoad(llvm_un_ty, union_handle, "");
- }
+ if (layout.payload_size == 0)
+ return self.wip.load(.normal, llvm_un_ty, union_handle, .default, "");
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
- const tag_field_ptr = self.builder.buildStructGEP(llvm_un_ty, union_handle, tag_index, "");
- return self.builder.buildLoad(llvm_un_ty.structGetTypeAtIndex(tag_index), tag_field_ptr, "");
+ const tag_field_ptr = try self.wip.gepStruct(llvm_un_ty, union_handle, tag_index, "");
+ const llvm_tag_ty = llvm_un_ty.structFields(&o.builder)[tag_index];
+ return self.wip.load(.normal, llvm_tag_ty, tag_field_ptr, .default, "");
} else {
- if (layout.payload_size == 0) {
- return union_handle;
- }
+ if (layout.payload_size == 0) return union_handle;
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
- return self.builder.buildExtractValue(union_handle, tag_index, "");
+ return self.wip.extractValue(union_handle, &.{tag_index}, "");
}
}
- fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
+ fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !Builder.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
@@ -8772,7 +9231,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(op, operand_ty, 1, .{operand});
}
- fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -8782,60 +9241,64 @@ pub const FuncGen = struct {
return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
}
- fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const llvm_i1 = self.context.intType(1);
- const operand_llvm_ty = try o.lowerType(operand_ty);
- const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
+ const llvm_operand_ty = try o.lowerType(operand_ty);
+ const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{ llvm_operand_ty, .i1 }, .normal);
+ const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty});
- const params = [_]*llvm.Value{ operand, llvm_i1.constNull() };
- const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
+ const params = [_]*llvm.Value{
+ operand.toLlvm(&self.wip),
+ Builder.Constant.false.toLlvm(&o.builder),
+ };
+ const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ fn_val,
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
const result_ty = self.typeOfIndex(inst);
- const result_llvm_ty = try o.lowerType(result_ty);
-
- const bits = operand_ty.intInfo(mod).bits;
- const result_bits = result_ty.intInfo(mod).bits;
- if (bits > result_bits) {
- return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
- } else if (bits < result_bits) {
- return self.builder.buildZExt(wrong_size_result, result_llvm_ty, "");
- } else {
- return wrong_size_result;
- }
+ return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), "");
}
- fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const params = [_]*llvm.Value{operand};
- const operand_llvm_ty = try o.lowerType(operand_ty);
- const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
-
- const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
+ const llvm_operand_ty = try o.lowerType(operand_ty);
+ const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{llvm_operand_ty}, .normal);
+ const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty});
+
+ const params = [_]*llvm.Value{operand.toLlvm(&self.wip)};
+ const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ fn_val,
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
const result_ty = self.typeOfIndex(inst);
- const result_llvm_ty = try o.lowerType(result_ty);
-
- const bits = operand_ty.intInfo(mod).bits;
- const result_bits = result_ty.intInfo(mod).bits;
- if (bits > result_bits) {
- return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
- } else if (bits < result_bits) {
- return self.builder.buildZExt(wrong_size_result, result_llvm_ty, "");
- } else {
- return wrong_size_result;
- }
+ return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), "");
}
- fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -8844,52 +9307,47 @@ pub const FuncGen = struct {
assert(bits % 8 == 0);
var operand = try self.resolveInst(ty_op.operand);
- var operand_llvm_ty = try o.lowerType(operand_ty);
+ var llvm_operand_ty = try o.lowerType(operand_ty);
if (bits % 16 == 8) {
// If not an even byte-multiple, we need zero-extend + shift-left 1 byte
// The truncated result at the end will be the correct bswap
- const scalar_llvm_ty = self.context.intType(bits + 8);
+ const scalar_ty = try o.builder.intType(@intCast(bits + 8));
if (operand_ty.zigTypeTag(mod) == .Vector) {
const vec_len = operand_ty.vectorLen(mod);
- operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
-
- const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
- defer self.gpa.free(shifts);
+ llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty);
+ } else llvm_operand_ty = scalar_ty;
- for (shifts) |*elem| {
- elem.* = scalar_llvm_ty.constInt(8, .False);
- }
- const shift_vec = llvm.constVector(shifts.ptr, vec_len);
+ const shift_amt =
+ try o.builder.splatValue(llvm_operand_ty, try o.builder.intConst(scalar_ty, 8));
+ const extended = try self.wip.cast(.zext, operand, llvm_operand_ty, "");
+ operand = try self.wip.bin(.shl, extended, shift_amt, "");
- const extended = self.builder.buildZExt(operand, operand_llvm_ty, "");
- operand = self.builder.buildShl(extended, shift_vec, "");
- } else {
- const extended = self.builder.buildZExt(operand, scalar_llvm_ty, "");
- operand = self.builder.buildShl(extended, scalar_llvm_ty.constInt(8, .False), "");
- operand_llvm_ty = scalar_llvm_ty;
- }
bits = bits + 8;
}
- const params = [_]*llvm.Value{operand};
- const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
+ const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{llvm_operand_ty}, .normal);
+ const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty});
- const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
+ const params = [_]*llvm.Value{operand.toLlvm(&self.wip)};
+ const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ fn_val,
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
const result_ty = self.typeOfIndex(inst);
- const result_llvm_ty = try o.lowerType(result_ty);
- const result_bits = result_ty.intInfo(mod).bits;
- if (bits > result_bits) {
- return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
- } else if (bits < result_bits) {
- return self.builder.buildZExt(wrong_size_result, result_llvm_ty, "");
- } else {
- return wrong_size_result;
- }
+ return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), "");
}
- fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -8897,50 +9355,53 @@ pub const FuncGen = struct {
const error_set_ty = self.air.getRefType(ty_op.ty);
const names = error_set_ty.errorSetNames(mod);
- const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid");
- const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid");
- const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
- const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len)));
+ const valid_block = try self.wip.block(@intCast(names.len), "Valid");
+ const invalid_block = try self.wip.block(1, "Invalid");
+ const end_block = try self.wip.block(2, "End");
+ var wip_switch = try self.wip.@"switch"(operand, invalid_block, @intCast(names.len));
+ defer wip_switch.finish(&self.wip);
for (names) |name| {
- const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?));
- const this_tag_int_value = try o.lowerValue(.{
- .ty = Type.err_int,
- .val = try mod.intValue(Type.err_int, err_int),
- });
- switch_instr.addCase(this_tag_int_value, valid_block);
- }
- self.builder.positionBuilderAtEnd(valid_block);
- _ = self.builder.buildBr(end_block);
-
- self.builder.positionBuilderAtEnd(invalid_block);
- _ = self.builder.buildBr(end_block);
-
- self.builder.positionBuilderAtEnd(end_block);
-
- const llvm_type = self.context.intType(1);
- const incoming_values: [2]*llvm.Value = .{
- llvm_type.constInt(1, .False), llvm_type.constInt(0, .False),
- };
- const incoming_blocks: [2]*llvm.BasicBlock = .{
- valid_block, invalid_block,
- };
- const phi_node = self.builder.buildPhi(llvm_type, "");
- phi_node.addIncoming(&incoming_values, &incoming_blocks, 2);
- return phi_node;
+ const err_int = mod.global_error_set.getIndex(name).?;
+ const this_tag_int_value = try o.builder.intConst(Builder.Type.err_int, err_int);
+ try wip_switch.addCase(this_tag_int_value, valid_block, &self.wip);
+ }
+ self.wip.cursor = .{ .block = valid_block };
+ _ = try self.wip.br(end_block);
+
+ self.wip.cursor = .{ .block = invalid_block };
+ _ = try self.wip.br(end_block);
+
+ self.wip.cursor = .{ .block = end_block };
+ const phi = try self.wip.phi(.i1, "");
+ try phi.finish(
+ &.{ Builder.Constant.true.toValue(), Builder.Constant.false.toValue() },
+ &.{ valid_block, invalid_block },
+ &self.wip,
+ );
+ return phi.toValue();
}
- fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+ const o = self.dg.object;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.typeOf(un_op);
const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
- const params = [_]*llvm.Value{operand};
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
+ const params = [_]*llvm.Value{operand.toLlvm(&self.wip)};
+ return (try self.wip.unimplemented(.i1, "")).finish(self.builder.buildCall(
+ llvm_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
}
- fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
+ fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
const o = self.dg.object;
const mod = o.module;
const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
@@ -8950,185 +9411,207 @@ pub const FuncGen = struct {
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(o.named_enum_map.remove(enum_type.decl));
- var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
- const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)});
-
- const param_types = [_]*llvm.Type{try o.lowerType(enum_type.tag_ty.toType())};
+ const llvm_fn_name = try o.builder.fmt("__zig_is_named_enum_value_{}", .{
+ fqn.fmt(&mod.intern_pool),
+ });
- const llvm_ret_ty = try o.lowerType(Type.bool);
- const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
- const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type);
+ const fn_type = try o.builder.fnType(.i1, &.{
+ try o.lowerType(enum_type.tag_ty.toType()),
+ }, .normal);
+ const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder));
fn_val.setLinkage(.Internal);
fn_val.setFunctionCallConv(.Fast);
o.addCommonFnAttributes(fn_val);
- gop.value_ptr.* = fn_val;
- const prev_block = self.builder.getInsertBlock();
- const prev_debug_location = self.builder.getCurrentDebugLocation2();
- defer {
- self.builder.positionBuilderAtEnd(prev_block);
- if (self.di_scope != null) {
- self.builder.setCurrentDebugLocation2(prev_debug_location);
- }
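+        // Mirror the helper in the self-hosted Builder: append the raw LLVM function to its
+        // parallel globals list and create the matching Global and Function entries.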
+ var global = Builder.Global{
+ .linkage = .internal,
+ .type = fn_type,
+ .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) },
+ };
+ var function = Builder.Function{
+ .global = @enumFromInt(o.builder.globals.count()),
+ };
+ try o.builder.llvm.globals.append(self.gpa, fn_val);
+ _ = try o.builder.addGlobal(llvm_fn_name, global);
+ try o.builder.functions.append(self.gpa, function);
+ gop.value_ptr.* = global.kind.function;
+
+ var wip = try Builder.WipFunction.init(&o.builder, global.kind.function);
+ defer wip.deinit();
+ wip.cursor = .{ .block = try wip.block(0, "Entry") };
+
+ const named_block = try wip.block(@intCast(enum_type.names.len), "Named");
+ const unnamed_block = try wip.block(1, "Unnamed");
+ const tag_int_value = wip.arg(0);
+ var wip_switch = try wip.@"switch"(tag_int_value, unnamed_block, @intCast(enum_type.names.len));
+ defer wip_switch.finish(&wip);
+
+ for (0..enum_type.names.len) |field_index| {
+ const this_tag_int_value = try o.lowerValue(
+ (try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
+ );
+ try wip_switch.addCase(this_tag_int_value, named_block, &wip);
}
+ wip.cursor = .{ .block = named_block };
+ _ = try wip.ret(Builder.Constant.true.toValue());
- const entry_block = self.context.appendBasicBlock(fn_val, "Entry");
- self.builder.positionBuilderAtEnd(entry_block);
- self.builder.clearCurrentDebugLocation();
-
- const named_block = self.context.appendBasicBlock(fn_val, "Named");
- const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
- const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len)));
-
- for (enum_type.names, 0..) |_, field_index_usize| {
- const field_index = @as(u32, @intCast(field_index_usize));
- const this_tag_int_value = int: {
- break :int try o.lowerValue(.{
- .ty = enum_ty,
- .val = try mod.enumValueFieldIndex(enum_ty, field_index),
- });
- };
- switch_instr.addCase(this_tag_int_value, named_block);
- }
- self.builder.positionBuilderAtEnd(named_block);
- _ = self.builder.buildRet(self.context.intType(1).constInt(1, .False));
+ wip.cursor = .{ .block = unnamed_block };
+ _ = try wip.ret(Builder.Constant.false.toValue());
- self.builder.positionBuilderAtEnd(unnamed_block);
- _ = self.builder.buildRet(self.context.intType(1).constInt(0, .False));
- return fn_val;
+ try wip.finish();
+ return global.kind.function;
}
- fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
+ const o = self.dg.object;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const enum_ty = self.typeOf(un_op);
const llvm_fn = try self.getEnumTagNameFunction(enum_ty);
- const params = [_]*llvm.Value{operand};
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
+ const llvm_fn_ty = llvm_fn.typeOf(&o.builder);
+ const params = [_]*llvm.Value{operand.toLlvm(&self.wip)};
+ return (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish(
+ self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .Fast,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
}
- fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
+ fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
const o = self.dg.object;
const mod = o.module;
const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
// TODO: detect when the type changes and re-emit this function.
const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl);
- if (gop.found_existing) return gop.value_ptr.*;
+ if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function;
errdefer assert(o.decl_map.remove(enum_type.decl));
- var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
- const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)});
+ const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)});
- const slice_ty = Type.slice_const_u8_sentinel_0;
- const llvm_ret_ty = try o.lowerType(slice_ty);
- const usize_llvm_ty = try o.lowerType(Type.usize);
- const slice_alignment = slice_ty.abiAlignment(mod);
-
- const param_types = [_]*llvm.Type{try o.lowerType(enum_type.tag_ty.toType())};
+ const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0);
+ const usize_ty = try o.lowerType(Type.usize);
- const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
- const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type);
+ const fn_type = try o.builder.fnType(ret_ty, &.{
+ try o.lowerType(enum_type.tag_ty.toType()),
+ }, .normal);
+ const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder));
fn_val.setLinkage(.Internal);
fn_val.setFunctionCallConv(.Fast);
o.addCommonFnAttributes(fn_val);
- gop.value_ptr.* = fn_val;
-
- const prev_block = self.builder.getInsertBlock();
- const prev_debug_location = self.builder.getCurrentDebugLocation2();
- defer {
- self.builder.positionBuilderAtEnd(prev_block);
- if (self.di_scope != null) {
- self.builder.setCurrentDebugLocation2(prev_debug_location);
- }
- }
-
- const entry_block = self.context.appendBasicBlock(fn_val, "Entry");
- self.builder.positionBuilderAtEnd(entry_block);
- self.builder.clearCurrentDebugLocation();
-
- const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
- const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len)));
- const array_ptr_indices = [_]*llvm.Value{
- usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
+ var global = Builder.Global{
+ .linkage = .internal,
+ .type = fn_type,
+ .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) },
};
-
- for (enum_type.names, 0..) |name_ip, field_index_usize| {
- const field_index = @as(u32, @intCast(field_index_usize));
- const name = mod.intern_pool.stringToSlice(name_ip);
- const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False);
- const str_init_llvm_ty = str_init.typeOf();
- const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, "");
- str_global.setInitializer(str_init);
- str_global.setLinkage(.Private);
- str_global.setGlobalConstant(.True);
- str_global.setUnnamedAddr(.True);
- str_global.setAlignment(1);
-
- const slice_fields = [_]*llvm.Value{
- str_init_llvm_ty.constInBoundsGEP(str_global, &array_ptr_indices, array_ptr_indices.len),
- usize_llvm_ty.constInt(name.len, .False),
+ var function = Builder.Function{
+ .global = @enumFromInt(o.builder.globals.count()),
+ };
+ try o.builder.llvm.globals.append(self.gpa, fn_val);
+ gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global);
+ try o.builder.functions.append(self.gpa, function);
+
+ var wip = try Builder.WipFunction.init(&o.builder, global.kind.function);
+ defer wip.deinit();
+ wip.cursor = .{ .block = try wip.block(0, "Entry") };
+
+ const bad_value_block = try wip.block(1, "BadValue");
+ const tag_int_value = wip.arg(0);
+ var wip_switch =
+ try wip.@"switch"(tag_int_value, bad_value_block, @intCast(enum_type.names.len));
+ defer wip_switch.finish(&wip);
+
+ for (enum_type.names, 0..) |name_ip, field_index| {
+ const name = try o.builder.string(mod.intern_pool.stringToSlice(name_ip));
+ const str_init = try o.builder.stringNullConst(name);
+ const str_ty = str_init.typeOf(&o.builder);
+ const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), "");
+ str_llvm_global.setInitializer(str_init.toLlvm(&o.builder));
+ str_llvm_global.setLinkage(.Private);
+ str_llvm_global.setGlobalConstant(.True);
+ str_llvm_global.setUnnamedAddr(.True);
+ str_llvm_global.setAlignment(1);
+
+ var str_global = Builder.Global{
+ .linkage = .private,
+ .unnamed_addr = .unnamed_addr,
+ .type = str_ty,
+ .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
};
- const slice_init = llvm_ret_ty.constNamedStruct(&slice_fields, slice_fields.len);
- const slice_global = o.llvm_module.addGlobal(slice_init.typeOf(), "");
- slice_global.setInitializer(slice_init);
- slice_global.setLinkage(.Private);
- slice_global.setGlobalConstant(.True);
- slice_global.setUnnamedAddr(.True);
- slice_global.setAlignment(slice_alignment);
-
- const return_block = self.context.appendBasicBlock(fn_val, "Name");
- const this_tag_int_value = try o.lowerValue(.{
- .ty = enum_ty,
- .val = try mod.enumValueFieldIndex(enum_ty, field_index),
+ var str_variable = Builder.Variable{
+ .global = @enumFromInt(o.builder.globals.count()),
+ .mutability = .constant,
+ .init = str_init,
+ .alignment = comptime Builder.Alignment.fromByteUnits(1),
+ };
+ try o.builder.llvm.globals.append(o.gpa, str_llvm_global);
+ const global_index = try o.builder.addGlobal(.empty, str_global);
+ try o.builder.variables.append(o.gpa, str_variable);
+
+ const slice_val = try o.builder.structValue(ret_ty, &.{
+ global_index.toConst(),
+ try o.builder.intConst(usize_ty, name.toSlice(&o.builder).?.len),
});
- switch_instr.addCase(this_tag_int_value, return_block);
- self.builder.positionBuilderAtEnd(return_block);
- const loaded = self.builder.buildLoad(llvm_ret_ty, slice_global, "");
- loaded.setAlignment(slice_alignment);
- _ = self.builder.buildRet(loaded);
+ const return_block = try wip.block(1, "Name");
+ const this_tag_int_value = try o.lowerValue(
+ (try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
+ );
+ try wip_switch.addCase(this_tag_int_value, return_block, &wip);
+
+ wip.cursor = .{ .block = return_block };
+ _ = try wip.ret(slice_val);
}
- self.builder.positionBuilderAtEnd(bad_value_block);
- _ = self.builder.buildUnreachable();
- return fn_val;
+ wip.cursor = .{ .block = bad_value_block };
+ _ = try wip.@"unreachable"();
+
+ try wip.finish();
+ return global.kind.function;
}
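
In source terms, the helper emitted above is a switch from the integer tag value to a sentinel-terminated name slice, with the "BadValue" block becoming unreachable. A minimal standalone sketch, assuming a hypothetical two-field enum (not part of the patch):

    const E = enum(u2) { alpha, beta };

    // Hypothetical stand-in for the generated __zig_tag_name_* helper.
    fn zigTagName(tag: u2) [:0]const u8 {
        return switch (tag) {
            @intFromEnum(E.alpha) => "alpha",
            @intFromEnum(E.beta) => "beta",
            else => unreachable, // the "BadValue" block
        };
    }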
- fn getCmpLtErrorsLenFunction(self: *FuncGen) !*llvm.Value {
+ fn getCmpLtErrorsLenFunction(self: *FuncGen) !Builder.Function.Index {
const o = self.dg.object;
- if (o.llvm_module.getNamedFunction(lt_errors_fn_name)) |llvm_fn| {
- return llvm_fn;
- }
+ const name = try o.builder.string(lt_errors_fn_name);
+ if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function;
// Function signature: fn (anyerror) bool
- const ret_llvm_ty = try o.lowerType(Type.bool);
- const anyerror_llvm_ty = try o.lowerType(Type.anyerror);
- const param_types = [_]*llvm.Type{anyerror_llvm_ty};
+ const fn_type = try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal);
+ const llvm_fn = o.llvm_module.addFunction(name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder));
- const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False);
- const llvm_fn = o.llvm_module.addFunction(lt_errors_fn_name, fn_type);
llvm_fn.setLinkage(.Internal);
llvm_fn.setFunctionCallConv(.Fast);
o.addCommonFnAttributes(llvm_fn);
- return llvm_fn;
+
+ var global = Builder.Global{
+ .linkage = .internal,
+ .type = fn_type,
+ .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) },
+ };
+ var function = Builder.Function{
+ .global = @enumFromInt(o.builder.globals.count()),
+ };
+
+ try o.builder.llvm.globals.append(self.gpa, llvm_fn);
+ _ = try o.builder.addGlobal(name, global);
+ try o.builder.functions.append(self.gpa, function);
+ return global.kind.function;
}
- fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
@@ -9136,34 +9619,32 @@ pub const FuncGen = struct {
const slice_llvm_ty = try o.lowerType(slice_ty);
const error_name_table_ptr = try self.getErrorNameTable();
- const ptr_slice_llvm_ty = self.context.pointerType(0);
- const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr, "");
- const indices = [_]*llvm.Value{operand};
- const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, "");
- return self.builder.buildLoad(slice_llvm_ty, error_name_ptr, "");
+ const error_name_table =
+ try self.wip.load(.normal, .ptr, error_name_table_ptr.toValue(&o.builder), .default, "");
+ const error_name_ptr =
+ try self.wip.gep(.inbounds, slice_llvm_ty, error_name_table, &.{operand}, "");
+ return self.wip.load(.normal, slice_llvm_ty, error_name_ptr, .default, "");
}
- fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
- const mod = o.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
const vector_ty = self.typeOfIndex(inst);
- const len = vector_ty.vectorLen(mod);
- return self.builder.buildVectorSplat(len, scalar, "");
+ return self.wip.splatVector(try o.lowerType(vector_ty), scalar, "");
}
- fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const pred = try self.resolveInst(pl_op.operand);
const a = try self.resolveInst(extra.lhs);
const b = try self.resolveInst(extra.rhs);
- return self.builder.buildSelect(pred, a, b, "");
+ return self.wip.select(pred, a, b, "");
}
- fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -9179,24 +9660,25 @@ pub const FuncGen = struct {
// when changing code, so Zig uses negative numbers to index the
// second vector. These start at -1 and go down, and are easiest to use
// with the ~ operator. Here we convert between the two formats.
- const values = try self.gpa.alloc(*llvm.Value, mask_len);
+ const values = try self.gpa.alloc(Builder.Constant, mask_len);
defer self.gpa.free(values);
- const llvm_i32 = self.context.intType(32);
-
for (values, 0..) |*val, i| {
const elem = try mask.elemValue(mod, i);
if (elem.isUndef(mod)) {
- val.* = llvm_i32.getUndef();
+ val.* = try o.builder.undefConst(.i32);
} else {
const int = elem.toSignedInt(mod);
- const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len));
- val.* = llvm_i32.constInt(unsigned, .False);
+ const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len);
+ val.* = try o.builder.intConst(.i32, unsigned);
}
}
- const llvm_mask_value = llvm.constVector(values.ptr, mask_len);
- return self.builder.buildShuffleVector(a, b, llvm_mask_value, "");
+ const llvm_mask_value = try o.builder.vectorValue(
+ try o.builder.vectorType(.normal, mask_len, .i32),
+ values,
+ );
+ return self.wip.shuffleVector(a, b, llvm_mask_value, "");
}
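
The index conversion performed in the loop above can be checked in isolation; a small sketch with an assumed helper name:

    // Zig encodes "element i of the second vector" as the negative index ~i
    // (-1 is element 0, -2 is element 1, ...); LLVM wants a flat index into the
    // concatenation of both vectors, so negative entries become ~int + a_len.
    fn llvmMaskIndex(zig_index: i32, a_len: i32) u32 {
        return @intCast(if (zig_index >= 0) zig_index else ~zig_index + a_len);
    }

    test "negative mask indices select from the second vector" {
        const expectEqual = @import("std").testing.expectEqual;
        try expectEqual(@as(u32, 2), llvmMaskIndex(2, 4)); // element 2 of the first vector
        try expectEqual(@as(u32, 4), llvmMaskIndex(-1, 4)); // element 0 of the second vector
    }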
/// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result.
@@ -9213,58 +9695,69 @@ pub const FuncGen = struct {
///
fn buildReducedCall(
self: *FuncGen,
- llvm_fn: *llvm.Value,
- operand_vector: *llvm.Value,
+ llvm_fn: Builder.Function.Index,
+ operand_vector: Builder.Value,
vector_len: usize,
- accum_init: *llvm.Value,
- ) !*llvm.Value {
+ accum_init: Builder.Value,
+ ) !Builder.Value {
const o = self.dg.object;
- const llvm_usize_ty = try o.lowerType(Type.usize);
- const llvm_vector_len = llvm_usize_ty.constInt(vector_len, .False);
- const llvm_result_ty = accum_init.typeOf();
+ const usize_ty = try o.lowerType(Type.usize);
+ const llvm_vector_len = try o.builder.intValue(usize_ty, vector_len);
+ const llvm_result_ty = accum_init.typeOfWip(&self.wip);
// Allocate and initialize our mutable variables
- const i_ptr = self.buildAlloca(llvm_usize_ty, null);
- _ = self.builder.buildStore(llvm_usize_ty.constInt(0, .False), i_ptr);
- const accum_ptr = self.buildAlloca(llvm_result_ty, null);
- _ = self.builder.buildStore(accum_init, accum_ptr);
+ const i_ptr = try self.buildAlloca(usize_ty, .default);
+ _ = try self.wip.store(.normal, try o.builder.intValue(usize_ty, 0), i_ptr, .default);
+ const accum_ptr = try self.buildAlloca(llvm_result_ty, .default);
+ _ = try self.wip.store(.normal, accum_init, accum_ptr, .default);
// Setup the loop
- const loop = self.context.appendBasicBlock(self.llvm_func, "ReduceLoop");
- const loop_exit = self.context.appendBasicBlock(self.llvm_func, "AfterReduce");
- _ = self.builder.buildBr(loop);
+ const loop = try self.wip.block(2, "ReduceLoop");
+ const loop_exit = try self.wip.block(1, "AfterReduce");
+ _ = try self.wip.br(loop);
{
- self.builder.positionBuilderAtEnd(loop);
+ self.wip.cursor = .{ .block = loop };
// while (i < vec.len)
- const i = self.builder.buildLoad(llvm_usize_ty, i_ptr, "");
- const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len, "");
- const loop_then = self.context.appendBasicBlock(self.llvm_func, "ReduceLoopThen");
+ const i = try self.wip.load(.normal, usize_ty, i_ptr, .default, "");
+ const cond = try self.wip.icmp(.ult, i, llvm_vector_len, "");
+ const loop_then = try self.wip.block(1, "ReduceLoopThen");
- _ = self.builder.buildCondBr(cond, loop_then, loop_exit);
+ _ = try self.wip.brCond(cond, loop_then, loop_exit);
{
- self.builder.positionBuilderAtEnd(loop_then);
+ self.wip.cursor = .{ .block = loop_then };
// accum = f(accum, vec[i]);
- const accum = self.builder.buildLoad(llvm_result_ty, accum_ptr, "");
- const element = self.builder.buildExtractElement(operand_vector, i, "");
- const params = [2]*llvm.Value{ accum, element };
- const new_accum = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, "");
- _ = self.builder.buildStore(new_accum, accum_ptr);
+ const accum = try self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, "");
+ const element = try self.wip.extractElement(operand_vector, i, "");
+ const params = [2]*llvm.Value{ accum.toLlvm(&self.wip), element.toLlvm(&self.wip) };
+ const new_accum = (try self.wip.unimplemented(llvm_result_ty, "")).finish(
+ self.builder.buildCall(
+ llvm_fn.typeOf(&o.builder).toLlvm(&o.builder),
+ llvm_fn.toLlvm(&o.builder),
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ),
+ &self.wip,
+ );
+ _ = try self.wip.store(.normal, new_accum, accum_ptr, .default);
// i += 1
- const new_i = self.builder.buildAdd(i, llvm_usize_ty.constInt(1, .False), "");
- _ = self.builder.buildStore(new_i, i_ptr);
- _ = self.builder.buildBr(loop);
+ const new_i = try self.wip.bin(.add, i, try o.builder.intValue(usize_ty, 1), "");
+ _ = try self.wip.store(.normal, new_i, i_ptr, .default);
+ _ = try self.wip.br(loop);
}
}
- self.builder.positionBuilderAtEnd(loop_exit);
- return self.builder.buildLoad(llvm_result_ty, accum_ptr, "");
+ self.wip.cursor = .{ .block = loop_exit };
+ return self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, "");
}
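
The loop emitted by buildReducedCall is an ordinary left fold; the same computation in plain Zig, as a sketch (the comptime callback stands in for the softfloat libcall):

    fn reduce(comptime T: type, comptime f: fn (T, T) T, vec: []const T, init: T) T {
        var accum = init; // accum_ptr
        var i: usize = 0; // i_ptr
        while (i < vec.len) : (i += 1) accum = f(accum, vec[i]); // accum = f(accum, vec[i]);
        return accum; // loaded from accum_ptr in the AfterReduce block
    }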
- fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
+ fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value {
self.builder.setFastMath(want_fast_math);
const o = self.dg.object;
const mod = o.module;
@@ -9274,40 +9767,70 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(reduce.operand);
const operand_ty = self.typeOf(reduce.operand);
const scalar_ty = self.typeOfIndex(inst);
+ const llvm_scalar_ty = try o.lowerType(scalar_ty);
switch (reduce.operation) {
- .And => return self.builder.buildAndReduce(operand),
- .Or => return self.builder.buildOrReduce(operand),
- .Xor => return self.builder.buildXorReduce(operand),
+ .And => return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildAndReduce(operand.toLlvm(&self.wip)), &self.wip),
+ .Or => return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildOrReduce(operand.toLlvm(&self.wip)), &self.wip),
+ .Xor => return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildXorReduce(operand.toLlvm(&self.wip)), &self.wip),
.Min => switch (scalar_ty.zigTypeTag(mod)) {
- .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)),
+ .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish(
+ self.builder.buildIntMinReduce(
+ operand.toLlvm(&self.wip),
+ scalar_ty.isSignedInt(mod),
+ ),
+ &self.wip,
+ ),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
- return self.builder.buildFPMinReduce(operand);
+ return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildFPMinReduce(operand.toLlvm(&self.wip)), &self.wip);
},
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag(mod)) {
- .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)),
+ .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish(
+ self.builder.buildIntMaxReduce(
+ operand.toLlvm(&self.wip),
+ scalar_ty.isSignedInt(mod),
+ ),
+ &self.wip,
+ ),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
- return self.builder.buildFPMaxReduce(operand);
+ return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildFPMaxReduce(operand.toLlvm(&self.wip)), &self.wip);
},
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag(mod)) {
- .Int => return self.builder.buildAddReduce(operand),
+ .Int => return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildAddReduce(operand.toLlvm(&self.wip)), &self.wip),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
- const scalar_llvm_ty = try o.lowerType(scalar_ty);
- const neutral_value = scalar_llvm_ty.constReal(-0.0);
- return self.builder.buildFPAddReduce(neutral_value, operand);
+ const neutral_value = try o.builder.fpConst(llvm_scalar_ty, -0.0);
+ return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish(
+ self.builder.buildFPAddReduce(
+ neutral_value.toLlvm(&o.builder),
+ operand.toLlvm(&self.wip),
+ ),
+ &self.wip,
+ );
},
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag(mod)) {
- .Int => return self.builder.buildMulReduce(operand),
+ .Int => return (try self.wip.unimplemented(llvm_scalar_ty, ""))
+ .finish(self.builder.buildMulReduce(operand.toLlvm(&self.wip)), &self.wip),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
- const scalar_llvm_ty = try o.lowerType(scalar_ty);
- const neutral_value = scalar_llvm_ty.constReal(1.0);
- return self.builder.buildFPMulReduce(neutral_value, operand);
+ const neutral_value = try o.builder.fpConst(llvm_scalar_ty, 1.0);
+ return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish(
+ self.builder.buildFPMulReduce(
+ neutral_value.toLlvm(&o.builder),
+ operand.toLlvm(&self.wip),
+ ),
+ &self.wip,
+ );
},
else => unreachable,
},
@@ -9315,58 +9838,71 @@ pub const FuncGen = struct {
// Reduction could not be performed with intrinsics.
// Use a manual loop over a softfloat call instead.
- var fn_name_buf: [64]u8 = undefined;
const float_bits = scalar_ty.floatBits(target);
const fn_name = switch (reduce.operation) {
- .Min => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{
+ .Min => try o.builder.fmt("{s}fmin{s}", .{
libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- .Max => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{
+ }),
+ .Max => try o.builder.fmt("{s}fmax{s}", .{
libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
- }) catch unreachable,
- .Add => std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{
+ }),
+ .Add => try o.builder.fmt("__add{s}f3", .{
compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
- .Mul => std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{
+ }),
+ .Mul => try o.builder.fmt("__mul{s}f3", .{
compilerRtFloatAbbrev(float_bits),
- }) catch unreachable,
+ }),
else => unreachable,
};
- const param_llvm_ty = try o.lowerType(scalar_ty);
- const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty };
- const libc_fn = self.getLibcFunction(fn_name, &param_types, param_llvm_ty);
- const init_value = try o.lowerValue(.{
- .ty = scalar_ty,
- .val = try mod.floatValue(scalar_ty, switch (reduce.operation) {
- .Min => std.math.nan(f32),
- .Max => std.math.nan(f32),
- .Add => -0.0,
- .Mul => 1.0,
- else => unreachable,
- }),
- });
- return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value);
+ const libc_fn =
+ try self.getLibcFunction(fn_name, &.{ llvm_scalar_ty, llvm_scalar_ty }, llvm_scalar_ty);
+ const init_val = switch (llvm_scalar_ty) {
+ .i16 => try o.builder.intValue(.i16, @as(i16, @bitCast(
+ @as(f16, switch (reduce.operation) {
+ .Min, .Max => std.math.nan(f16),
+ .Add => -0.0,
+ .Mul => 1.0,
+ else => unreachable,
+ }),
+ ))),
+ .i80 => try o.builder.intValue(.i80, @as(i80, @bitCast(
+ @as(f80, switch (reduce.operation) {
+ .Min, .Max => std.math.nan(f80),
+ .Add => -0.0,
+ .Mul => 1.0,
+ else => unreachable,
+ }),
+ ))),
+ .i128 => try o.builder.intValue(.i128, @as(i128, @bitCast(
+ @as(f128, switch (reduce.operation) {
+ .Min, .Max => std.math.nan(f128),
+ .Add => -0.0,
+ .Mul => 1.0,
+ else => unreachable,
+ }),
+ ))),
+ else => unreachable,
+ };
+ return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_val);
}
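
The softfloat path above hands the operation's neutral element (NaN for Min/Max, -0.0 for Add, 1.0 for Mul) to buildReducedCall as an integer with the same bit pattern as the float. A standalone sketch of that bitcast for the f16 case:

    const std = @import("std");

    test "the f16 Add neutral element -0.0 is passed as its i16 bit pattern" {
        const bits: i16 = @bitCast(@as(f16, -0.0));
        try std.testing.expectEqual(@as(u16, 0x8000), @as(u16, @bitCast(bits)));
    }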
- fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.typeOfIndex(inst);
- const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
+ const len: usize = @intCast(result_ty.arrayLen(mod));
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
const llvm_result_ty = try o.lowerType(result_ty);
switch (result_ty.zigTypeTag(mod)) {
.Vector => {
- const llvm_u32 = self.context.intType(32);
-
- var vector = llvm_result_ty.getUndef();
+ var vector = try o.builder.poisonValue(llvm_result_ty);
for (elements, 0..) |elem, i| {
- const index_u32 = llvm_u32.constInt(i, .False);
+ const index_u32 = try o.builder.intValue(.i32, i);
const llvm_elem = try self.resolveInst(elem);
- vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
+ vector = try self.wip.insertElement(vector, llvm_elem, index_u32, "");
}
return vector;
},
@@ -9375,48 +9911,47 @@ pub const FuncGen = struct {
const struct_obj = mod.typeToStruct(result_ty).?;
assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(mod);
- const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits)));
+ const int_ty = try o.builder.intType(@intCast(big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
- var running_int: *llvm.Value = int_llvm_ty.constNull();
+ var running_int = try o.builder.intValue(int_ty, 0);
var running_bits: u16 = 0;
for (elements, 0..) |elem, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
- const small_int_ty = self.context.intType(ty_bit_size);
+ const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod));
+ const small_int_ty = try o.builder.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
- self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
+ try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
- self.builder.buildBitCast(non_int_val, small_int_ty, "");
- const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
+ try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
+ const shift_rhs = try o.builder.intValue(int_ty, running_bits);
// If the field is as large as the entire packed struct, this
// zext would go from, e.g. i16 to i16. This is legal with
// constZExtOrBitCast but not legal with constZExt.
- const extended_int_val = self.builder.buildZExtOrBitCast(small_int_val, int_llvm_ty, "");
- const shifted = self.builder.buildShl(extended_int_val, shift_rhs, "");
- running_int = self.builder.buildOr(running_int, shifted, "");
+ const extended_int_val = try self.wip.conv(.unsigned, small_int_val, int_ty, "");
+ const shifted = try self.wip.bin(.shl, extended_int_val, shift_rhs, "");
+ running_int = try self.wip.bin(.@"or", running_int, shifted, "");
running_bits += ty_bit_size;
}
return running_int;
}
if (isByRef(result_ty, mod)) {
- const llvm_u32 = self.context.intType(32);
// TODO in debug builds init to undef so that the padding will be 0xaa
// even if we fully populate the fields.
- const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
+ const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod));
+ const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
- var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
for (elements, 0..) |elem, i| {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
const llvm_i = llvmField(result_ty, i, mod).?.index;
- indices[1] = llvm_u32.constInt(llvm_i, .False);
- const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
+ const field_ptr =
+ try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, "");
const field_ptr_ty = try mod.ptrType(.{
.child = self.typeOf(elem).toIntern(),
.flags = .{
@@ -9425,18 +9960,18 @@ pub const FuncGen = struct {
),
},
});
- try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic);
+ try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
}
return alloca_inst;
} else {
- var result = llvm_result_ty.getUndef();
+ var result = try o.builder.poisonValue(llvm_result_ty);
for (elements, 0..) |elem, i| {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
const llvm_i = llvmField(result_ty, i, mod).?.index;
- result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
+ result = try self.wip.insertValue(result, llvm_elem, &.{llvm_i}, "");
}
return result;
}
@@ -9445,7 +9980,9 @@ pub const FuncGen = struct {
assert(isByRef(result_ty, mod));
const llvm_usize = try o.lowerType(Type.usize);
- const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
+ const usize_zero = try o.builder.intValue(llvm_usize, 0);
+ const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod));
+ const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
const array_info = result_ty.arrayInfo(mod);
const elem_ptr_ty = try mod.ptrType(.{
@@ -9453,26 +9990,21 @@ pub const FuncGen = struct {
});
for (elements, 0..) |elem, i| {
- const indices: [2]*llvm.Value = .{
- llvm_usize.constNull(),
- llvm_usize.constInt(@as(c_uint, @intCast(i)), .False),
- };
- const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
+ const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{
+ usize_zero, try o.builder.intValue(llvm_usize, i),
+ }, "");
const llvm_elem = try self.resolveInst(elem);
- try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
+ try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .none);
}
if (array_info.sentinel) |sent_val| {
- const indices: [2]*llvm.Value = .{
- llvm_usize.constNull(),
- llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False),
- };
- const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
+ const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{
+ usize_zero, try o.builder.intValue(llvm_usize, array_info.len),
+ }, "");
const llvm_elem = try self.resolveValue(.{
.ty = array_info.elem_type,
.val = sent_val,
});
-
- try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
+ try self.store(elem_ptr, elem_ptr_ty, llvm_elem.toValue(), .none);
}
return alloca_inst;
@@ -9481,7 +10013,7 @@ pub const FuncGen = struct {
}
}
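
For packed structs, the loop above builds the backing integer by zero-extending each field, shifting it to the running bit offset, and or-ing it in. The same arithmetic in plain Zig, as a sketch with an assumed u4 + u12 field pair:

    const std = @import("std");

    test "packed struct fields are or-ed into the backing integer at their bit offset" {
        var running_int: u16 = 0; // backing integer of the packed struct
        var running_bits: u4 = 0;

        const a: u4 = 0xA; // first field
        running_int |= @as(u16, a) << running_bits;
        running_bits += 4;

        const b: u12 = 0x123; // second field
        running_int |= @as(u16, b) << running_bits;

        try std.testing.expectEqual(@as(u16, 0x123A), running_int);
    }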
- fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -9493,16 +10025,15 @@ pub const FuncGen = struct {
if (union_obj.layout == .Packed) {
const big_bits = union_ty.bitSize(mod);
- const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits)));
+ const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
const field = union_obj.fields.values()[extra.field_index];
const non_int_val = try self.resolveInst(extra.init);
- const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
- const small_int_ty = self.context.intType(ty_bit_size);
+ const small_int_ty = try o.builder.intType(@intCast(field.ty.bitSize(mod)));
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
- self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
+ try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
- self.builder.buildBitCast(non_int_val, small_int_ty, "");
- return self.builder.buildZExtOrBitCast(small_int_val, int_llvm_ty, "");
+ try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
+ return self.wip.conv(.unsigned, small_int_val, int_llvm_ty, "");
}
const tag_int = blk: {
@@ -9515,106 +10046,96 @@ pub const FuncGen = struct {
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
- return null;
+ return .none;
}
assert(!isByRef(union_ty, mod));
- return union_llvm_ty.constInt(tag_int, .False);
+ return o.builder.intValue(union_llvm_ty, tag_int);
}
assert(isByRef(union_ty, mod));
// The llvm type of the alloca will be the named LLVM union type, and will not
// necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set
// the fields appropriately.
- const result_ptr = self.buildAlloca(union_llvm_ty, layout.abi_align);
+ const alignment = Builder.Alignment.fromByteUnits(layout.abi_align);
+ const result_ptr = try self.buildAlloca(union_llvm_ty, alignment);
const llvm_payload = try self.resolveInst(extra.init);
assert(union_obj.haveFieldTypes());
const field = union_obj.fields.values()[extra.field_index];
const field_llvm_ty = try o.lowerType(field.ty);
const field_size = field.ty.abiSize(mod);
const field_align = field.normalAlignment(mod);
+ const llvm_usize = try o.lowerType(Type.usize);
+ const usize_zero = try o.builder.intValue(llvm_usize, 0);
+ const i32_zero = try o.builder.intValue(.i32, 0);
const llvm_union_ty = t: {
- const payload = p: {
+ const payload_ty = p: {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const padding_len = @as(c_uint, @intCast(layout.payload_size));
- break :p self.context.intType(8).arrayType(padding_len);
+ const padding_len = layout.payload_size;
+ break :p try o.builder.arrayType(padding_len, .i8);
}
if (field_size == layout.payload_size) {
break :p field_llvm_ty;
}
- const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size));
- const fields: [2]*llvm.Type = .{
- field_llvm_ty, self.context.intType(8).arrayType(padding_len),
- };
- break :p self.context.structType(&fields, fields.len, .True);
+ const padding_len = layout.payload_size - field_size;
+ break :p try o.builder.structType(.@"packed", &.{
+ field_llvm_ty, try o.builder.arrayType(padding_len, .i8),
+ });
};
- if (layout.tag_size == 0) {
- const fields: [1]*llvm.Type = .{payload};
- break :t self.context.structType(&fields, fields.len, .False);
- }
- const tag_llvm_ty = try o.lowerType(union_obj.tag_ty);
- var fields: [3]*llvm.Type = undefined;
- var fields_len: c_uint = 2;
+ if (layout.tag_size == 0) break :t try o.builder.structType(.normal, &.{payload_ty});
+ const tag_ty = try o.lowerType(union_obj.tag_ty);
+ var fields: [3]Builder.Type = undefined;
+ var fields_len: usize = 2;
if (layout.tag_align >= layout.payload_align) {
- fields = .{ tag_llvm_ty, payload, undefined };
+ fields = .{ tag_ty, payload_ty, undefined };
} else {
- fields = .{ payload, tag_llvm_ty, undefined };
+ fields = .{ payload_ty, tag_ty, undefined };
}
if (layout.padding != 0) {
- fields[2] = self.context.intType(8).arrayType(layout.padding);
- fields_len = 3;
+ fields[fields_len] = try o.builder.arrayType(layout.padding, .i8);
+ fields_len += 1;
}
- break :t self.context.structType(&fields, fields_len, .False);
+ break :t try o.builder.structType(.normal, fields[0..fields_len]);
};
// Now we follow the layout as expressed above with GEP instructions to set the
// tag and the payload.
- const index_type = self.context.intType(32);
-
const field_ptr_ty = try mod.ptrType(.{
.child = field.ty.toIntern(),
- .flags = .{
- .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align),
- },
+ .flags = .{ .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align) },
});
if (layout.tag_size == 0) {
- const indices: [3]*llvm.Value = .{
- index_type.constNull(),
- index_type.constNull(),
- index_type.constNull(),
- };
- const len: c_uint = if (field_size == layout.payload_size) 2 else 3;
- const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, "");
- try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
+ const indices = [3]Builder.Value{ usize_zero, i32_zero, i32_zero };
+ const len: usize = if (field_size == layout.payload_size) 2 else 3;
+ const field_ptr =
+ try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], "");
+ try self.store(field_ptr, field_ptr_ty, llvm_payload, .none);
return result_ptr;
}
{
- const indices: [3]*llvm.Value = .{
- index_type.constNull(),
- index_type.constInt(@intFromBool(layout.tag_align >= layout.payload_align), .False),
- index_type.constNull(),
- };
- const len: c_uint = if (field_size == layout.payload_size) 2 else 3;
- const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, "");
- try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
+ const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
+ const indices: [3]Builder.Value =
+ .{ usize_zero, try o.builder.intValue(.i32, payload_index), i32_zero };
+ const len: usize = if (field_size == layout.payload_size) 2 else 3;
+ const field_ptr =
+ try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], "");
+ try self.store(field_ptr, field_ptr_ty, llvm_payload, .none);
}
{
- const indices: [2]*llvm.Value = .{
- index_type.constNull(),
- index_type.constInt(@intFromBool(layout.tag_align < layout.payload_align), .False),
- };
- const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, "");
- const tag_llvm_ty = try o.lowerType(union_obj.tag_ty);
- const llvm_tag = tag_llvm_ty.constInt(tag_int, .False);
- const store_inst = self.builder.buildStore(llvm_tag, field_ptr);
- store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod));
+ const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
+ const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) };
+ const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
+ const tag_ty = try o.lowerType(union_obj.tag_ty);
+ const llvm_tag = try o.builder.intValue(tag_ty, tag_int);
+ const tag_alignment = Builder.Alignment.fromByteUnits(union_obj.tag_ty.abiAlignment(mod));
+ _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
}
return result_ptr;
}
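
The GEP indices used above fall out of the alignment comparison: whichever of tag and payload has the higher alignment comes first in the unnamed struct, with an optional trailing padding array. A sketch of just that index computation, with assumed example alignments:

    const std = @import("std");

    test "tag/payload field order follows relative alignment" {
        const tag_align: u32 = 4; // assumed example values
        const payload_align: u32 = 8;
        const payload_index: u32 = @intFromBool(tag_align >= payload_align);
        const tag_index: u32 = @intFromBool(tag_align < payload_align);
        try std.testing.expectEqual(@as(u32, 0), payload_index); // payload first
        try std.testing.expectEqual(@as(u32, 1), tag_index); // tag second
    }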
- fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const prefetch = self.air.instructions.items(.data)[inst].prefetch;
@@ -9643,10 +10164,10 @@ pub const FuncGen = struct {
.powerpcle,
.powerpc64,
.powerpc64le,
- => return null,
+ => return .none,
.arm, .armeb, .thumb, .thumbeb => {
switch (prefetch.rw) {
- .write => return null,
+ .write => return .none,
else => {},
}
},
@@ -9655,58 +10176,64 @@ pub const FuncGen = struct {
.data => {},
}
- const llvm_ptr_u8 = self.context.pointerType(0);
- const llvm_u32 = self.context.intType(32);
-
const llvm_fn_name = "llvm.prefetch.p0";
- const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
- // declare void @llvm.prefetch(i8*, i32, i32, i32)
- const llvm_void = self.context.voidType();
- const param_types = [_]*llvm.Type{
- llvm_ptr_u8, llvm_u32, llvm_u32, llvm_u32,
- };
- const fn_type = llvm.functionType(llvm_void, &param_types, param_types.len, .False);
- break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type);
- };
+ // declare void @llvm.prefetch(i8*, i32, i32, i32)
+ const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .i32, .i32, .i32 }, .normal);
+ const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse
+ o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder));
const ptr = try self.resolveInst(prefetch.ptr);
const params = [_]*llvm.Value{
- ptr,
- llvm_u32.constInt(@intFromEnum(prefetch.rw), .False),
- llvm_u32.constInt(prefetch.locality, .False),
- llvm_u32.constInt(@intFromEnum(prefetch.cache), .False),
+ ptr.toLlvm(&self.wip),
+ (try o.builder.intConst(.i32, @intFromEnum(prefetch.rw))).toLlvm(&o.builder),
+ (try o.builder.intConst(.i32, prefetch.locality)).toLlvm(&o.builder),
+ (try o.builder.intConst(.i32, @intFromEnum(prefetch.cache))).toLlvm(&o.builder),
};
- _ = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
- return null;
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall(
+ llvm_fn_ty.toLlvm(&o.builder),
+ fn_val,
+ &params,
+ params.len,
+ .C,
+ .Auto,
+ "",
+ ), &self.wip);
+ return .none;
}
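
The rw/locality/cache operands forwarded to llvm.prefetch.p0 originate from Zig's @prefetch builtin; a minimal source-language usage sketch (hypothetical function name):

    fn warm(ptr: *const u8) void {
        // rw, locality (0-3), and cache map onto the three i32 arguments above.
        @prefetch(ptr, .{ .rw = .read, .locality = 3, .cache = .data });
    }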
- fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const inst_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
- const llvm_dest_ty = try o.lowerType(inst_ty);
- return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, "");
+ return self.wip.cast(.addrspacecast, operand, try o.lowerType(inst_ty), "");
}
- fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !?*llvm.Value {
- const llvm_u32 = self.context.intType(32);
-
+ fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !Builder.Value {
+ const o = self.dg.object;
const llvm_fn_name = switch (dimension) {
0 => basename ++ ".x",
1 => basename ++ ".y",
2 => basename ++ ".z",
- else => return llvm_u32.constInt(default, .False),
+ else => return o.builder.intValue(.i32, default),
};
const args: [0]*llvm.Value = .{};
- const llvm_fn = self.getIntrinsic(llvm_fn_name, &.{});
- return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ const llvm_fn = try self.getIntrinsic(llvm_fn_name, &.{});
+ return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(.i32, &.{}, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
}
- fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const target = o.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@@ -9716,37 +10243,41 @@ pub const FuncGen = struct {
return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workitem.id");
}
- fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const target = o.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const dimension = pl_op.payload;
- const llvm_u32 = self.context.intType(32);
- if (dimension >= 3) {
- return llvm_u32.constInt(1, .False);
- }
+ if (dimension >= 3) return o.builder.intValue(.i32, 1);
// Fetch the dispatch pointer, which points to this structure:
// https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913
- const llvm_fn = self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{});
+ const llvm_fn = try self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{});
const args: [0]*llvm.Value = .{};
- const dispatch_ptr = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
- dispatch_ptr.setAlignment(4);
+ const llvm_ret_ty = try o.builder.ptrType(Builder.AddrSpace.amdgpu.constant);
+ const dispatch_ptr = (try self.wip.unimplemented(llvm_ret_ty, "")).finish(self.builder.buildCall(
+ (try o.builder.fnType(llvm_ret_ty, &.{}, .normal)).toLlvm(&o.builder),
+ llvm_fn,
+ &args,
+ args.len,
+ .Fast,
+ .Auto,
+ "",
+ ), &self.wip);
+ o.addAttrInt(dispatch_ptr.toLlvm(&self.wip), 0, "align", 4);
// Load the work_group_* member from the struct as u16.
// Just treat the dispatch pointer as an array of u16 to keep things simple.
- const offset = 2 + dimension;
- const index = [_]*llvm.Value{llvm_u32.constInt(offset, .False)};
- const llvm_u16 = self.context.intType(16);
- const workgroup_size_ptr = self.builder.buildInBoundsGEP(llvm_u16, dispatch_ptr, &index, index.len, "");
- const workgroup_size = self.builder.buildLoad(llvm_u16, workgroup_size_ptr, "");
- workgroup_size.setAlignment(2);
- return workgroup_size;
+ const workgroup_size_ptr = try self.wip.gep(.inbounds, .i16, dispatch_ptr, &.{
+ try o.builder.intValue(try o.lowerType(Type.usize), 2 + dimension),
+ }, "");
+ const workgroup_size_alignment = comptime Builder.Alignment.fromByteUnits(2);
+ return self.wip.load(.normal, .i16, workgroup_size_ptr, workgroup_size_alignment, "");
}
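
Read back in source terms, the code above views the dispatch packet as an array of u16 and picks up workgroup_size_{x,y,z} at u16 indices 2, 3, and 4. A trivial sketch of that indexing (hypothetical helper):

    fn workGroupSize(dispatch_packet: []const u16, dimension: usize) u16 {
        return dispatch_packet[2 + dimension]; // dimension is 0, 1, or 2
    }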
- fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const target = o.module.getTarget();
assert(target.cpu.arch == .amdgcn); // TODO: port this function to other GPU architectures
@@ -9756,65 +10287,82 @@ pub const FuncGen = struct {
return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workgroup.id");
}
- fn getErrorNameTable(self: *FuncGen) !*llvm.Value {
+ fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index {
const o = self.dg.object;
- if (o.error_name_table) |table| {
- return table;
- }
+ const table = o.error_name_table;
+ if (table != .none) return table;
const mod = o.module;
const slice_ty = Type.slice_const_u8_sentinel_0;
const slice_alignment = slice_ty.abiAlignment(mod);
- const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
+ const undef_init = try o.builder.undefConst(.ptr); // TODO: Address space
- const error_name_table_global = o.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
- error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef());
+ const name = try o.builder.string("__zig_err_name_table");
+ const error_name_table_global = o.llvm_module.addGlobal(Builder.Type.ptr.toLlvm(&o.builder), name.toSlice(&o.builder).?);
+ error_name_table_global.setInitializer(undef_init.toLlvm(&o.builder));
error_name_table_global.setLinkage(.Private);
error_name_table_global.setGlobalConstant(.True);
error_name_table_global.setUnnamedAddr(.True);
error_name_table_global.setAlignment(slice_alignment);
- o.error_name_table = error_name_table_global;
- return error_name_table_global;
+ var global = Builder.Global{
+ .linkage = .private,
+ .unnamed_addr = .unnamed_addr,
+ .type = .ptr,
+ .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) },
+ };
+ var variable = Builder.Variable{
+ .global = @enumFromInt(o.builder.globals.count()),
+ .mutability = .constant,
+ .init = undef_init,
+ .alignment = Builder.Alignment.fromByteUnits(slice_alignment),
+ };
+ try o.builder.llvm.globals.append(o.gpa, error_name_table_global);
+ _ = try o.builder.addGlobal(name, global);
+ try o.builder.variables.append(o.gpa, variable);
+
+ o.error_name_table = global.kind.variable;
+ return global.kind.variable;
}
/// Assumes the optional is not pointer-like and payload has bits.
- fn optIsNonNull(
+ fn optCmpNull(
self: *FuncGen,
- opt_llvm_ty: *llvm.Type,
- opt_handle: *llvm.Value,
+ cond: Builder.IntegerCondition,
+ opt_llvm_ty: Builder.Type,
+ opt_handle: Builder.Value,
is_by_ref: bool,
- ) *llvm.Value {
- const non_null_llvm_ty = self.context.intType(8);
+ ) Allocator.Error!Builder.Value {
+ const o = self.dg.object;
const field = b: {
if (is_by_ref) {
- const field_ptr = self.builder.buildStructGEP(opt_llvm_ty, opt_handle, 1, "");
- break :b self.builder.buildLoad(non_null_llvm_ty, field_ptr, "");
+ const field_ptr = try self.wip.gepStruct(opt_llvm_ty, opt_handle, 1, "");
+ break :b try self.wip.load(.normal, .i8, field_ptr, .default, "");
}
- break :b self.builder.buildExtractValue(opt_handle, 1, "");
+ break :b try self.wip.extractValue(opt_handle, &.{1}, "");
};
comptime assert(optional_layout_version == 3);
- return self.builder.buildICmp(.NE, field, non_null_llvm_ty.constInt(0, .False), "");
+ return self.wip.icmp(cond, field, try o.builder.intValue(.i8, 0), "");
}
/// Assumes the optional is not pointer-like and payload has bits.
fn optPayloadHandle(
fg: *FuncGen,
- opt_llvm_ty: *llvm.Type,
- opt_handle: *llvm.Value,
+ opt_llvm_ty: Builder.Type,
+ opt_handle: Builder.Value,
opt_ty: Type,
can_elide_load: bool,
- ) !*llvm.Value {
+ ) !Builder.Value {
const o = fg.dg.object;
const mod = o.module;
const payload_ty = opt_ty.optionalChild(mod);
if (isByRef(opt_ty, mod)) {
// We have a pointer and we need to return a pointer to the first field.
- const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, "");
+ const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, "");
- const payload_alignment = payload_ty.abiAlignment(mod);
+ const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
@@ -9822,55 +10370,51 @@ pub const FuncGen = struct {
return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false);
}
const payload_llvm_ty = try o.lowerType(payload_ty);
- const load_inst = fg.builder.buildLoad(payload_llvm_ty, payload_ptr, "");
- load_inst.setAlignment(payload_alignment);
- return load_inst;
+ return fg.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, "");
}
assert(!isByRef(payload_ty, mod));
- return fg.builder.buildExtractValue(opt_handle, 0, "");
+ return fg.wip.extractValue(opt_handle, &.{0}, "");
}
fn buildOptional(
self: *FuncGen,
optional_ty: Type,
- payload: *llvm.Value,
- non_null_bit: *llvm.Value,
- ) !?*llvm.Value {
+ payload: Builder.Value,
+ non_null_bit: Builder.Value,
+ ) !Builder.Value {
const o = self.dg.object;
const optional_llvm_ty = try o.lowerType(optional_ty);
- const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), "");
+ const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, "");
const mod = o.module;
if (isByRef(optional_ty, mod)) {
- const payload_alignment = optional_ty.abiAlignment(mod);
- const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment);
+ const payload_alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod));
+ const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment);
{
- const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 0, "");
- const store_inst = self.builder.buildStore(payload, field_ptr);
- store_inst.setAlignment(payload_alignment);
+ const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 0, "");
+ _ = try self.wip.store(.normal, payload, field_ptr, payload_alignment);
}
{
- const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 1, "");
- const store_inst = self.builder.buildStore(non_null_field, field_ptr);
- store_inst.setAlignment(1);
+ const non_null_alignment = comptime Builder.Alignment.fromByteUnits(1);
+ const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 1, "");
+ _ = try self.wip.store(.normal, non_null_field, field_ptr, non_null_alignment);
}
return alloca_inst;
}
- const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, "");
- return self.builder.buildInsertValue(partial, non_null_field, 1, "");
+ return self.wip.buildAggregate(optional_llvm_ty, &.{ payload, non_null_field }, "");
}
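
Non-pointer-like by-value optionals are represented as the payload followed by an i8 flag holding the zero-extended non-null bit, which is also what optCmpNull compares against 0 above. A rough sketch of that shape, assuming a u32 payload:

    const OptionalU32 = extern struct { payload: u32, non_null: u8 };

    fn wrap(payload: u32, non_null_bit: bool) OptionalU32 {
        return .{ .payload = payload, .non_null = @intFromBool(non_null_bit) };
    }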
fn fieldPtr(
self: *FuncGen,
inst: Air.Inst.Index,
- struct_ptr: *llvm.Value,
+ struct_ptr: Builder.Value,
struct_ptr_ty: Type,
field_index: u32,
- ) !?*llvm.Value {
+ ) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const struct_ty = struct_ptr_ty.childType(mod);
@@ -9892,26 +10436,25 @@ pub const FuncGen = struct {
// Offset our operand pointer by the correct number of bytes.
const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod);
if (byte_offset == 0) return struct_ptr;
- const byte_llvm_ty = self.context.intType(8);
- const llvm_usize = try o.lowerType(Type.usize);
- const llvm_index = llvm_usize.constInt(byte_offset, .False);
- const indices: [1]*llvm.Value = .{llvm_index};
- return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, "");
+ const usize_ty = try o.lowerType(Type.usize);
+ const llvm_index = try o.builder.intValue(usize_ty, byte_offset);
+ return self.wip.gep(.inbounds, .i8, struct_ptr, &.{llvm_index}, "");
},
else => {
const struct_llvm_ty = try o.lowerPtrElemTy(struct_ty);
if (llvmField(struct_ty, field_index, mod)) |llvm_field| {
- return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, "");
+ return self.wip.gepStruct(struct_llvm_ty, struct_ptr, llvm_field.index, "");
} else {
// If we found no index then this means this is a zero sized field at the
// end of the struct. Treat our struct pointer as an array of two and get
// the index to the element at index `1` to get a pointer to the end of
// the struct.
- const llvm_u32 = self.context.intType(32);
- const llvm_index = llvm_u32.constInt(@intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
- const indices: [1]*llvm.Value = .{llvm_index};
- return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
+ const llvm_index = try o.builder.intValue(
+ try o.lowerType(Type.usize),
+ @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)),
+ );
+ return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, "");
}
},
},
@@ -9920,126 +10463,128 @@ pub const FuncGen = struct {
if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
const union_llvm_ty = try o.lowerType(struct_ty);
- const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
- return union_field_ptr;
+ return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
},
else => unreachable,
}
}
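
For packed containers, the field pointer computed above is plain byte arithmetic on the struct pointer; a one-line sketch (hypothetical helper, const pointers assumed):

    fn packedFieldPtr(struct_ptr: [*]const u8, byte_offset: usize) [*]const u8 {
        return struct_ptr + byte_offset; // matches the i8 GEP emitted above
    }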
- fn getIntrinsic(fg: *FuncGen, name: []const u8, types: []const *llvm.Type) *llvm.Value {
+ fn getIntrinsic(
+ fg: *FuncGen,
+ name: []const u8,
+ types: []const Builder.Type,
+ ) Allocator.Error!*llvm.Value {
+ const o = fg.dg.object;
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
- const o = fg.dg.object;
- return o.llvm_module.getIntrinsicDeclaration(id, types.ptr, types.len);
+ const llvm_types = try o.gpa.alloc(*llvm.Type, types.len);
+ defer o.gpa.free(llvm_types);
+ for (llvm_types, types) |*llvm_type, ty| llvm_type.* = ty.toLlvm(&o.builder);
+ return o.llvm_module.getIntrinsicDeclaration(id, llvm_types.ptr, llvm_types.len);
}
/// Load a by-ref type by constructing a new alloca and performing a memcpy.
fn loadByRef(
fg: *FuncGen,
- ptr: *llvm.Value,
+ ptr: Builder.Value,
pointee_type: Type,
- ptr_alignment: u32,
+ ptr_alignment: Builder.Alignment,
is_volatile: bool,
- ) !*llvm.Value {
+ ) !Builder.Value {
const o = fg.dg.object;
const mod = o.module;
const pointee_llvm_ty = try o.lowerType(pointee_type);
- const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod));
- const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align);
- const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits);
+ const result_align = Builder.Alignment.fromByteUnits(
+ @max(ptr_alignment.toByteUnits() orelse 0, pointee_type.abiAlignment(mod)),
+ );
+ const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align);
+ const usize_ty = try o.lowerType(Type.usize);
const size_bytes = pointee_type.abiSize(mod);
- _ = fg.builder.buildMemCpy(
- result_ptr,
- result_align,
- ptr,
- ptr_alignment,
- llvm_usize.constInt(size_bytes, .False),
+ _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildMemCpy(
+ result_ptr.toLlvm(&fg.wip),
+ @intCast(result_align.toByteUnits() orelse 0),
+ ptr.toLlvm(&fg.wip),
+ @intCast(ptr_alignment.toByteUnits() orelse 0),
+ (try o.builder.intConst(usize_ty, size_bytes)).toLlvm(&o.builder),
is_volatile,
- );
+ ), &fg.wip);
return result_ptr;
}
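
loadByRef copies abiSize bytes from the source pointer into a fresh alloca aligned to at least the type's ABI alignment; the same effect in plain Zig, as a sketch:

    const std = @import("std");

    fn loadByRefSketch(comptime T: type, src: *const T) T {
        var dst: T = undefined; // stands in for the new alloca
        @memcpy(std.mem.asBytes(&dst), std.mem.asBytes(src));
        return dst;
    }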
/// This function always performs a copy. For isByRef=true types, it creates a new
/// alloca and copies the value into it, then returns the alloca instruction.
/// For isByRef=false types, it creates a load instruction and returns it.
- fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
+ fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const info = ptr_ty.ptrInfo(mod);
const elem_ty = info.child.toType();
- if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
- const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse
- elem_ty.abiAlignment(mod)));
- const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile);
+ const ptr_alignment = Builder.Alignment.fromByteUnits(
+ info.flags.alignment.toByteUnitsOptional() orelse elem_ty.abiAlignment(mod),
+ );
+ const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) {
+ false => .normal,
+ true => .@"volatile",
+ };
assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
- const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False);
+ const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index));
const vec_elem_ty = try o.lowerType(elem_ty);
- const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size);
+ const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty);
- const loaded_vector = self.builder.buildLoad(vec_ty, ptr, "");
- loaded_vector.setAlignment(ptr_alignment);
- loaded_vector.setVolatile(ptr_volatile);
-
- return self.builder.buildExtractElement(loaded_vector, index_u32, "");
+ const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, "");
+ return self.wip.extractElement(loaded_vector, index_u32, "");
}
if (info.packed_offset.host_size == 0) {
if (isByRef(elem_ty, mod)) {
return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile);
}
- const elem_llvm_ty = try o.lowerType(elem_ty);
- const llvm_inst = self.builder.buildLoad(elem_llvm_ty, ptr, "");
- llvm_inst.setAlignment(ptr_alignment);
- llvm_inst.setVolatile(ptr_volatile);
- return llvm_inst;
+ return self.wip.load(ptr_kind, try o.lowerType(elem_ty), ptr, ptr_alignment, "");
}
- const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8);
- const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
- containing_int.setAlignment(ptr_alignment);
- containing_int.setVolatile(ptr_volatile);
+ const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8));
+ const containing_int = try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, "");
- const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod)));
- const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False);
- const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
+ const elem_bits = ptr_ty.childType(mod).bitSize(mod);
+ const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset);
+ const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(elem_ty);
if (isByRef(elem_ty, mod)) {
- const result_align = elem_ty.abiAlignment(mod);
- const result_ptr = self.buildAlloca(elem_llvm_ty, result_align);
+ const result_align = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
+ const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align);
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
- const store_inst = self.builder.buildStore(truncated_int, result_ptr);
- store_inst.setAlignment(result_align);
+ const same_size_int = try o.builder.intType(@intCast(elem_bits));
+ const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
+ _ = try self.wip.store(.normal, truncated_int, result_ptr, result_align);
return result_ptr;
}
if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) {
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
- return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
+ const same_size_int = try o.builder.intType(@intCast(elem_bits));
+ const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
+ return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
}
if (elem_ty.isPtrAtRuntime(mod)) {
- const same_size_int = self.context.intType(elem_bits);
- const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
- return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
+ const same_size_int = try o.builder.intType(@intCast(elem_bits));
+ const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
+ return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
}
- return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
+ return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, "");
}
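
The packed-field path above loads the containing integer, shifts the field down by its bit offset, and truncates to the field's width. The same arithmetic in plain Zig, assuming a u5 field at bit offset 3 inside a u16 host integer:

    const std = @import("std");

    test "packed field load is lshr followed by trunc" {
        const containing_int: u16 = 0b0000_0101_1011_1000;
        const bit_offset: u4 = 3;
        const field: u5 = @truncate(containing_int >> bit_offset);
        try std.testing.expectEqual(@as(u5, 0b10111), field);
    }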
fn store(
self: *FuncGen,
- ptr: *llvm.Value,
+ ptr: Builder.Value,
ptr_ty: Type,
- elem: *llvm.Value,
- ordering: llvm.AtomicOrdering,
+ elem: Builder.Value,
+ ordering: Builder.AtomicOrdering,
) !void {
const o = self.dg.object;
const mod = o.module;
@@ -10048,124 +10593,115 @@ pub const FuncGen = struct {
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
- const ptr_alignment = ptr_ty.ptrAlignment(mod);
- const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile);
+ const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
+ const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) {
+ false => .normal,
+ true => .@"volatile",
+ };
assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
- const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False);
+ const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index));
const vec_elem_ty = try o.lowerType(elem_ty);
- const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size);
+ const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty);
- const loaded_vector = self.builder.buildLoad(vec_ty, ptr, "");
- loaded_vector.setAlignment(ptr_alignment);
- loaded_vector.setVolatile(ptr_volatile);
+ const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, "");
- const modified_vector = self.builder.buildInsertElement(loaded_vector, elem, index_u32, "");
+ const modified_vector = try self.wip.insertElement(loaded_vector, elem, index_u32, "");
- const store_inst = self.builder.buildStore(modified_vector, ptr);
- assert(ordering == .NotAtomic);
- store_inst.setAlignment(ptr_alignment);
- store_inst.setVolatile(ptr_volatile);
+ assert(ordering == .none);
+ _ = try self.wip.store(ptr_kind, modified_vector, ptr, ptr_alignment);
return;
}
if (info.packed_offset.host_size != 0) {
- const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8);
- const containing_int = self.builder.buildLoad(int_elem_ty, ptr, "");
- assert(ordering == .NotAtomic);
- containing_int.setAlignment(ptr_alignment);
- containing_int.setVolatile(ptr_volatile);
- const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod)));
- const containing_int_ty = containing_int.typeOf();
- const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False);
+ const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8));
+ assert(ordering == .none);
+ const containing_int =
+ try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, "");
+ const elem_bits = ptr_ty.childType(mod).bitSize(mod);
+ const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset);
// Convert to equally-sized integer type in order to perform the bit
// operations on the value to store
- const value_bits_type = self.context.intType(elem_bits);
+ const value_bits_type = try o.builder.intType(@intCast(elem_bits));
const value_bits = if (elem_ty.isPtrAtRuntime(mod))
- self.builder.buildPtrToInt(elem, value_bits_type, "")
+ try self.wip.cast(.ptrtoint, elem, value_bits_type, "")
else
- self.builder.buildBitCast(elem, value_bits_type, "");
-
- var mask_val = value_bits_type.constAllOnes();
- mask_val = mask_val.constZExt(containing_int_ty);
- mask_val = mask_val.constShl(shift_amt);
- mask_val = mask_val.constNot();
-
- const anded_containing_int = self.builder.buildAnd(containing_int, mask_val, "");
- const extended_value = self.builder.buildZExt(value_bits, containing_int_ty, "");
- const shifted_value = self.builder.buildShl(extended_value, shift_amt, "");
- const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, "");
-
- const store_inst = self.builder.buildStore(ored_value, ptr);
- assert(ordering == .NotAtomic);
- store_inst.setAlignment(ptr_alignment);
- store_inst.setVolatile(ptr_volatile);
+ try self.wip.cast(.bitcast, elem, value_bits_type, "");
+
+ var mask_val = try o.builder.intConst(value_bits_type, -1);
+ mask_val = try o.builder.castConst(.zext, mask_val, containing_int_ty);
+ mask_val = try o.builder.binConst(.shl, mask_val, shift_amt);
+ mask_val =
+ try o.builder.binConst(.xor, mask_val, try o.builder.intConst(containing_int_ty, -1));
+
+ const anded_containing_int =
+ try self.wip.bin(.@"and", containing_int, mask_val.toValue(), "");
+ const extended_value = try self.wip.cast(.zext, value_bits, containing_int_ty, "");
+ const shifted_value = try self.wip.bin(.shl, extended_value, shift_amt.toValue(), "");
+ const ored_value = try self.wip.bin(.@"or", shifted_value, anded_containing_int, "");
+
+ assert(ordering == .none);
+ _ = try self.wip.store(ptr_kind, ored_value, ptr, ptr_alignment);
return;
}
if (!isByRef(elem_ty, mod)) {
- const store_inst = self.builder.buildStore(elem, ptr);
- store_inst.setOrdering(ordering);
- store_inst.setAlignment(ptr_alignment);
- store_inst.setVolatile(ptr_volatile);
+ _ = try self.wip.storeAtomic(ptr_kind, elem, ptr, self.sync_scope, ordering, ptr_alignment);
return;
}
- assert(ordering == .NotAtomic);
+ assert(ordering == .none);
const size_bytes = elem_ty.abiSize(mod);
- _ = self.builder.buildMemCpy(
- ptr,
- ptr_alignment,
- elem,
+ _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy(
+ ptr.toLlvm(&self.wip),
+ @intCast(ptr_alignment.toByteUnits() orelse 0),
+ elem.toLlvm(&self.wip),
elem_ty.abiAlignment(mod),
- self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False),
+ (try o.builder.intConst(try o.lowerType(Type.usize), size_bytes)).toLlvm(&o.builder),
info.flags.is_volatile,
- );
+ ), &self.wip);
}
- fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void {
+ fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void {
const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
const o = fg.dg.object;
- const target = o.module.getTarget();
- const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
- const zero = usize_llvm_ty.constInt(0, .False);
- const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False);
- const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, "");
- _ = valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero);
+ const usize_ty = try o.lowerType(Type.usize);
+ const zero = try o.builder.intValue(usize_ty, 0);
+ const req = try o.builder.intValue(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED);
+ const ptr_as_usize = try fg.wip.cast(.ptrtoint, ptr, usize_ty, "");
+ _ = try valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero);
}
fn valgrindClientRequest(
fg: *FuncGen,
- default_value: *llvm.Value,
- request: *llvm.Value,
- a1: *llvm.Value,
- a2: *llvm.Value,
- a3: *llvm.Value,
- a4: *llvm.Value,
- a5: *llvm.Value,
- ) *llvm.Value {
+ default_value: Builder.Value,
+ request: Builder.Value,
+ a1: Builder.Value,
+ a2: Builder.Value,
+ a3: Builder.Value,
+ a4: Builder.Value,
+ a5: Builder.Value,
+ ) Allocator.Error!Builder.Value {
const o = fg.dg.object;
const mod = o.module;
const target = mod.getTarget();
if (!target_util.hasValgrindSupport(target)) return default_value;
- const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
- const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod)));
+ const llvm_usize = try o.lowerType(Type.usize);
+ const usize_alignment = Builder.Alignment.fromByteUnits(Type.usize.abiAlignment(mod));
- const array_llvm_ty = usize_llvm_ty.arrayType(6);
- const array_ptr = fg.valgrind_client_request_array orelse a: {
- const array_ptr = fg.buildAlloca(array_llvm_ty, usize_alignment);
+ const array_llvm_ty = try o.builder.arrayType(6, llvm_usize);
+ const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
+ const array_ptr = try fg.buildAlloca(array_llvm_ty, usize_alignment);
fg.valgrind_client_request_array = array_ptr;
break :a array_ptr;
- };
- const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
- const zero = usize_llvm_ty.constInt(0, .False);
+ } else fg.valgrind_client_request_array;
+ const array_elements = [_]Builder.Value{ request, a1, a2, a3, a4, a5 };
+ const zero = try o.builder.intValue(llvm_usize, 0);
for (array_elements, 0..) |elem, i| {
- const indexes = [_]*llvm.Value{
- zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False),
- };
- const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, "");
- const store_inst = fg.builder.buildStore(elem, elem_ptr);
- store_inst.setAlignment(usize_alignment);
+ const elem_ptr = try fg.wip.gep(.inbounds, array_llvm_ty, array_ptr, &.{
+ zero, try o.builder.intValue(llvm_usize, i),
+ }, "");
+ _ = try fg.wip.store(.normal, elem, elem_ptr, usize_alignment);
}
const arch_specific: struct {
@@ -10199,10 +10735,9 @@ pub const FuncGen = struct {
else => unreachable,
};
- const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, usize_llvm_ty, "");
- const args = [_]*llvm.Value{ array_ptr_as_usize, default_value };
- const param_types = [_]*llvm.Type{ usize_llvm_ty, usize_llvm_ty };
- const fn_llvm_ty = llvm.functionType(usize_llvm_ty, &param_types, args.len, .False);
+ const fn_llvm_ty = (try o.builder.fnType(llvm_usize, &(.{llvm_usize} ** 2), .normal)).toLlvm(&o.builder);
+ const array_ptr_as_usize = try fg.wip.cast(.ptrtoint, array_ptr, llvm_usize, "");
+ const args = [_]*llvm.Value{ array_ptr_as_usize.toLlvm(&fg.wip), default_value.toLlvm(&fg.wip) };
const asm_fn = llvm.getInlineAsm(
fn_llvm_ty,
arch_specific.template.ptr,
@@ -10215,14 +10750,9 @@ pub const FuncGen = struct {
.False, // can throw
);
- const call = fg.builder.buildCall(
- fn_llvm_ty,
- asm_fn,
- &args,
- args.len,
- .C,
- .Auto,
- "",
+ const call = (try fg.wip.unimplemented(llvm_usize, "")).finish(
+ fg.builder.buildCall(fn_llvm_ty, asm_fn, &args, args.len, .C, .Auto, ""),
+ &fg.wip,
);
return call;
}
@@ -10432,14 +10962,14 @@ fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
}
}
-fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrdering {
+fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) Builder.AtomicOrdering {
return switch (atomic_order) {
- .Unordered => .Unordered,
- .Monotonic => .Monotonic,
- .Acquire => .Acquire,
- .Release => .Release,
- .AcqRel => .AcquireRelease,
- .SeqCst => .SequentiallyConsistent,
+ .Unordered => .unordered,
+ .Monotonic => .monotonic,
+ .Acquire => .acquire,
+ .Release => .release,
+ .AcqRel => .acq_rel,
+ .SeqCst => .seq_cst,
};
}
@@ -10494,45 +11024,67 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
}
 /// Convert a Zig address space to an LLVM address space.
-fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) c_uint {
+fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace {
+ for (llvmAddrSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm;
+ unreachable;
+}
+
+const AddrSpaceInfo = struct {
+ zig: ?std.builtin.AddressSpace,
+ llvm: Builder.AddrSpace,
+ non_integral: bool = false,
+ size: ?u16 = null,
+ abi: ?u16 = null,
+ pref: ?u16 = null,
+ idx: ?u16 = null,
+ force_in_data_layout: bool = false,
+};
+fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo {
return switch (target.cpu.arch) {
- .x86, .x86_64 => switch (address_space) {
- .generic => llvm.address_space.default,
- .gs => llvm.address_space.x86.gs,
- .fs => llvm.address_space.x86.fs,
- .ss => llvm.address_space.x86.ss,
- else => unreachable,
+ .x86, .x86_64 => &.{
+ .{ .zig = .generic, .llvm = .default },
+ .{ .zig = .gs, .llvm = Builder.AddrSpace.x86.gs },
+ .{ .zig = .fs, .llvm = Builder.AddrSpace.x86.fs },
+ .{ .zig = .ss, .llvm = Builder.AddrSpace.x86.ss },
+ .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr32_sptr, .size = 32, .abi = 32, .force_in_data_layout = true },
+ .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr32_uptr, .size = 32, .abi = 32, .force_in_data_layout = true },
+ .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr64, .size = 64, .abi = 64, .force_in_data_layout = true },
},
- .nvptx, .nvptx64 => switch (address_space) {
- .generic => llvm.address_space.default,
- .global => llvm.address_space.nvptx.global,
- .constant => llvm.address_space.nvptx.constant,
- .param => llvm.address_space.nvptx.param,
- .shared => llvm.address_space.nvptx.shared,
- .local => llvm.address_space.nvptx.local,
- else => unreachable,
+ .nvptx, .nvptx64 => &.{
+ .{ .zig = .generic, .llvm = .default },
+ .{ .zig = .global, .llvm = Builder.AddrSpace.nvptx.global },
+ .{ .zig = .constant, .llvm = Builder.AddrSpace.nvptx.constant },
+ .{ .zig = .param, .llvm = Builder.AddrSpace.nvptx.param },
+ .{ .zig = .shared, .llvm = Builder.AddrSpace.nvptx.shared },
+ .{ .zig = .local, .llvm = Builder.AddrSpace.nvptx.local },
},
- .amdgcn => switch (address_space) {
- .generic => llvm.address_space.amdgpu.flat,
- .global => llvm.address_space.amdgpu.global,
- .constant => llvm.address_space.amdgpu.constant,
- .shared => llvm.address_space.amdgpu.local,
- .local => llvm.address_space.amdgpu.private,
- else => unreachable,
+ .amdgcn => &.{
+ .{ .zig = .generic, .llvm = Builder.AddrSpace.amdgpu.flat, .force_in_data_layout = true },
+ .{ .zig = .global, .llvm = Builder.AddrSpace.amdgpu.global, .force_in_data_layout = true },
+ .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.region, .size = 32, .abi = 32 },
+ .{ .zig = .shared, .llvm = Builder.AddrSpace.amdgpu.local, .size = 32, .abi = 32 },
+ .{ .zig = .constant, .llvm = Builder.AddrSpace.amdgpu.constant, .force_in_data_layout = true },
+ .{ .zig = .local, .llvm = Builder.AddrSpace.amdgpu.private, .size = 32, .abi = 32 },
+ .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.constant_32bit, .size = 32, .abi = 32 },
+ .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.buffer_fat_pointer, .non_integral = true },
},
- .avr => switch (address_space) {
- .generic => llvm.address_space.default,
- .flash => llvm.address_space.avr.flash,
- .flash1 => llvm.address_space.avr.flash1,
- .flash2 => llvm.address_space.avr.flash2,
- .flash3 => llvm.address_space.avr.flash3,
- .flash4 => llvm.address_space.avr.flash4,
- .flash5 => llvm.address_space.avr.flash5,
- else => unreachable,
+ .avr => &.{
+ .{ .zig = .generic, .llvm = .default, .abi = 8 },
+ .{ .zig = .flash, .llvm = Builder.AddrSpace.avr.flash, .abi = 8 },
+ .{ .zig = .flash1, .llvm = Builder.AddrSpace.avr.flash1, .abi = 8 },
+ .{ .zig = .flash2, .llvm = Builder.AddrSpace.avr.flash2, .abi = 8 },
+ .{ .zig = .flash3, .llvm = Builder.AddrSpace.avr.flash3, .abi = 8 },
+ .{ .zig = .flash4, .llvm = Builder.AddrSpace.avr.flash4, .abi = 8 },
+ .{ .zig = .flash5, .llvm = Builder.AddrSpace.avr.flash5, .abi = 8 },
},
- else => switch (address_space) {
- .generic => llvm.address_space.default,
- else => unreachable,
+ .wasm32, .wasm64 => &.{
+ .{ .zig = .generic, .llvm = .default, .force_in_data_layout = true },
+ .{ .zig = null, .llvm = Builder.AddrSpace.wasm.variable, .non_integral = true },
+ .{ .zig = null, .llvm = Builder.AddrSpace.wasm.externref, .non_integral = true, .size = 8, .abi = 8 },
+ .{ .zig = null, .llvm = Builder.AddrSpace.wasm.funcref, .non_integral = true, .size = 8, .abi = 8 },
+ },
+ else => &.{
+ .{ .zig = .generic, .llvm = .default },
},
};
}
@@ -10541,30 +11093,30 @@ fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Targe
 /// different address space, and then cast back to the generic address space.
/// For example, on GPUs local variable declarations must be generated into the local address space.
/// This function returns the address space local values should be generated into.
-fn llvmAllocaAddressSpace(target: std.Target) c_uint {
+fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace {
return switch (target.cpu.arch) {
// On amdgcn, locals should be generated into the private address space.
// To make Zig not impossible to use, these are then converted to addresses in the
 // generic address space and treated as regular pointers. This is the way that HIP also does it.
- .amdgcn => llvm.address_space.amdgpu.private,
- else => llvm.address_space.default,
+ .amdgcn => Builder.AddrSpace.amdgpu.private,
+ else => .default,
};
}
/// On some targets, global values that are in the generic address space must be generated into a
/// different address space, and then cast back to the generic address space.
-fn llvmDefaultGlobalAddressSpace(target: std.Target) c_uint {
+fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace {
return switch (target.cpu.arch) {
// On amdgcn, globals must be explicitly allocated and uploaded so that the program can access
// them.
- .amdgcn => llvm.address_space.amdgpu.global,
- else => llvm.address_space.default,
+ .amdgcn => Builder.AddrSpace.amdgpu.global,
+ else => .default,
};
}
 /// Return the actual address space that a value should be stored in if it's a global address space.
/// When a value is placed in the resulting address space, it needs to be cast back into wanted_address_space.
-fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) c_uint {
+fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace {
return switch (wanted_address_space) {
.generic => llvmDefaultGlobalAddressSpace(target),
else => |as| toLlvmAddressSpace(as, target),
@@ -10694,28 +11246,20 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
/// In order to support the C calling convention, some return types need to be lowered
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
const return_type = fn_info.return_type.toType();
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- if (return_type.isError(mod)) {
- return o.lowerType(Type.anyerror);
- } else {
- return o.context.voidType();
- }
+ return if (return_type.isError(mod)) Builder.Type.err_int else .void;
}
const target = mod.getTarget();
switch (fn_info.cc) {
- .Unspecified, .Inline => {
- if (isByRef(return_type, mod)) {
- return o.context.voidType();
- } else {
- return o.lowerType(return_type);
- }
- },
+ .Unspecified,
+ .Inline,
+ => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type),
.C => {
switch (target.cpu.arch) {
.mips, .mipsel => return o.lowerType(return_type),
@@ -10729,50 +11273,37 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
}
const classes = wasm_c_abi.classifyType(return_type, mod);
if (classes[0] == .indirect or classes[0] == .none) {
- return o.context.voidType();
+ return .void;
}
assert(classes[0] == .direct and classes[1] == .none);
const scalar_type = wasm_c_abi.scalarType(return_type, mod);
- const abi_size = scalar_type.abiSize(mod);
- return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
+ return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8));
},
.aarch64, .aarch64_be => {
switch (aarch64_c_abi.classifyType(return_type, mod)) {
- .memory => return o.context.voidType(),
+ .memory => return .void,
.float_array => return o.lowerType(return_type),
.byval => return o.lowerType(return_type),
- .integer => {
- const bit_size = return_type.bitSize(mod);
- return o.context.intType(@as(c_uint, @intCast(bit_size)));
- },
- .double_integer => return o.context.intType(64).arrayType(2),
+ .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))),
+ .double_integer => return o.builder.arrayType(2, .i64),
}
},
.arm, .armeb => {
switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
- .memory, .i64_array => return o.context.voidType(),
- .i32_array => |len| if (len == 1) {
- return o.context.intType(32);
- } else {
- return o.context.voidType();
- },
+ .memory, .i64_array => return .void,
+ .i32_array => |len| return if (len == 1) .i32 else .void,
.byval => return o.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
switch (riscv_c_abi.classifyType(return_type, mod)) {
- .memory => return o.context.voidType(),
+ .memory => return .void,
.integer => {
- const bit_size = return_type.bitSize(mod);
- return o.context.intType(@as(c_uint, @intCast(bit_size)));
+ return o.builder.intType(@intCast(return_type.bitSize(mod)));
},
.double_integer => {
- var llvm_types_buffer: [2]*llvm.Type = .{
- o.context.intType(64),
- o.context.intType(64),
- };
- return o.context.structType(&llvm_types_buffer, 2, .False);
+ return o.builder.structType(.normal, &.{ .i64, .i64 });
},
.byval => return o.lowerType(return_type),
}
@@ -10783,18 +11314,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
},
.Win64 => return lowerWin64FnRetTy(o, fn_info),
.SysV => return lowerSystemVFnRetTy(o, fn_info),
- .Stdcall => {
- if (isScalar(mod, return_type)) {
- return o.lowerType(return_type);
- } else {
- return o.context.voidType();
- }
- },
+ .Stdcall => return if (isScalar(mod, return_type)) o.lowerType(return_type) else .void,
else => return o.lowerType(return_type),
}
}
-fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
const return_type = fn_info.return_type.toType();
switch (x86_64_abi.classifyWindows(return_type, mod)) {
@@ -10802,53 +11327,48 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
} else {
- const abi_size = return_type.abiSize(mod);
- return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
+ return o.builder.intType(@intCast(return_type.abiSize(mod) * 8));
}
},
- .win_i128 => return o.context.intType(64).vectorType(2),
- .memory => return o.context.voidType(),
+ .win_i128 => return o.builder.vectorType(.normal, 2, .i64),
+ .memory => return .void,
.sse => return o.lowerType(return_type),
else => unreachable,
}
}
-fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
const return_type = fn_info.return_type.toType();
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
}
const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
- if (classes[0] == .memory) {
- return o.context.voidType();
- }
- var llvm_types_buffer: [8]*llvm.Type = undefined;
- var llvm_types_index: u32 = 0;
+ if (classes[0] == .memory) return .void;
+ var types_index: u32 = 0;
+ var types_buffer: [8]Builder.Type = undefined;
for (classes) |class| {
switch (class) {
.integer => {
- llvm_types_buffer[llvm_types_index] = o.context.intType(64);
- llvm_types_index += 1;
+ types_buffer[types_index] = .i64;
+ types_index += 1;
},
.sse, .sseup => {
- llvm_types_buffer[llvm_types_index] = o.context.doubleType();
- llvm_types_index += 1;
+ types_buffer[types_index] = .double;
+ types_index += 1;
},
.float => {
- llvm_types_buffer[llvm_types_index] = o.context.floatType();
- llvm_types_index += 1;
+ types_buffer[types_index] = .float;
+ types_index += 1;
},
.float_combine => {
- llvm_types_buffer[llvm_types_index] = o.context.floatType().vectorType(2);
- llvm_types_index += 1;
+ types_buffer[types_index] = try o.builder.vectorType(.normal, 2, .float);
+ types_index += 1;
},
.x87 => {
- if (llvm_types_index != 0 or classes[2] != .none) {
- return o.context.voidType();
- }
- llvm_types_buffer[llvm_types_index] = o.context.x86FP80Type();
- llvm_types_index += 1;
+ if (types_index != 0 or classes[2] != .none) return .void;
+ types_buffer[types_index] = .x86_fp80;
+ types_index += 1;
},
.x87up => continue,
.complex_x87 => {
@@ -10860,10 +11380,9 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type
}
}
if (classes[0] == .integer and classes[1] == .none) {
- const abi_size = return_type.abiSize(mod);
- return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
+ return o.builder.intType(@intCast(return_type.abiSize(mod) * 8));
}
- return o.context.structType(&llvm_types_buffer, llvm_types_index, .False);
+ return o.builder.structType(.normal, types_buffer[0..types_index]);
}
const ParamTypeIterator = struct {
@@ -10871,8 +11390,8 @@ const ParamTypeIterator = struct {
fn_info: InternPool.Key.FuncType,
zig_index: u32,
llvm_index: u32,
- llvm_types_len: u32,
- llvm_types_buffer: [8]*llvm.Type,
+ types_len: u32,
+ types_buffer: [8]Builder.Type,
byval_attr: bool,
const Lowering = union(enum) {
@@ -10889,7 +11408,7 @@ const ParamTypeIterator = struct {
i64_array: u8,
};
- pub fn next(it: *ParamTypeIterator) ?Lowering {
+ pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const mod = it.object.module;
const ip = &mod.intern_pool;
@@ -10899,7 +11418,7 @@ const ParamTypeIterator = struct {
}
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
- pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) ?Lowering {
+ pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering {
const mod = it.object.module;
const ip = &mod.intern_pool;
if (it.zig_index >= it.fn_info.param_types.len) {
@@ -10913,7 +11432,7 @@ const ParamTypeIterator = struct {
}
}
- fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering {
+ fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const target = mod.getTarget();
@@ -10968,8 +11487,8 @@ const ParamTypeIterator = struct {
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
.integer => {
- it.llvm_types_len = 1;
- it.llvm_types_buffer[0] = it.object.context.intType(64);
+ it.types_len = 1;
+ it.types_buffer[0] = .i64;
return .multiple_llvm_types;
},
.double_integer => return Lowering{ .i64_array = 2 },
@@ -11063,7 +11582,7 @@ const ParamTypeIterator = struct {
}
}
- fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
+ fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
if (classes[0] == .memory) {
@@ -11077,25 +11596,25 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .byval;
}
- var llvm_types_buffer: [8]*llvm.Type = undefined;
- var llvm_types_index: u32 = 0;
+ var types_index: u32 = 0;
+ var types_buffer: [8]Builder.Type = undefined;
for (classes) |class| {
switch (class) {
.integer => {
- llvm_types_buffer[llvm_types_index] = it.object.context.intType(64);
- llvm_types_index += 1;
+ types_buffer[types_index] = .i64;
+ types_index += 1;
},
.sse, .sseup => {
- llvm_types_buffer[llvm_types_index] = it.object.context.doubleType();
- llvm_types_index += 1;
+ types_buffer[types_index] = .double;
+ types_index += 1;
},
.float => {
- llvm_types_buffer[llvm_types_index] = it.object.context.floatType();
- llvm_types_index += 1;
+ types_buffer[types_index] = .float;
+ types_index += 1;
},
.float_combine => {
- llvm_types_buffer[llvm_types_index] = it.object.context.floatType().vectorType(2);
- llvm_types_index += 1;
+ types_buffer[types_index] = try it.object.builder.vectorType(.normal, 2, .float);
+ types_index += 1;
},
.x87 => {
it.zig_index += 1;
@@ -11117,9 +11636,9 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .abi_sized_int;
}
- it.llvm_types_buffer = llvm_types_buffer;
- it.llvm_types_len = llvm_types_index;
- it.llvm_index += llvm_types_index;
+ it.types_len = types_index;
+ it.types_buffer = types_buffer;
+ it.llvm_index += types_index;
it.zig_index += 1;
return .multiple_llvm_types;
}
@@ -11131,8 +11650,8 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp
.fn_info = fn_info,
.zig_index = 0,
.llvm_index = 0,
- .llvm_types_buffer = undefined,
- .llvm_types_len = 0,
+ .types_len = 0,
+ .types_buffer = undefined,
.byval_attr = false,
};
}
@@ -11355,23 +11874,23 @@ const AnnotatedDITypePtr = enum(usize) {
fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
assert(@as(u1, @truncate(addr)) == 0);
- return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1));
+ return @enumFromInt(addr | 1);
}
fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
- return @as(AnnotatedDITypePtr, @enumFromInt(addr));
+ return @enumFromInt(addr);
}
fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr {
const addr = @intFromPtr(di_type);
const bit = @intFromBool(resolve == .fwd);
- return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit));
+ return @enumFromInt(addr | bit);
}
fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType {
const fixed_addr = @intFromEnum(self) & ~@as(usize, 1);
- return @as(*llvm.DIType, @ptrFromInt(fixed_addr));
+ return @ptrFromInt(fixed_addr);
}
fn isFwdOnly(self: AnnotatedDITypePtr) bool {
@@ -11401,56 +11920,39 @@ fn compilerRtIntBits(bits: u16) u16 {
}
fn buildAllocaInner(
- context: *llvm.Context,
- builder: *llvm.Builder,
- llvm_func: *llvm.Value,
+ wip: *Builder.WipFunction,
di_scope_non_null: bool,
- llvm_ty: *llvm.Type,
- maybe_alignment: ?c_uint,
+ llvm_ty: Builder.Type,
+ alignment: Builder.Alignment,
target: std.Target,
-) *llvm.Value {
+) Allocator.Error!Builder.Value {
const address_space = llvmAllocaAddressSpace(target);
const alloca = blk: {
- const prev_block = builder.getInsertBlock();
- const prev_debug_location = builder.getCurrentDebugLocation2();
+ const prev_cursor = wip.cursor;
+ const prev_debug_location = wip.llvm.builder.getCurrentDebugLocation2();
defer {
- builder.positionBuilderAtEnd(prev_block);
- if (di_scope_non_null) {
- builder.setCurrentDebugLocation2(prev_debug_location);
- }
- }
-
- const entry_block = llvm_func.getFirstBasicBlock().?;
- if (entry_block.getFirstInstruction()) |first_inst| {
- builder.positionBuilder(entry_block, first_inst);
- } else {
- builder.positionBuilderAtEnd(entry_block);
+ wip.cursor = prev_cursor;
+ if (wip.cursor.block == .entry) wip.cursor.instruction += 1;
+ if (di_scope_non_null) wip.llvm.builder.setCurrentDebugLocation2(prev_debug_location);
}
- builder.clearCurrentDebugLocation();
- break :blk builder.buildAllocaInAddressSpace(llvm_ty, address_space, "");
+ wip.cursor = .{ .block = .entry };
+ wip.llvm.builder.clearCurrentDebugLocation();
+ break :blk try wip.alloca(.normal, llvm_ty, .none, alignment, address_space, "");
};
- if (maybe_alignment) |alignment| {
- alloca.setAlignment(alignment);
- }
-
    // The pointer returned from this function should have the generic address space;
    // if this isn't the case, cast it to the generic address space.
- if (address_space != llvm.address_space.default) {
- return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), "");
- }
-
- return alloca;
+ return wip.conv(.unneeded, alloca, .ptr, "");
}
fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
- return @intFromBool(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
+ return @intFromBool(Type.err_int.abiAlignment(mod) > payload_ty.abiAlignment(mod));
}
fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
- return @intFromBool(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
+ return @intFromBool(Type.err_int.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
}
/// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
new file mode 100644
index 0000000000..1df64ea8f6
--- /dev/null
+++ b/src/codegen/llvm/Builder.zig
@@ -0,0 +1,7931 @@
+gpa: Allocator,
+use_lib_llvm: bool,
+strip: bool,
+
+llvm: if (build_options.have_llvm) struct {
+ context: *llvm.Context,
+ module: ?*llvm.Module = null,
+ target: ?*llvm.Target = null,
+ di_builder: ?*llvm.DIBuilder = null,
+ di_compile_unit: ?*llvm.DICompileUnit = null,
+ types: std.ArrayListUnmanaged(*llvm.Type) = .{},
+ globals: std.ArrayListUnmanaged(*llvm.Value) = .{},
+ constants: std.ArrayListUnmanaged(*llvm.Value) = .{},
+} else void,
+
+source_filename: String,
+data_layout: String,
+target_triple: String,
+
+string_map: std.AutoArrayHashMapUnmanaged(void, void),
+string_bytes: std.ArrayListUnmanaged(u8),
+string_indices: std.ArrayListUnmanaged(u32),
+
+types: std.AutoArrayHashMapUnmanaged(String, Type),
+next_unnamed_type: String,
+next_unique_type_id: std.AutoHashMapUnmanaged(String, u32),
+type_map: std.AutoArrayHashMapUnmanaged(void, void),
+type_items: std.ArrayListUnmanaged(Type.Item),
+type_extra: std.ArrayListUnmanaged(u32),
+
+globals: std.AutoArrayHashMapUnmanaged(String, Global),
+next_unnamed_global: String,
+next_replaced_global: String,
+next_unique_global_id: std.AutoHashMapUnmanaged(String, u32),
+aliases: std.ArrayListUnmanaged(Alias),
+variables: std.ArrayListUnmanaged(Variable),
+functions: std.ArrayListUnmanaged(Function),
+
+constant_map: std.AutoArrayHashMapUnmanaged(void, void),
+constant_items: std.MultiArrayList(Constant.Item),
+constant_extra: std.ArrayListUnmanaged(u32),
+constant_limbs: std.ArrayListUnmanaged(std.math.big.Limb),
+
+pub const expected_fields_len = 32;
+pub const expected_gep_indices_len = 8;
+pub const expected_cases_len = 8;
+pub const expected_incoming_len = 8;
+
+pub const Options = struct {
+ allocator: Allocator,
+ use_lib_llvm: bool = false,
+ strip: bool = true,
+ name: []const u8 = &.{},
+ target: std.Target = builtin.target,
+ triple: []const u8 = &.{},
+};
+
+pub const String = enum(u32) {
+ none = std.math.maxInt(u31),
+ empty,
+ _,
+
+ pub fn isAnon(self: String) bool {
+ assert(self != .none);
+ return self.toIndex() == null;
+ }
+
+ pub fn toSlice(self: String, b: *const Builder) ?[:0]const u8 {
+ const index = self.toIndex() orelse return null;
+ const start = b.string_indices.items[index];
+ const end = b.string_indices.items[index + 1];
+ return b.string_bytes.items[start .. end - 1 :0];
+ }
+
+ const FormatData = struct {
+ string: String,
+ builder: *const Builder,
+ };
+ fn format(
+ data: FormatData,
+ comptime fmt_str: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (comptime std.mem.indexOfNone(u8, fmt_str, "@\"")) |_|
+ @compileError("invalid format string: '" ++ fmt_str ++ "'");
+ assert(data.string != .none);
+ const slice = data.string.toSlice(data.builder) orelse
+ return writer.print("{d}", .{@intFromEnum(data.string)});
+ const full_slice = slice[0 .. slice.len + comptime @intFromBool(
+ std.mem.indexOfScalar(u8, fmt_str, '@') != null,
+ )];
+ const need_quotes = (comptime std.mem.indexOfScalar(u8, fmt_str, '"') != null) or
+ !isValidIdentifier(full_slice);
+ if (need_quotes) try writer.writeByte('"');
+ for (full_slice) |character| switch (character) {
+ '\\' => try writer.writeAll("\\\\"),
+ ' '...'"' - 1, '"' + 1...'\\' - 1, '\\' + 1...'~' => try writer.writeByte(character),
+ else => try writer.print("\\{X:0>2}", .{character}),
+ };
+ if (need_quotes) try writer.writeByte('"');
+ }
+ pub fn fmt(self: String, builder: *const Builder) std.fmt.Formatter(format) {
+ return .{ .data = .{ .string = self, .builder = builder } };
+ }
+
+ fn fromIndex(index: ?usize) String {
+ return @enumFromInt(@as(u32, @intCast((index orelse return .none) +
+ @intFromEnum(String.empty))));
+ }
+ fn toIndex(self: String) ?usize {
+ return std.math.sub(u32, @intFromEnum(self), @intFromEnum(String.empty)) catch null;
+ }
+
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: Adapter, key: []const u8) u32 {
+ return @truncate(std.hash.Wyhash.hash(0, key));
+ }
+ pub fn eql(ctx: Adapter, lhs_key: []const u8, _: void, rhs_index: usize) bool {
+ return std.mem.eql(u8, lhs_key, String.fromIndex(rhs_index).toSlice(ctx.builder).?);
+ }
+ };
+};
+
+pub const Type = enum(u32) {
+ void,
+ half,
+ bfloat,
+ float,
+ double,
+ fp128,
+ x86_fp80,
+ ppc_fp128,
+ x86_amx,
+ x86_mmx,
+ label,
+ token,
+ metadata,
+
+ i1,
+ i8,
+ i16,
+ i29,
+ i32,
+ i64,
+ i80,
+ i128,
+ ptr,
+
+ none = std.math.maxInt(u32),
+ _,
+
+ pub const err_int = Type.i16;
+
+ pub const Tag = enum(u4) {
+ simple,
+ function,
+ vararg_function,
+ integer,
+ pointer,
+ target,
+ vector,
+ scalable_vector,
+ small_array,
+ array,
+ structure,
+ packed_structure,
+ named_structure,
+ };
+
+ pub const Simple = enum {
+ void,
+ half,
+ bfloat,
+ float,
+ double,
+ fp128,
+ x86_fp80,
+ ppc_fp128,
+ x86_amx,
+ x86_mmx,
+ label,
+ token,
+ metadata,
+ };
+
+ pub const Function = struct {
+ ret: Type,
+ params_len: u32,
+        //params: [params_len]Type,
+
+ pub const Kind = enum { normal, vararg };
+ };
+
+ pub const Target = extern struct {
+ name: String,
+ types_len: u32,
+ ints_len: u32,
+ //types: [types_len]Type,
+ //ints: [ints_len]u32,
+ };
+
+ pub const Vector = extern struct {
+ len: u32,
+ child: Type,
+
+ fn length(self: Vector) u32 {
+ return self.len;
+ }
+
+ pub const Kind = enum { normal, scalable };
+ };
+
+ pub const Array = extern struct {
+ len_lo: u32,
+ len_hi: u32,
+ child: Type,
+
+ fn length(self: Array) u64 {
+ return @as(u64, self.len_hi) << 32 | self.len_lo;
+ }
+ };
+
+ pub const Structure = struct {
+ fields_len: u32,
+ //fields: [fields_len]Type,
+
+ pub const Kind = enum { normal, @"packed" };
+ };
+
+ pub const NamedStructure = struct {
+ id: String,
+ body: Type,
+ };
+
+ pub const Item = packed struct(u32) {
+ tag: Tag,
+ data: ExtraIndex,
+
+ pub const ExtraIndex = u28;
+ };
+
+ pub fn tag(self: Type, builder: *const Builder) Tag {
+ return builder.type_items.items[@intFromEnum(self)].tag;
+ }
+
+ pub fn unnamedTag(self: Type, builder: *const Builder) Tag {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body
+ .unnamedTag(builder),
+ else => item.tag,
+ };
+ }
+
+ pub fn scalarTag(self: Type, builder: *const Builder) Tag {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .vector, .scalable_vector => builder.typeExtraData(Type.Vector, item.data)
+ .child.tag(builder),
+ else => item.tag,
+ };
+ }
+
+ pub fn isFloatingPoint(self: Type) bool {
+ return switch (self) {
+ .half, .bfloat, .float, .double, .fp128, .x86_fp80, .ppc_fp128 => true,
+ else => false,
+ };
+ }
+
+ pub fn isInteger(self: Type, builder: *const Builder) bool {
+ return switch (self) {
+ .i1, .i8, .i16, .i29, .i32, .i64, .i80, .i128 => true,
+ else => switch (self.tag(builder)) {
+ .integer => true,
+ else => false,
+ },
+ };
+ }
+
+ pub fn isPointer(self: Type, builder: *const Builder) bool {
+ return switch (self) {
+ .ptr => true,
+ else => switch (self.tag(builder)) {
+ .pointer => true,
+ else => false,
+ },
+ };
+ }
+
+ pub fn isFunction(self: Type, builder: *const Builder) bool {
+ return switch (self.tag(builder)) {
+ .function, .vararg_function => true,
+ else => false,
+ };
+ }
+
+ pub fn functionKind(self: Type, builder: *const Builder) Type.Function.Kind {
+ return switch (self.tag(builder)) {
+ .function => .normal,
+ .vararg_function => .vararg,
+ else => unreachable,
+ };
+ }
+
+ pub fn functionParameters(self: Type, builder: *const Builder) []const Type {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ switch (item.tag) {
+ .function,
+ .vararg_function,
+ => {
+ var extra = builder.typeExtraDataTrail(Type.Function, item.data);
+ return extra.trail.next(extra.data.params_len, Type, builder);
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn functionReturn(self: Type, builder: *const Builder) Type {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ switch (item.tag) {
+ .function,
+ .vararg_function,
+ => return builder.typeExtraData(Type.Function, item.data).ret,
+ else => unreachable,
+ }
+ }
+
+ pub fn isVector(self: Type, builder: *const Builder) bool {
+ return switch (self.tag(builder)) {
+ .vector, .scalable_vector => true,
+ else => false,
+ };
+ }
+
+ pub fn vectorKind(self: Type, builder: *const Builder) Type.Vector.Kind {
+ return switch (self.tag(builder)) {
+ .vector => .normal,
+ .scalable_vector => .scalable,
+ else => unreachable,
+ };
+ }
+
+ pub fn isStruct(self: Type, builder: *const Builder) bool {
+ return switch (self.tag(builder)) {
+ .structure, .packed_structure, .named_structure => true,
+ else => false,
+ };
+ }
+
+ pub fn structKind(self: Type, builder: *const Builder) Type.Structure.Kind {
+ return switch (self.unnamedTag(builder)) {
+ .structure => .normal,
+ .packed_structure => .@"packed",
+ else => unreachable,
+ };
+ }
+
+ pub fn isAggregate(self: Type, builder: *const Builder) bool {
+ return switch (self.tag(builder)) {
+ .small_array, .array, .structure, .packed_structure, .named_structure => true,
+ else => false,
+ };
+ }
+
+ pub fn scalarBits(self: Type, builder: *const Builder) u24 {
+ return switch (self) {
+ .void, .label, .token, .metadata, .none, .x86_amx => unreachable,
+ .i1 => 1,
+ .i8 => 8,
+ .half, .bfloat, .i16 => 16,
+ .i29 => 29,
+ .float, .i32 => 32,
+ .double, .i64, .x86_mmx => 64,
+ .x86_fp80, .i80 => 80,
+ .fp128, .ppc_fp128, .i128 => 128,
+ .ptr => @panic("TODO: query data layout"),
+ _ => {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .simple,
+ .function,
+ .vararg_function,
+ => unreachable,
+ .integer => @intCast(item.data),
+ .pointer => @panic("TODO: query data layout"),
+ .target => unreachable,
+ .vector,
+ .scalable_vector,
+ => builder.typeExtraData(Type.Vector, item.data).child.scalarBits(builder),
+ .small_array,
+ .array,
+ .structure,
+ .packed_structure,
+ .named_structure,
+ => unreachable,
+ };
+ },
+ };
+ }
+
+ pub fn childType(self: Type, builder: *const Builder) Type {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .vector,
+ .scalable_vector,
+ .small_array,
+ => builder.typeExtraData(Type.Vector, item.data).child,
+ .array => builder.typeExtraData(Type.Array, item.data).child,
+ .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body,
+ else => unreachable,
+ };
+ }
+
+ pub fn scalarType(self: Type, builder: *const Builder) Type {
+ if (self.isFloatingPoint()) return self;
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .integer,
+ .pointer,
+ => self,
+ .vector,
+ .scalable_vector,
+ => builder.typeExtraData(Type.Vector, item.data).child,
+ else => unreachable,
+ };
+ }
+
+ pub fn changeScalar(self: Type, scalar: Type, builder: *Builder) Allocator.Error!Type {
+ try builder.ensureUnusedTypeCapacity(1, Type.Vector, 0);
+ return self.changeScalarAssumeCapacity(scalar, builder);
+ }
+
+ pub fn changeScalarAssumeCapacity(self: Type, scalar: Type, builder: *Builder) Type {
+ if (self.isFloatingPoint()) return scalar;
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .integer,
+ .pointer,
+ => scalar,
+ inline .vector,
+ .scalable_vector,
+ => |kind| builder.vectorTypeAssumeCapacity(
+ switch (kind) {
+ .vector => .normal,
+ .scalable_vector => .scalable,
+ else => unreachable,
+ },
+ builder.typeExtraData(Type.Vector, item.data).len,
+ scalar,
+ ),
+ else => unreachable,
+ };
+ }
+
+ pub fn vectorLen(self: Type, builder: *const Builder) u32 {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .vector,
+ .scalable_vector,
+ => builder.typeExtraData(Type.Vector, item.data).len,
+ else => unreachable,
+ };
+ }
+
+ pub fn changeLength(self: Type, len: u32, builder: *Builder) Allocator.Error!Type {
+ try builder.ensureUnusedTypeCapacity(1, Type.Array, 0);
+ return self.changeLengthAssumeCapacity(len, builder);
+ }
+
+ pub fn changeLengthAssumeCapacity(self: Type, len: u32, builder: *Builder) Type {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ inline .vector,
+ .scalable_vector,
+ => |kind| builder.vectorTypeAssumeCapacity(
+ switch (kind) {
+ .vector => .normal,
+ .scalable_vector => .scalable,
+ else => unreachable,
+ },
+ len,
+ builder.typeExtraData(Type.Vector, item.data).child,
+ ),
+ .small_array => builder.arrayTypeAssumeCapacity(
+ len,
+ builder.typeExtraData(Type.Vector, item.data).child,
+ ),
+ .array => builder.arrayTypeAssumeCapacity(
+ len,
+ builder.typeExtraData(Type.Array, item.data).child,
+ ),
+ else => unreachable,
+ };
+ }
+
+ pub fn aggregateLen(self: Type, builder: *const Builder) u64 {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .vector,
+ .scalable_vector,
+ .small_array,
+ => builder.typeExtraData(Type.Vector, item.data).len,
+ .array => builder.typeExtraData(Type.Array, item.data).length(),
+ .structure,
+ .packed_structure,
+ => builder.typeExtraData(Type.Structure, item.data).fields_len,
+ .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body
+ .aggregateLen(builder),
+ else => unreachable,
+ };
+ }
+
+ pub fn structFields(self: Type, builder: *const Builder) []const Type {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ switch (item.tag) {
+ .structure,
+ .packed_structure,
+ => {
+ var extra = builder.typeExtraDataTrail(Type.Structure, item.data);
+ return extra.trail.next(extra.data.fields_len, Type, builder);
+ },
+ .named_structure => return builder.typeExtraData(Type.NamedStructure, item.data).body
+ .structFields(builder),
+ else => unreachable,
+ }
+ }
+
+ pub fn childTypeAt(self: Type, indices: []const u32, builder: *const Builder) Type {
+ if (indices.len == 0) return self;
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .small_array => builder.typeExtraData(Type.Vector, item.data).child
+ .childTypeAt(indices[1..], builder),
+ .array => builder.typeExtraData(Type.Array, item.data).child
+ .childTypeAt(indices[1..], builder),
+ .structure,
+ .packed_structure,
+ => {
+ var extra = builder.typeExtraDataTrail(Type.Structure, item.data);
+ const fields = extra.trail.next(extra.data.fields_len, Type, builder);
+ return fields[indices[0]].childTypeAt(indices[1..], builder);
+ },
+ .named_structure => builder.typeExtraData(Type.NamedStructure, item.data).body
+ .childTypeAt(indices, builder),
+ else => unreachable,
+ };
+ }
+
+ pub fn targetLayoutType(self: Type, builder: *const Builder) Type {
+ _ = self;
+ _ = builder;
+ @panic("TODO: implement targetLayoutType");
+ }
+
+ pub fn isSized(self: Type, builder: *const Builder) Allocator.Error!bool {
+ var visited: IsSizedVisited = .{};
+ return self.isSizedVisited(&visited, builder);
+ }
+
+ const FormatData = struct {
+ type: Type,
+ builder: *const Builder,
+ };
+ fn format(
+ data: FormatData,
+ comptime fmt_str: []const u8,
+ fmt_opts: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ assert(data.type != .none);
+ if (comptime std.mem.eql(u8, fmt_str, "m")) {
+ const item = data.builder.type_items.items[@intFromEnum(data.type)];
+ switch (item.tag) {
+ .simple => try writer.writeAll(switch (@as(Simple, @enumFromInt(item.data))) {
+ .void => "isVoid",
+ .half => "f16",
+ .bfloat => "bf16",
+ .float => "f32",
+ .double => "f64",
+ .fp128 => "f128",
+ .x86_fp80 => "f80",
+ .ppc_fp128 => "ppcf128",
+ .x86_amx => "x86amx",
+ .x86_mmx => "x86mmx",
+ .label, .token => unreachable,
+ .metadata => "Metadata",
+ }),
+ .function, .vararg_function => |kind| {
+ var extra = data.builder.typeExtraDataTrail(Type.Function, item.data);
+ const params = extra.trail.next(extra.data.params_len, Type, data.builder);
+ try writer.print("f_{m}", .{extra.data.ret.fmt(data.builder)});
+ for (params) |param| try writer.print("{m}", .{param.fmt(data.builder)});
+ switch (kind) {
+ .function => {},
+ .vararg_function => try writer.writeAll("vararg"),
+ else => unreachable,
+ }
+ try writer.writeByte('f');
+ },
+ .integer => try writer.print("i{d}", .{item.data}),
+ .pointer => try writer.print("p{d}", .{item.data}),
+ .target => {
+ var extra = data.builder.typeExtraDataTrail(Type.Target, item.data);
+ const types = extra.trail.next(extra.data.types_len, Type, data.builder);
+ const ints = extra.trail.next(extra.data.ints_len, u32, data.builder);
+ try writer.print("t{s}", .{extra.data.name.toSlice(data.builder).?});
+ for (types) |ty| try writer.print("_{m}", .{ty.fmt(data.builder)});
+ for (ints) |int| try writer.print("_{d}", .{int});
+ try writer.writeByte('t');
+ },
+ .vector, .scalable_vector => |kind| {
+ const extra = data.builder.typeExtraData(Type.Vector, item.data);
+ try writer.print("{s}v{d}{m}", .{
+ switch (kind) {
+ .vector => "",
+ .scalable_vector => "nx",
+ else => unreachable,
+ },
+ extra.len,
+ extra.child.fmt(data.builder),
+ });
+ },
+ inline .small_array, .array => |kind| {
+ const extra = data.builder.typeExtraData(switch (kind) {
+ .small_array => Type.Vector,
+ .array => Type.Array,
+ else => unreachable,
+ }, item.data);
+ try writer.print("a{d}{m}", .{ extra.length(), extra.child.fmt(data.builder) });
+ },
+ .structure, .packed_structure => {
+ var extra = data.builder.typeExtraDataTrail(Type.Structure, item.data);
+ const fields = extra.trail.next(extra.data.fields_len, Type, data.builder);
+ try writer.writeAll("sl_");
+ for (fields) |field| try writer.print("{m}", .{field.fmt(data.builder)});
+ try writer.writeByte('s');
+ },
+ .named_structure => {
+ const extra = data.builder.typeExtraData(Type.NamedStructure, item.data);
+ try writer.writeAll("s_");
+ if (extra.id.toSlice(data.builder)) |id| try writer.writeAll(id);
+ },
+ }
+ return;
+ }
+ if (std.enums.tagName(Type, data.type)) |name| return writer.writeAll(name);
+ const item = data.builder.type_items.items[@intFromEnum(data.type)];
+ switch (item.tag) {
+ .simple => unreachable,
+ .function, .vararg_function => |kind| {
+ var extra = data.builder.typeExtraDataTrail(Type.Function, item.data);
+ const params = extra.trail.next(extra.data.params_len, Type, data.builder);
+ if (!comptime std.mem.eql(u8, fmt_str, ">"))
+ try writer.print("{%} ", .{extra.data.ret.fmt(data.builder)});
+ if (!comptime std.mem.eql(u8, fmt_str, "<")) {
+ try writer.writeByte('(');
+ for (params, 0..) |param, index| {
+ if (index > 0) try writer.writeAll(", ");
+ try writer.print("{%}", .{param.fmt(data.builder)});
+ }
+ switch (kind) {
+ .function => {},
+ .vararg_function => {
+ if (params.len > 0) try writer.writeAll(", ");
+ try writer.writeAll("...");
+ },
+ else => unreachable,
+ }
+ try writer.writeByte(')');
+ }
+ },
+ .integer => try writer.print("i{d}", .{item.data}),
+ .pointer => try writer.print("ptr{}", .{@as(AddrSpace, @enumFromInt(item.data))}),
+ .target => {
+ var extra = data.builder.typeExtraDataTrail(Type.Target, item.data);
+ const types = extra.trail.next(extra.data.types_len, Type, data.builder);
+ const ints = extra.trail.next(extra.data.ints_len, u32, data.builder);
+ try writer.print(
+ \\target({"}
+ , .{extra.data.name.fmt(data.builder)});
+ for (types) |ty| try writer.print(", {%}", .{ty.fmt(data.builder)});
+ for (ints) |int| try writer.print(", {d}", .{int});
+ try writer.writeByte(')');
+ },
+ .vector, .scalable_vector => |kind| {
+ const extra = data.builder.typeExtraData(Type.Vector, item.data);
+ try writer.print("<{s}{d} x {%}>", .{
+ switch (kind) {
+ .vector => "",
+ .scalable_vector => "vscale x ",
+ else => unreachable,
+ },
+ extra.len,
+ extra.child.fmt(data.builder),
+ });
+ },
+ inline .small_array, .array => |kind| {
+ const extra = data.builder.typeExtraData(switch (kind) {
+ .small_array => Type.Vector,
+ .array => Type.Array,
+ else => unreachable,
+ }, item.data);
+ try writer.print("[{d} x {%}]", .{ extra.length(), extra.child.fmt(data.builder) });
+ },
+ .structure, .packed_structure => |kind| {
+ var extra = data.builder.typeExtraDataTrail(Type.Structure, item.data);
+ const fields = extra.trail.next(extra.data.fields_len, Type, data.builder);
+ switch (kind) {
+ .structure => {},
+ .packed_structure => try writer.writeByte('<'),
+ else => unreachable,
+ }
+ try writer.writeAll("{ ");
+ for (fields, 0..) |field, index| {
+ if (index > 0) try writer.writeAll(", ");
+ try writer.print("{%}", .{field.fmt(data.builder)});
+ }
+ try writer.writeAll(" }");
+ switch (kind) {
+ .structure => {},
+ .packed_structure => try writer.writeByte('>'),
+ else => unreachable,
+ }
+ },
+ .named_structure => {
+ const extra = data.builder.typeExtraData(Type.NamedStructure, item.data);
+ if (comptime std.mem.eql(u8, fmt_str, "%")) try writer.print("%{}", .{
+ extra.id.fmt(data.builder),
+ }) else switch (extra.body) {
+ .none => try writer.writeAll("opaque"),
+ else => try format(.{
+ .type = extra.body,
+ .builder = data.builder,
+ }, fmt_str, fmt_opts, writer),
+ }
+ },
+ }
+ }
+ pub fn fmt(self: Type, builder: *const Builder) std.fmt.Formatter(format) {
+ return .{ .data = .{ .type = self, .builder = builder } };
+ }
+
+ pub fn toLlvm(self: Type, builder: *const Builder) *llvm.Type {
+ assert(builder.useLibLlvm());
+ return builder.llvm.types.items[@intFromEnum(self)];
+ }
+
+ const IsSizedVisited = std.AutoHashMapUnmanaged(Type, void);
+ fn isSizedVisited(
+ self: Type,
+ visited: *IsSizedVisited,
+ builder: *const Builder,
+ ) Allocator.Error!bool {
+ return switch (self) {
+ .void,
+ .label,
+ .token,
+ .metadata,
+ => false,
+ .half,
+ .bfloat,
+ .float,
+ .double,
+ .fp128,
+ .x86_fp80,
+ .ppc_fp128,
+ .x86_amx,
+ .x86_mmx,
+ .i1,
+ .i8,
+ .i16,
+ .i29,
+ .i32,
+ .i64,
+ .i80,
+ .i128,
+ .ptr,
+ => true,
+ .none => unreachable,
+ _ => {
+ const item = builder.type_items.items[@intFromEnum(self)];
+ return switch (item.tag) {
+ .simple => unreachable,
+ .function,
+ .vararg_function,
+ => false,
+ .integer,
+ .pointer,
+ => true,
+ .target => self.targetLayoutType(builder).isSizedVisited(visited, builder),
+ .vector,
+ .scalable_vector,
+ .small_array,
+ => builder.typeExtraData(Type.Vector, item.data)
+ .child.isSizedVisited(visited, builder),
+ .array => builder.typeExtraData(Type.Array, item.data)
+ .child.isSizedVisited(visited, builder),
+ .structure,
+ .packed_structure,
+ => {
+ if (try visited.fetchPut(builder.gpa, self, {})) |_| return false;
+
+ var extra = builder.typeExtraDataTrail(Type.Structure, item.data);
+ const fields = extra.trail.next(extra.data.fields_len, Type, builder);
+ for (fields) |field| {
+ if (field.isVector(builder) and field.vectorKind(builder) == .scalable)
+ return false;
+ if (!try field.isSizedVisited(visited, builder))
+ return false;
+ }
+ return true;
+ },
+ .named_structure => {
+ const body = builder.typeExtraData(Type.NamedStructure, item.data).body;
+ return body != .none and try body.isSizedVisited(visited, builder);
+ },
+ };
+ },
+ };
+ }
+};
+
+pub const Linkage = enum {
+ external,
+ private,
+ internal,
+ available_externally,
+ linkonce,
+ weak,
+ common,
+ appending,
+ extern_weak,
+ linkonce_odr,
+ weak_odr,
+
+ pub fn format(
+ self: Linkage,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .external) try writer.print(" {s}", .{@tagName(self)});
+ }
+};
+
+pub const Preemption = enum {
+ dso_preemptable,
+ dso_local,
+ implicit_dso_local,
+
+ pub fn format(
+ self: Preemption,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self == .dso_local) try writer.print(" {s}", .{@tagName(self)});
+ }
+};
+
+pub const Visibility = enum {
+ default,
+ hidden,
+ protected,
+
+ pub fn format(
+ self: Visibility,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .default) try writer.print(" {s}", .{@tagName(self)});
+ }
+};
+
+pub const DllStorageClass = enum {
+ default,
+ dllimport,
+ dllexport,
+
+ pub fn format(
+ self: DllStorageClass,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .default) try writer.print(" {s}", .{@tagName(self)});
+ }
+};
+
+pub const ThreadLocal = enum {
+ default,
+ generaldynamic,
+ localdynamic,
+ initialexec,
+ localexec,
+
+ pub fn format(
+ self: ThreadLocal,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self == .default) return;
+ try writer.writeAll(" thread_local");
+ if (self != .generaldynamic) {
+ try writer.writeByte('(');
+ try writer.writeAll(@tagName(self));
+ try writer.writeByte(')');
+ }
+ }
+};
+
+pub const UnnamedAddr = enum {
+ default,
+ unnamed_addr,
+ local_unnamed_addr,
+
+ pub fn format(
+ self: UnnamedAddr,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .default) try writer.print(" {s}", .{@tagName(self)});
+ }
+};
+
+pub const AddrSpace = enum(u24) {
+ default,
+ _,
+
+ // See llvm/lib/Target/X86/X86.h
+ pub const x86 = struct {
+ pub const gs: AddrSpace = @enumFromInt(256);
+ pub const fs: AddrSpace = @enumFromInt(257);
+ pub const ss: AddrSpace = @enumFromInt(258);
+
+ pub const ptr32_sptr: AddrSpace = @enumFromInt(270);
+ pub const ptr32_uptr: AddrSpace = @enumFromInt(271);
+ pub const ptr64: AddrSpace = @enumFromInt(272);
+ };
+ pub const x86_64 = x86;
+
+ // See llvm/lib/Target/AVR/AVR.h
+ pub const avr = struct {
+ pub const flash: AddrSpace = @enumFromInt(1);
+ pub const flash1: AddrSpace = @enumFromInt(2);
+ pub const flash2: AddrSpace = @enumFromInt(3);
+ pub const flash3: AddrSpace = @enumFromInt(4);
+ pub const flash4: AddrSpace = @enumFromInt(5);
+ pub const flash5: AddrSpace = @enumFromInt(6);
+ };
+
+ // See llvm/lib/Target/NVPTX/NVPTX.h
+ pub const nvptx = struct {
+ pub const generic: AddrSpace = @enumFromInt(0);
+ pub const global: AddrSpace = @enumFromInt(1);
+ pub const constant: AddrSpace = @enumFromInt(2);
+ pub const shared: AddrSpace = @enumFromInt(3);
+ pub const param: AddrSpace = @enumFromInt(4);
+ pub const local: AddrSpace = @enumFromInt(5);
+ };
+
+ // See llvm/lib/Target/AMDGPU/AMDGPU.h
+ pub const amdgpu = struct {
+ pub const flat: AddrSpace = @enumFromInt(0);
+ pub const global: AddrSpace = @enumFromInt(1);
+ pub const region: AddrSpace = @enumFromInt(2);
+ pub const local: AddrSpace = @enumFromInt(3);
+ pub const constant: AddrSpace = @enumFromInt(4);
+ pub const private: AddrSpace = @enumFromInt(5);
+ pub const constant_32bit: AddrSpace = @enumFromInt(6);
+ pub const buffer_fat_pointer: AddrSpace = @enumFromInt(7);
+ pub const param_d: AddrSpace = @enumFromInt(6);
+ pub const param_i: AddrSpace = @enumFromInt(7);
+ pub const constant_buffer_0: AddrSpace = @enumFromInt(8);
+ pub const constant_buffer_1: AddrSpace = @enumFromInt(9);
+ pub const constant_buffer_2: AddrSpace = @enumFromInt(10);
+ pub const constant_buffer_3: AddrSpace = @enumFromInt(11);
+ pub const constant_buffer_4: AddrSpace = @enumFromInt(12);
+ pub const constant_buffer_5: AddrSpace = @enumFromInt(13);
+ pub const constant_buffer_6: AddrSpace = @enumFromInt(14);
+ pub const constant_buffer_7: AddrSpace = @enumFromInt(15);
+ pub const constant_buffer_8: AddrSpace = @enumFromInt(16);
+ pub const constant_buffer_9: AddrSpace = @enumFromInt(17);
+ pub const constant_buffer_10: AddrSpace = @enumFromInt(18);
+ pub const constant_buffer_11: AddrSpace = @enumFromInt(19);
+ pub const constant_buffer_12: AddrSpace = @enumFromInt(20);
+ pub const constant_buffer_13: AddrSpace = @enumFromInt(21);
+ pub const constant_buffer_14: AddrSpace = @enumFromInt(22);
+ pub const constant_buffer_15: AddrSpace = @enumFromInt(23);
+ };
+
+ // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h
+ pub const wasm = struct {
+ pub const variable: AddrSpace = @enumFromInt(1);
+ pub const externref: AddrSpace = @enumFromInt(10);
+ pub const funcref: AddrSpace = @enumFromInt(20);
+ };
+
+ pub fn format(
+ self: AddrSpace,
+ comptime prefix: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .default) try writer.print("{s} addrspace({d})", .{ prefix, @intFromEnum(self) });
+ }
+};
+
+pub const ExternallyInitialized = enum {
+ default,
+ externally_initialized,
+
+ pub fn format(
+ self: ExternallyInitialized,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self == .default) return;
+ try writer.writeByte(' ');
+ try writer.writeAll(@tagName(self));
+ }
+};
+
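+/// Alignment stored as the log2 of the byte amount; `maxInt(u6)` is the sentinel for "no explicit alignment".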
+pub const Alignment = enum(u6) {
+ default = std.math.maxInt(u6),
+ _,
+
+ pub fn fromByteUnits(bytes: u64) Alignment {
+ if (bytes == 0) return .default;
+ assert(std.math.isPowerOfTwo(bytes));
+ assert(bytes <= 1 << 32);
+ return @enumFromInt(@ctz(bytes));
+ }
+
+ pub fn toByteUnits(self: Alignment) ?u64 {
+ return if (self == .default) null else @as(u64, 1) << @intFromEnum(self);
+ }
+
+ pub fn format(
+ self: Alignment,
+ comptime prefix: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ try writer.print("{s} align {d}", .{ prefix, self.toByteUnits() orelse return });
+ }
+};
+
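+/// State shared by every global (aliases, variables, and functions). `kind` selects the specific
+/// payload; a `.replaced` kind forwards to the global that replaced this one (see `Index.unwrap`).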
+pub const Global = struct {
+ linkage: Linkage = .external,
+ preemption: Preemption = .dso_preemptable,
+ visibility: Visibility = .default,
+ dll_storage_class: DllStorageClass = .default,
+ unnamed_addr: UnnamedAddr = .default,
+ addr_space: AddrSpace = .default,
+ externally_initialized: ExternallyInitialized = .default,
+ type: Type,
+ partition: String = .none,
+ kind: union(enum) {
+ alias: Alias.Index,
+ variable: Variable.Index,
+ function: Function.Index,
+ replaced: Global.Index,
+ },
+
+ pub const Index = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
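+        /// Follows the chain of `.replaced` globals and returns the index of the live global.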
+ pub fn unwrap(self: Index, builder: *const Builder) Index {
+ var cur = self;
+ while (true) {
+ const replacement = cur.getReplacement(builder);
+ if (replacement == .none) return cur;
+ cur = replacement;
+ }
+ }
+
+ pub fn eql(self: Index, other: Index, builder: *const Builder) bool {
+ return self.unwrap(builder) == other.unwrap(builder);
+ }
+
+ pub fn name(self: Index, builder: *const Builder) String {
+ return builder.globals.keys()[@intFromEnum(self.unwrap(builder))];
+ }
+
+ pub fn ptr(self: Index, builder: *Builder) *Global {
+ return &builder.globals.values()[@intFromEnum(self.unwrap(builder))];
+ }
+
+ pub fn ptrConst(self: Index, builder: *const Builder) *const Global {
+ return &builder.globals.values()[@intFromEnum(self.unwrap(builder))];
+ }
+
+ pub fn typeOf(self: Index, builder: *const Builder) Type {
+ return self.ptrConst(builder).type;
+ }
+
+ pub fn toConst(self: Index) Constant {
+ return @enumFromInt(@intFromEnum(Constant.first_global) + @intFromEnum(self));
+ }
+
+ pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
+ assert(builder.useLibLlvm());
+ return builder.llvm.globals.items[@intFromEnum(self.unwrap(builder))];
+ }
+
+ const FormatData = struct {
+ global: Index,
+ builder: *const Builder,
+ };
+ fn format(
+ data: FormatData,
+ comptime _: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ try writer.print("@{}", .{
+ data.global.unwrap(data.builder).name(data.builder).fmt(data.builder),
+ });
+ }
+ pub fn fmt(self: Index, builder: *const Builder) std.fmt.Formatter(format) {
+ return .{ .data = .{ .global = self, .builder = builder } };
+ }
+
+ pub fn rename(self: Index, new_name: String, builder: *Builder) Allocator.Error!void {
+ try builder.ensureUnusedGlobalCapacity(new_name);
+ self.renameAssumeCapacity(new_name, builder);
+ }
+
+ pub fn takeName(self: Index, other: Index, builder: *Builder) Allocator.Error!void {
+ try builder.ensureUnusedGlobalCapacity(.empty);
+ self.takeNameAssumeCapacity(other, builder);
+ }
+
+ pub fn replace(self: Index, other: Index, builder: *Builder) Allocator.Error!void {
+ try builder.ensureUnusedGlobalCapacity(.empty);
+ self.replaceAssumeCapacity(other, builder);
+ }
+
+ fn renameAssumeCapacity(self: Index, new_name: String, builder: *Builder) void {
+ const old_name = self.name(builder);
+ if (new_name == old_name) return;
+ const index = @intFromEnum(self.unwrap(builder));
+ if (builder.useLibLlvm())
+ builder.llvm.globals.appendAssumeCapacity(builder.llvm.globals.items[index]);
+ _ = builder.addGlobalAssumeCapacity(new_name, builder.globals.values()[index]);
+ if (builder.useLibLlvm()) _ = builder.llvm.globals.pop();
+ builder.globals.swapRemoveAt(index);
+ self.updateName(builder);
+ if (!old_name.isAnon()) return;
+ builder.next_unnamed_global = @enumFromInt(@intFromEnum(builder.next_unnamed_global) - 1);
+ if (builder.next_unnamed_global == old_name) return;
+ builder.getGlobal(builder.next_unnamed_global).?.renameAssumeCapacity(old_name, builder);
+ }
+
+ fn takeNameAssumeCapacity(self: Index, other: Index, builder: *Builder) void {
+ const other_name = other.name(builder);
+ other.renameAssumeCapacity(.empty, builder);
+ self.renameAssumeCapacity(other_name, builder);
+ }
+
+ fn updateName(self: Index, builder: *const Builder) void {
+ if (!builder.useLibLlvm()) return;
+ const index = @intFromEnum(self.unwrap(builder));
+ const name_slice = self.name(builder).toSlice(builder) orelse "";
+ builder.llvm.globals.items[index].setValueName2(name_slice.ptr, name_slice.len);
+ }
+
+ fn replaceAssumeCapacity(self: Index, other: Index, builder: *Builder) void {
+ if (self.eql(other, builder)) return;
+ builder.next_replaced_global = @enumFromInt(@intFromEnum(builder.next_replaced_global) - 1);
+ self.renameAssumeCapacity(builder.next_replaced_global, builder);
+ if (builder.useLibLlvm()) {
+ const self_llvm = self.toLlvm(builder);
+ self_llvm.replaceAllUsesWith(other.toLlvm(builder));
+ switch (self.ptr(builder).kind) {
+ .alias,
+ .variable,
+ => self_llvm.deleteGlobal(),
+ .function => self_llvm.deleteFunction(),
+ .replaced => unreachable,
+ }
+ }
+ self.ptr(builder).kind = .{ .replaced = other.unwrap(builder) };
+ }
+
+ fn getReplacement(self: Index, builder: *const Builder) Index {
+ return switch (builder.globals.values()[@intFromEnum(self)].kind) {
+ .replaced => |replacement| replacement,
+ else => .none,
+ };
+ }
+ };
+
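+    /// Canonicalizes attribute combinations: local (private/internal) linkage forces default
+    /// visibility and DLL storage class and implies dso_local; otherwise preemption is derived
+    /// from the linkage and visibility.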
+ pub fn updateAttributes(self: *Global) void {
+ switch (self.linkage) {
+ .private, .internal => {
+ self.visibility = .default;
+ self.dll_storage_class = .default;
+ self.preemption = .implicit_dso_local;
+ },
+ .extern_weak => if (self.preemption == .implicit_dso_local) {
+ self.preemption = .dso_local;
+ },
+ else => switch (self.visibility) {
+ .default => if (self.preemption == .implicit_dso_local) {
+ self.preemption = .dso_local;
+ },
+ else => self.preemption = .implicit_dso_local,
+ },
+ }
+ }
+};
+
+pub const Alias = struct {
+ global: Global.Index,
+ thread_local: ThreadLocal = .default,
+ init: Constant = .no_init,
+
+ pub const Index = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn getAliasee(self: Index, builder: *const Builder) Global.Index {
+ const aliasee = self.ptrConst(builder).init.getBase(builder);
+ assert(aliasee != .none);
+ return aliasee;
+ }
+
+ pub fn ptr(self: Index, builder: *Builder) *Alias {
+ return &builder.aliases.items[@intFromEnum(self)];
+ }
+
+ pub fn ptrConst(self: Index, builder: *const Builder) *const Alias {
+ return &builder.aliases.items[@intFromEnum(self)];
+ }
+
+ pub fn typeOf(self: Index, builder: *const Builder) Type {
+ return self.ptrConst(builder).global.typeOf(builder);
+ }
+
+ pub fn toConst(self: Index, builder: *const Builder) Constant {
+ return self.ptrConst(builder).global.toConst();
+ }
+
+ pub fn toValue(self: Index, builder: *const Builder) Value {
+ return self.toConst(builder).toValue();
+ }
+
+ pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
+ return self.ptrConst(builder).global.toLlvm(builder);
+ }
+ };
+};
+
+pub const Variable = struct {
+ global: Global.Index,
+ thread_local: ThreadLocal = .default,
+ mutability: enum { global, constant } = .global,
+ init: Constant = .no_init,
+ section: String = .none,
+ alignment: Alignment = .default,
+
+ pub const Index = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn ptr(self: Index, builder: *Builder) *Variable {
+ return &builder.variables.items[@intFromEnum(self)];
+ }
+
+ pub fn ptrConst(self: Index, builder: *const Builder) *const Variable {
+ return &builder.variables.items[@intFromEnum(self)];
+ }
+
+ pub fn typeOf(self: Index, builder: *const Builder) Type {
+ return self.ptrConst(builder).global.typeOf(builder);
+ }
+
+ pub fn toConst(self: Index, builder: *const Builder) Constant {
+ return self.ptrConst(builder).global.toConst();
+ }
+
+ pub fn toValue(self: Index, builder: *const Builder) Value {
+ return self.toConst(builder).toValue();
+ }
+
+ pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
+ return self.ptrConst(builder).global.toLlvm(builder);
+ }
+ };
+};
+
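+/// A finalized function body. Instructions live in a MultiArrayList; each instruction is a `tag`
+/// plus a 32-bit `data` word interpreted per tag as an operand, an immediate, or an index into `extra`.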
+pub const Function = struct {
+ global: Global.Index,
+ section: String = .none,
+ alignment: Alignment = .default,
+ blocks: []const Block = &.{},
+ instructions: std.MultiArrayList(Instruction) = .{},
+ names: [*]const String = &[0]String{},
+ metadata: ?[*]const Metadata = null,
+ extra: []const u32 = &.{},
+
+ pub const Index = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn ptr(self: Index, builder: *Builder) *Function {
+ return &builder.functions.items[@intFromEnum(self)];
+ }
+
+ pub fn ptrConst(self: Index, builder: *const Builder) *const Function {
+ return &builder.functions.items[@intFromEnum(self)];
+ }
+
+ pub fn typeOf(self: Index, builder: *const Builder) Type {
+ return self.ptrConst(builder).global.typeOf(builder);
+ }
+
+ pub fn toConst(self: Index, builder: *const Builder) Constant {
+ return self.ptrConst(builder).global.toConst();
+ }
+
+ pub fn toValue(self: Index, builder: *const Builder) Value {
+ return self.toConst(builder).toValue();
+ }
+
+ pub fn toLlvm(self: Index, builder: *const Builder) *llvm.Value {
+ return self.ptrConst(builder).global.toLlvm(builder);
+ }
+ };
+
+ pub const Block = struct {
+ instruction: Instruction.Index,
+
+ pub const Index = WipFunction.Block.Index;
+ };
+
+ pub const Instruction = struct {
+ tag: Tag,
+ data: u32,
+
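+        /// Tags are named after the textual form of the instruction they encode, including flag
+        /// variants such as `add nuw nsw`; overloaded intrinsic tags such as `llvm.smax.` keep a
+        /// trailing `.`.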
+ pub const Tag = enum(u8) {
+ add,
+ @"add nsw",
+ @"add nuw",
+ @"add nuw nsw",
+ addrspacecast,
+ alloca,
+ @"alloca inalloca",
+ @"and",
+ arg,
+ ashr,
+ @"ashr exact",
+ bitcast,
+ block,
+ br,
+ br_cond,
+ extractelement,
+ extractvalue,
+ fadd,
+ @"fadd fast",
+ @"fcmp false",
+ @"fcmp fast false",
+ @"fcmp fast oeq",
+ @"fcmp fast oge",
+ @"fcmp fast ogt",
+ @"fcmp fast ole",
+ @"fcmp fast olt",
+ @"fcmp fast one",
+ @"fcmp fast ord",
+ @"fcmp fast true",
+ @"fcmp fast ueq",
+ @"fcmp fast uge",
+ @"fcmp fast ugt",
+ @"fcmp fast ule",
+ @"fcmp fast ult",
+ @"fcmp fast une",
+ @"fcmp fast uno",
+ @"fcmp oeq",
+ @"fcmp oge",
+ @"fcmp ogt",
+ @"fcmp ole",
+ @"fcmp olt",
+ @"fcmp one",
+ @"fcmp ord",
+ @"fcmp true",
+ @"fcmp ueq",
+ @"fcmp uge",
+ @"fcmp ugt",
+ @"fcmp ule",
+ @"fcmp ult",
+ @"fcmp une",
+ @"fcmp uno",
+ fdiv,
+ @"fdiv fast",
+ fence,
+ fmul,
+ @"fmul fast",
+ fneg,
+ @"fneg fast",
+ fpext,
+ fptosi,
+ fptoui,
+ fptrunc,
+ frem,
+ @"frem fast",
+ fsub,
+ @"fsub fast",
+ getelementptr,
+ @"getelementptr inbounds",
+ @"icmp eq",
+ @"icmp ne",
+ @"icmp sge",
+ @"icmp sgt",
+ @"icmp sle",
+ @"icmp slt",
+ @"icmp uge",
+ @"icmp ugt",
+ @"icmp ule",
+ @"icmp ult",
+ insertelement,
+ insertvalue,
+ inttoptr,
+ @"llvm.maxnum.",
+ @"llvm.minnum.",
+ @"llvm.sadd.sat.",
+ @"llvm.smax.",
+ @"llvm.smin.",
+ @"llvm.smul.fix.sat.",
+ @"llvm.sshl.sat.",
+ @"llvm.ssub.sat.",
+ @"llvm.uadd.sat.",
+ @"llvm.umax.",
+ @"llvm.umin.",
+ @"llvm.umul.fix.sat.",
+ @"llvm.ushl.sat.",
+ @"llvm.usub.sat.",
+ load,
+ @"load atomic",
+ @"load atomic volatile",
+ @"load volatile",
+ lshr,
+ @"lshr exact",
+ mul,
+ @"mul nsw",
+ @"mul nuw",
+ @"mul nuw nsw",
+ @"or",
+ phi,
+ @"phi fast",
+ ptrtoint,
+ ret,
+ @"ret void",
+ sdiv,
+ @"sdiv exact",
+ select,
+ @"select fast",
+ sext,
+ shl,
+ @"shl nsw",
+ @"shl nuw",
+ @"shl nuw nsw",
+ shufflevector,
+ sitofp,
+ srem,
+ store,
+ @"store atomic",
+ @"store atomic volatile",
+ @"store volatile",
+ sub,
+ @"sub nsw",
+ @"sub nuw",
+ @"sub nuw nsw",
+ @"switch",
+ trunc,
+ udiv,
+ @"udiv exact",
+ urem,
+ uitofp,
+ unimplemented,
+ @"unreachable",
+ va_arg,
+ xor,
+ zext,
+ };
+
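+        /// Index of an instruction within its function. The sentinel is `maxInt(u31)` rather than
+        /// `maxInt(u32)`, which keeps instruction indices in the lower half of the `Value` encoding
+        /// (see `toValue`).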
+ pub const Index = enum(u32) {
+ none = std.math.maxInt(u31),
+ _,
+
+ pub fn name(self: Instruction.Index, function: *const Function) String {
+ return function.names[@intFromEnum(self)];
+ }
+
+ pub fn toValue(self: Instruction.Index) Value {
+ return @enumFromInt(@intFromEnum(self));
+ }
+
+ pub fn isTerminatorWip(self: Instruction.Index, wip: *const WipFunction) bool {
+ return switch (wip.instructions.items(.tag)[@intFromEnum(self)]) {
+ .br,
+ .br_cond,
+ .ret,
+                    .@"ret void",
+                    .@"switch",
+ .@"unreachable",
+ => true,
+ else => false,
+ };
+ }
+
+ pub fn hasResultWip(self: Instruction.Index, wip: *const WipFunction) bool {
+ return switch (wip.instructions.items(.tag)[@intFromEnum(self)]) {
+ .br,
+ .br_cond,
+ .fence,
+ .ret,
+ .@"ret void",
+ .store,
+ .@"store atomic",
+ .@"store atomic volatile",
+                    .@"store volatile",
+                    .@"switch",
+ .@"unreachable",
+ => false,
+ else => true,
+ };
+ }
+
+ pub fn typeOfWip(self: Instruction.Index, wip: *const WipFunction) Type {
+ const instruction = wip.instructions.get(@intFromEnum(self));
+ return switch (instruction.tag) {
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .@"add nuw nsw",
+ .@"and",
+ .ashr,
+ .@"ashr exact",
+ .fadd,
+ .@"fadd fast",
+ .fdiv,
+ .@"fdiv fast",
+ .fmul,
+ .@"fmul fast",
+ .frem,
+ .@"frem fast",
+ .fsub,
+ .@"fsub fast",
+ .@"llvm.maxnum.",
+ .@"llvm.minnum.",
+ .@"llvm.sadd.sat.",
+ .@"llvm.smax.",
+ .@"llvm.smin.",
+ .@"llvm.smul.fix.sat.",
+ .@"llvm.sshl.sat.",
+ .@"llvm.ssub.sat.",
+ .@"llvm.uadd.sat.",
+ .@"llvm.umax.",
+ .@"llvm.umin.",
+ .@"llvm.umul.fix.sat.",
+ .@"llvm.ushl.sat.",
+ .@"llvm.usub.sat.",
+ .lshr,
+ .@"lshr exact",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .@"mul nuw nsw",
+ .@"or",
+ .sdiv,
+ .@"sdiv exact",
+ .shl,
+ .@"shl nsw",
+ .@"shl nuw",
+ .@"shl nuw nsw",
+ .srem,
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .@"sub nuw nsw",
+ .udiv,
+ .@"udiv exact",
+ .urem,
+ .xor,
+ => wip.extraData(Binary, instruction.data).lhs.typeOfWip(wip),
+ .addrspacecast,
+ .bitcast,
+ .fpext,
+ .fptosi,
+ .fptoui,
+ .fptrunc,
+ .inttoptr,
+ .ptrtoint,
+ .sext,
+ .sitofp,
+ .trunc,
+ .uitofp,
+ .zext,
+ => wip.extraData(Cast, instruction.data).type,
+ .alloca,
+ .@"alloca inalloca",
+ => wip.builder.ptrTypeAssumeCapacity(
+ wip.extraData(Alloca, instruction.data).info.addr_space,
+ ),
+ .arg => wip.function.typeOf(wip.builder)
+ .functionParameters(wip.builder)[instruction.data],
+ .block => .label,
+ .br,
+ .br_cond,
+ .fence,
+ .ret,
+ .@"ret void",
+ .store,
+ .@"store atomic",
+ .@"store atomic volatile",
+ .@"store volatile",
+ .@"switch",
+ .@"unreachable",
+ => .none,
+ .extractelement => wip.extraData(ExtractElement, instruction.data)
+ .val.typeOfWip(wip).childType(wip.builder),
+ .extractvalue => {
+ var extra = wip.extraDataTrail(ExtractValue, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, u32, wip);
+ return extra.data.val.typeOfWip(wip).childTypeAt(indices, wip.builder);
+ },
+ .@"fcmp false",
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ => wip.extraData(Binary, instruction.data).lhs.typeOfWip(wip)
+ .changeScalarAssumeCapacity(.i1, wip.builder),
+ .fneg,
+ .@"fneg fast",
+ => @as(Value, @enumFromInt(instruction.data)).typeOfWip(wip),
+ .getelementptr,
+ .@"getelementptr inbounds",
+ => {
+ var extra = wip.extraDataTrail(GetElementPtr, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, Value, wip);
+ const base_ty = extra.data.base.typeOfWip(wip);
+ if (!base_ty.isVector(wip.builder)) for (indices) |index| {
+ const index_ty = index.typeOfWip(wip);
+ if (!index_ty.isVector(wip.builder)) continue;
+ return index_ty.changeScalarAssumeCapacity(base_ty, wip.builder);
+ };
+ return base_ty;
+ },
+ .insertelement => wip.extraData(InsertElement, instruction.data).val.typeOfWip(wip),
+ .insertvalue => wip.extraData(InsertValue, instruction.data).val.typeOfWip(wip),
+ .load,
+ .@"load atomic",
+ .@"load atomic volatile",
+ .@"load volatile",
+ => wip.extraData(Load, instruction.data).type,
+ .phi,
+ .@"phi fast",
+ => wip.extraData(Phi, instruction.data).type,
+ .select,
+ .@"select fast",
+ => wip.extraData(Select, instruction.data).lhs.typeOfWip(wip),
+ .shufflevector => {
+ const extra = wip.extraData(ShuffleVector, instruction.data);
+ return extra.lhs.typeOfWip(wip).changeLengthAssumeCapacity(
+ extra.mask.typeOfWip(wip).vectorLen(wip.builder),
+ wip.builder,
+ );
+ },
+ .unimplemented => @enumFromInt(instruction.data),
+ .va_arg => wip.extraData(VaArg, instruction.data).type,
+ };
+ }
+
+ pub fn typeOf(
+ self: Instruction.Index,
+ function_index: Function.Index,
+ builder: *Builder,
+ ) Type {
+ const function = function_index.ptrConst(builder);
+ const instruction = function.instructions.get(@intFromEnum(self));
+ return switch (instruction.tag) {
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .@"add nuw nsw",
+ .@"and",
+ .ashr,
+ .@"ashr exact",
+ .fadd,
+ .@"fadd fast",
+ .fdiv,
+ .@"fdiv fast",
+ .fmul,
+ .@"fmul fast",
+ .frem,
+ .@"frem fast",
+ .fsub,
+ .@"fsub fast",
+ .@"llvm.maxnum.",
+ .@"llvm.minnum.",
+ .@"llvm.sadd.sat.",
+ .@"llvm.smax.",
+ .@"llvm.smin.",
+ .@"llvm.smul.fix.sat.",
+ .@"llvm.sshl.sat.",
+ .@"llvm.ssub.sat.",
+ .@"llvm.uadd.sat.",
+ .@"llvm.umax.",
+ .@"llvm.umin.",
+ .@"llvm.umul.fix.sat.",
+ .@"llvm.ushl.sat.",
+ .@"llvm.usub.sat.",
+ .lshr,
+ .@"lshr exact",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .@"mul nuw nsw",
+ .@"or",
+ .sdiv,
+ .@"sdiv exact",
+ .shl,
+ .@"shl nsw",
+ .@"shl nuw",
+ .@"shl nuw nsw",
+ .srem,
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .@"sub nuw nsw",
+ .udiv,
+ .@"udiv exact",
+ .urem,
+ .xor,
+ => function.extraData(Binary, instruction.data).lhs.typeOf(function_index, builder),
+ .addrspacecast,
+ .bitcast,
+ .fpext,
+ .fptosi,
+ .fptoui,
+ .fptrunc,
+ .inttoptr,
+ .ptrtoint,
+ .sext,
+ .sitofp,
+ .trunc,
+ .uitofp,
+ .zext,
+ => function.extraData(Cast, instruction.data).type,
+ .alloca,
+ .@"alloca inalloca",
+ => builder.ptrTypeAssumeCapacity(
+ function.extraData(Alloca, instruction.data).info.addr_space,
+ ),
+ .arg => function.global.typeOf(builder)
+ .functionParameters(builder)[instruction.data],
+ .block => .label,
+ .br,
+ .br_cond,
+ .fence,
+ .ret,
+ .@"ret void",
+ .store,
+ .@"store atomic",
+ .@"store atomic volatile",
+ .@"store volatile",
+ .@"switch",
+ .@"unreachable",
+ => .none,
+ .extractelement => function.extraData(ExtractElement, instruction.data)
+ .val.typeOf(function_index, builder).childType(builder),
+ .extractvalue => {
+ var extra = function.extraDataTrail(ExtractValue, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, u32, function);
+ return extra.data.val.typeOf(function_index, builder)
+ .childTypeAt(indices, builder);
+ },
+ .@"fcmp false",
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ => function.extraData(Binary, instruction.data).lhs.typeOf(function_index, builder)
+ .changeScalarAssumeCapacity(.i1, builder),
+ .fneg,
+ .@"fneg fast",
+ => @as(Value, @enumFromInt(instruction.data)).typeOf(function_index, builder),
+ .getelementptr,
+ .@"getelementptr inbounds",
+ => {
+ var extra = function.extraDataTrail(GetElementPtr, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, Value, function);
+ const base_ty = extra.data.base.typeOf(function_index, builder);
+ if (!base_ty.isVector(builder)) for (indices) |index| {
+ const index_ty = index.typeOf(function_index, builder);
+ if (!index_ty.isVector(builder)) continue;
+ return index_ty.changeScalarAssumeCapacity(base_ty, builder);
+ };
+ return base_ty;
+ },
+ .insertelement => function.extraData(InsertElement, instruction.data)
+ .val.typeOf(function_index, builder),
+ .insertvalue => function.extraData(InsertValue, instruction.data)
+ .val.typeOf(function_index, builder),
+ .load,
+ .@"load atomic",
+ .@"load atomic volatile",
+ .@"load volatile",
+ => function.extraData(Load, instruction.data).type,
+ .phi,
+ .@"phi fast",
+ => function.extraData(Phi, instruction.data).type,
+ .select,
+ .@"select fast",
+ => function.extraData(Select, instruction.data).lhs.typeOf(function_index, builder),
+ .shufflevector => {
+ const extra = function.extraData(ShuffleVector, instruction.data);
+ return extra.lhs.typeOf(function_index, builder).changeLengthAssumeCapacity(
+ extra.mask.typeOf(function_index, builder).vectorLen(builder),
+ builder,
+ );
+ },
+ .unimplemented => @enumFromInt(instruction.data),
+ .va_arg => function.extraData(VaArg, instruction.data).type,
+ };
+ }
+
+ const FormatData = struct {
+ instruction: Instruction.Index,
+ function: Function.Index,
+ builder: *Builder,
+ };
+ fn format(
+ data: FormatData,
+ comptime fmt_str: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (comptime std.mem.indexOfNone(u8, fmt_str, ", %")) |_|
+ @compileError("invalid format string: '" ++ fmt_str ++ "'");
+ if (comptime std.mem.indexOfScalar(u8, fmt_str, ',') != null) {
+ if (data.instruction == .none) return;
+ try writer.writeByte(',');
+ }
+ if (comptime std.mem.indexOfScalar(u8, fmt_str, ' ') != null) {
+ if (data.instruction == .none) return;
+ try writer.writeByte(' ');
+ }
+ if (comptime std.mem.indexOfScalar(u8, fmt_str, '%') != null) try writer.print(
+ "{%} ",
+ .{data.instruction.typeOf(data.function, data.builder).fmt(data.builder)},
+ );
+ assert(data.instruction != .none);
+ try writer.print("%{}", .{
+ data.instruction.name(data.function.ptrConst(data.builder)).fmt(data.builder),
+ });
+ }
+ pub fn fmt(
+ self: Instruction.Index,
+ function: Function.Index,
+ builder: *Builder,
+ ) std.fmt.Formatter(format) {
+ return .{ .data = .{ .instruction = self, .function = function, .builder = builder } };
+ }
+
+ pub fn toLlvm(self: Instruction.Index, wip: *const WipFunction) *llvm.Value {
+ assert(wip.builder.useLibLlvm());
+ return wip.llvm.instructions.items[@intFromEnum(self)];
+ }
+
+ fn llvmName(self: Instruction.Index, wip: *const WipFunction) [*:0]const u8 {
+ return if (wip.builder.strip)
+ ""
+ else
+ wip.names.items[@intFromEnum(self)].toSlice(wip.builder).?;
+ }
+ };
+
+ pub const ExtraIndex = u32;
+
+ pub const BrCond = struct {
+ cond: Value,
+ then: Block.Index,
+ @"else": Block.Index,
+ };
+
+ pub const Switch = struct {
+ val: Value,
+ default: Block.Index,
+ cases_len: u32,
+ //case_vals: [cases_len]Constant,
+ //case_blocks: [cases_len]Block.Index,
+ };
+
+ pub const Binary = struct {
+ lhs: Value,
+ rhs: Value,
+ };
+
+ pub const ExtractElement = struct {
+ val: Value,
+ index: Value,
+ };
+
+ pub const InsertElement = struct {
+ val: Value,
+ elem: Value,
+ index: Value,
+ };
+
+ pub const ShuffleVector = struct {
+ lhs: Value,
+ rhs: Value,
+ mask: Value,
+ };
+
+ pub const ExtractValue = struct {
+ val: Value,
+ indices_len: u32,
+ //indices: [indices_len]u32,
+ };
+
+ pub const InsertValue = struct {
+ val: Value,
+ elem: Value,
+ indices_len: u32,
+ //indices: [indices_len]u32,
+ };
+
+ pub const Alloca = struct {
+ type: Type,
+ len: Value,
+ info: Info,
+
+ pub const Kind = enum { normal, inalloca };
+ pub const Info = packed struct(u32) {
+ alignment: Alignment,
+ addr_space: AddrSpace,
+ _: u2 = undefined,
+ };
+ };
+
+ pub const Load = struct {
+ type: Type,
+ ptr: Value,
+ info: MemoryAccessInfo,
+ };
+
+ pub const Store = struct {
+ val: Value,
+ ptr: Value,
+ info: MemoryAccessInfo,
+ };
+
+ pub const GetElementPtr = struct {
+ type: Type,
+ base: Value,
+ indices_len: u32,
+ //indices: [indices_len]Value,
+
+ pub const Kind = Constant.GetElementPtr.Kind;
+ };
+
+ pub const Cast = struct {
+ val: Value,
+ type: Type,
+
+ pub const Signedness = Constant.Cast.Signedness;
+ };
+
+ pub const Phi = struct {
+ type: Type,
+ //incoming_vals: [block.incoming]Value,
+ //incoming_blocks: [block.incoming]Block.Index,
+ };
+
+ pub const Select = struct {
+ cond: Value,
+ lhs: Value,
+ rhs: Value,
+ };
+
+ pub const VaArg = struct {
+ list: Value,
+ type: Type,
+ };
+ };
+
+ pub fn deinit(self: *Function, gpa: Allocator) void {
+ gpa.free(self.extra);
+ if (self.metadata) |metadata| gpa.free(metadata[0..self.instructions.len]);
+ gpa.free(self.names[0..self.instructions.len]);
+ self.instructions.deinit(gpa);
+ self.* = undefined;
+ }
+
+ pub fn arg(self: *const Function, index: u32) Value {
+ const argument = self.instructions.get(index);
+ assert(argument.tag == .arg);
+ assert(argument.data == index);
+
+ const argument_index: Instruction.Index = @enumFromInt(index);
+ return argument_index.toValue();
+ }
+
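+    /// Cursor for reading the variable-length item arrays that follow a fixed-size extra struct.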
+ const ExtraDataTrail = struct {
+ index: Instruction.ExtraIndex,
+
+ fn nextMut(self: *ExtraDataTrail, len: u32, comptime Item: type, function: *Function) []Item {
+ const items: []Item = @ptrCast(function.extra[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+
+ fn next(
+ self: *ExtraDataTrail,
+ len: u32,
+ comptime Item: type,
+ function: *const Function,
+ ) []const Item {
+ const items: []const Item = @ptrCast(function.extra[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+ };
+
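+    /// Decodes `T` from `extra[index..]`, reinterpreting each u32 word according to the field type,
+    /// and returns a trail cursor positioned just past the decoded struct.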
+ fn extraDataTrail(
+ self: *const Function,
+ comptime T: type,
+ index: Instruction.ExtraIndex,
+ ) struct { data: T, trail: ExtraDataTrail } {
+ var result: T = undefined;
+ const fields = @typeInfo(T).Struct.fields;
+ inline for (fields, self.extra[index..][0..fields.len]) |field, value|
+ @field(result, field.name) = switch (field.type) {
+ u32 => value,
+ Alignment, AtomicOrdering, Block.Index, Type, Value => @enumFromInt(value),
+ MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ };
+ return .{
+ .data = result,
+ .trail = .{ .index = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) },
+ };
+ }
+
+ fn extraData(self: *const Function, comptime T: type, index: Instruction.ExtraIndex) T {
+ return self.extraDataTrail(T, index).data;
+ }
+};
+
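+/// A function body under construction. When the builder is backed by the LLVM C API
+/// (`useLibLlvm()`), every appended block and instruction is also mirrored into libLLVM objects.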
+pub const WipFunction = struct {
+ builder: *Builder,
+ function: Function.Index,
+ llvm: if (build_options.have_llvm) struct {
+ builder: *llvm.Builder,
+ blocks: std.ArrayListUnmanaged(*llvm.BasicBlock),
+ instructions: std.ArrayListUnmanaged(*llvm.Value),
+ } else void,
+ cursor: Cursor,
+ blocks: std.ArrayListUnmanaged(Block),
+ instructions: std.MultiArrayList(Instruction),
+ names: std.ArrayListUnmanaged(String),
+ metadata: std.ArrayListUnmanaged(Metadata),
+ extra: std.ArrayListUnmanaged(u32),
+
+ pub const Cursor = struct { block: Block.Index, instruction: u32 = 0 };
+
+ pub const Block = struct {
+ name: String,
+ incoming: u32,
+ branches: u32 = 0,
+ instructions: std.ArrayListUnmanaged(Instruction.Index),
+
+ const Index = enum(u32) {
+ entry,
+ _,
+
+ pub fn ptr(self: Index, wip: *WipFunction) *Block {
+ return &wip.blocks.items[@intFromEnum(self)];
+ }
+
+ pub fn ptrConst(self: Index, wip: *const WipFunction) *const Block {
+ return &wip.blocks.items[@intFromEnum(self)];
+ }
+
+ pub fn toInst(self: Index, function: *const Function) Instruction.Index {
+ return function.blocks[@intFromEnum(self)].instruction;
+ }
+
+ pub fn toLlvm(self: Index, wip: *const WipFunction) *llvm.BasicBlock {
+ assert(wip.builder.useLibLlvm());
+ return wip.llvm.blocks.items[@intFromEnum(self)];
+ }
+ };
+ };
+
+ pub const Instruction = Function.Instruction;
+
+ pub fn init(builder: *Builder, function: Function.Index) Allocator.Error!WipFunction {
+ if (builder.useLibLlvm()) {
+ const llvm_function = function.toLlvm(builder);
+ while (llvm_function.getFirstBasicBlock()) |bb| bb.deleteBasicBlock();
+ }
+
+ var self = WipFunction{
+ .builder = builder,
+ .function = function,
+ .llvm = if (builder.useLibLlvm()) .{
+ .builder = builder.llvm.context.createBuilder(),
+ .blocks = .{},
+ .instructions = .{},
+ } else undefined,
+ .cursor = undefined,
+ .blocks = .{},
+ .instructions = .{},
+ .names = .{},
+ .metadata = .{},
+ .extra = .{},
+ };
+ errdefer self.deinit();
+
+ const params_len = function.typeOf(self.builder).functionParameters(self.builder).len;
+ try self.ensureUnusedExtraCapacity(params_len, NoExtra, 0);
+ try self.instructions.ensureUnusedCapacity(self.builder.gpa, params_len);
+ if (!self.builder.strip) try self.names.ensureUnusedCapacity(self.builder.gpa, params_len);
+ if (self.builder.useLibLlvm())
+ try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, params_len);
+ for (0..params_len) |param_index| {
+ self.instructions.appendAssumeCapacity(.{ .tag = .arg, .data = @intCast(param_index) });
+ if (!self.builder.strip) self.names.appendAssumeCapacity(.empty); // TODO: param names
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ function.toLlvm(self.builder).getParam(@intCast(param_index)),
+ );
+ }
+
+ return self;
+ }
+
+ pub fn arg(self: *const WipFunction, index: u32) Value {
+ const argument = self.instructions.get(index);
+ assert(argument.tag == .arg);
+ assert(argument.data == index);
+
+ const argument_index: Instruction.Index = @enumFromInt(index);
+ return argument_index.toValue();
+ }
+
+ pub fn block(self: *WipFunction, incoming: u32, name: []const u8) Allocator.Error!Block.Index {
+ try self.blocks.ensureUnusedCapacity(self.builder.gpa, 1);
+ if (self.builder.useLibLlvm()) try self.llvm.blocks.ensureUnusedCapacity(self.builder.gpa, 1);
+
+ const index: Block.Index = @enumFromInt(self.blocks.items.len);
+ const final_name = if (self.builder.strip) .empty else try self.builder.string(name);
+ self.blocks.appendAssumeCapacity(.{
+ .name = final_name,
+ .incoming = incoming,
+ .instructions = .{},
+ });
+ if (self.builder.useLibLlvm()) self.llvm.blocks.appendAssumeCapacity(
+ self.builder.llvm.context.appendBasicBlock(
+ self.function.toLlvm(self.builder),
+ final_name.toSlice(self.builder).?,
+ ),
+ );
+ return index;
+ }
+
+ pub fn ret(self: *WipFunction, val: Value) Allocator.Error!Instruction.Index {
+ assert(val.typeOfWip(self) == self.function.typeOf(self.builder).functionReturn(self.builder));
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(null, .{ .tag = .ret, .data = @intFromEnum(val) });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildRet(val.toLlvm(self)),
+ );
+ return instruction;
+ }
+
+ pub fn retVoid(self: *WipFunction) Allocator.Error!Instruction.Index {
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(null, .{ .tag = .@"ret void", .data = undefined });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildRetVoid(),
+ );
+ return instruction;
+ }
+
+ pub fn br(self: *WipFunction, dest: Block.Index) Allocator.Error!Instruction.Index {
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(null, .{ .tag = .br, .data = @intFromEnum(dest) });
+ dest.ptr(self).branches += 1;
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildBr(dest.toLlvm(self)),
+ );
+ return instruction;
+ }
+
+ pub fn brCond(
+ self: *WipFunction,
+ cond: Value,
+ then: Block.Index,
+ @"else": Block.Index,
+ ) Allocator.Error!Instruction.Index {
+ assert(cond.typeOfWip(self) == .i1);
+ try self.ensureUnusedExtraCapacity(1, Instruction.BrCond, 0);
+ const instruction = try self.addInst(null, .{
+ .tag = .br_cond,
+ .data = self.addExtraAssumeCapacity(Instruction.BrCond{
+ .cond = cond,
+ .then = then,
+ .@"else" = @"else",
+ }),
+ });
+ then.ptr(self).branches += 1;
+ @"else".ptr(self).branches += 1;
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildCondBr(cond.toLlvm(self), then.toLlvm(self), @"else".toLlvm(self)),
+ );
+ return instruction;
+ }
+
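+    /// Returned by `@"switch"`: the case slots are reserved up front and filled one at a time by
+    /// `addCase`; `finish` asserts that every reserved case was filled.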
+ pub const WipSwitch = struct {
+ index: u32,
+ instruction: Instruction.Index,
+
+ pub fn addCase(
+ self: *WipSwitch,
+ val: Constant,
+ dest: Block.Index,
+ wip: *WipFunction,
+ ) Allocator.Error!void {
+ const instruction = wip.instructions.get(@intFromEnum(self.instruction));
+ var extra = wip.extraDataTrail(Instruction.Switch, instruction.data);
+ assert(val.typeOf(wip.builder) == extra.data.val.typeOfWip(wip));
+ extra.trail.nextMut(extra.data.cases_len, Constant, wip)[self.index] = val;
+ extra.trail.nextMut(extra.data.cases_len, Block.Index, wip)[self.index] = dest;
+ self.index += 1;
+ dest.ptr(wip).branches += 1;
+ if (wip.builder.useLibLlvm())
+ self.instruction.toLlvm(wip).addCase(val.toLlvm(wip.builder), dest.toLlvm(wip));
+ }
+
+ pub fn finish(self: WipSwitch, wip: *WipFunction) void {
+ const instruction = wip.instructions.get(@intFromEnum(self.instruction));
+ const extra = wip.extraData(Instruction.Switch, instruction.data);
+ assert(self.index == extra.cases_len);
+ }
+ };
+
+ pub fn @"switch"(
+ self: *WipFunction,
+ val: Value,
+ default: Block.Index,
+ cases_len: u32,
+ ) Allocator.Error!WipSwitch {
+ try self.ensureUnusedExtraCapacity(1, Instruction.Switch, cases_len * 2);
+ const instruction = try self.addInst(null, .{
+ .tag = .@"switch",
+ .data = self.addExtraAssumeCapacity(Instruction.Switch{
+ .val = val,
+ .default = default,
+ .cases_len = cases_len,
+ }),
+ });
+ _ = self.extra.addManyAsSliceAssumeCapacity(cases_len * 2);
+ default.ptr(self).branches += 1;
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildSwitch(val.toLlvm(self), default.toLlvm(self), @intCast(cases_len)),
+ );
+ return .{ .index = 0, .instruction = instruction };
+ }
+
+ pub fn @"unreachable"(self: *WipFunction) Allocator.Error!Instruction.Index {
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(null, .{ .tag = .@"unreachable", .data = undefined });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildUnreachable(),
+ );
+ return instruction;
+ }
+
+ pub fn un(
+ self: *WipFunction,
+ tag: Instruction.Tag,
+ val: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ switch (tag) {
+ .fneg,
+ .@"fneg fast",
+ => assert(val.typeOfWip(self).scalarType(self.builder).isFloatingPoint()),
+ else => unreachable,
+ }
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(name, .{ .tag = tag, .data = @intFromEnum(val) });
+ if (self.builder.useLibLlvm()) {
+ switch (tag) {
+ .fneg => self.llvm.builder.setFastMath(false),
+ .@"fneg fast" => self.llvm.builder.setFastMath(true),
+ else => unreachable,
+ }
+ self.llvm.instructions.appendAssumeCapacity(switch (tag) {
+ .fneg, .@"fneg fast" => &llvm.Builder.buildFNeg,
+ else => unreachable,
+ }(self.llvm.builder, val.toLlvm(self), instruction.llvmName(self)));
+ }
+ return instruction.toValue();
+ }
+
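+    /// Bitwise not, implemented as `xor` with an all-ones splat of the operand type.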
+ pub fn not(self: *WipFunction, val: Value, name: []const u8) Allocator.Error!Value {
+ const ty = val.typeOfWip(self);
+ const all_ones = try self.builder.splatValue(
+ ty,
+ try self.builder.intConst(ty.scalarType(self.builder), -1),
+ );
+ return self.bin(.xor, val, all_ones, name);
+ }
+
+ pub fn neg(self: *WipFunction, val: Value, name: []const u8) Allocator.Error!Value {
+ return self.bin(.sub, try self.builder.zeroInitValue(val.typeOfWip(self)), val, name);
+ }
+
+ pub fn bin(
+ self: *WipFunction,
+ tag: Instruction.Tag,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ switch (tag) {
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .@"and",
+ .ashr,
+ .@"ashr exact",
+ .fadd,
+ .@"fadd fast",
+ .fdiv,
+ .@"fdiv fast",
+ .fmul,
+ .@"fmul fast",
+ .frem,
+ .@"frem fast",
+ .fsub,
+ .@"fsub fast",
+ .@"llvm.maxnum.",
+ .@"llvm.minnum.",
+ .@"llvm.sadd.sat.",
+ .@"llvm.smax.",
+ .@"llvm.smin.",
+ .@"llvm.smul.fix.sat.",
+ .@"llvm.sshl.sat.",
+ .@"llvm.ssub.sat.",
+ .@"llvm.uadd.sat.",
+ .@"llvm.umax.",
+ .@"llvm.umin.",
+ .@"llvm.umul.fix.sat.",
+ .@"llvm.ushl.sat.",
+ .@"llvm.usub.sat.",
+ .lshr,
+ .@"lshr exact",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .@"or",
+ .sdiv,
+ .@"sdiv exact",
+ .shl,
+ .@"shl nsw",
+ .@"shl nuw",
+ .srem,
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .udiv,
+ .@"udiv exact",
+ .urem,
+ .xor,
+ => assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)),
+ else => unreachable,
+ }
+ try self.ensureUnusedExtraCapacity(1, Instruction.Binary, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = tag,
+ .data = self.addExtraAssumeCapacity(Instruction.Binary{ .lhs = lhs, .rhs = rhs }),
+ });
+ if (self.builder.useLibLlvm()) {
+ switch (tag) {
+ .fadd,
+ .fdiv,
+ .fmul,
+ .frem,
+ .fsub,
+ => self.llvm.builder.setFastMath(false),
+ .@"fadd fast",
+ .@"fdiv fast",
+ .@"fmul fast",
+ .@"frem fast",
+ .@"fsub fast",
+ => self.llvm.builder.setFastMath(true),
+ else => {},
+ }
+ self.llvm.instructions.appendAssumeCapacity(switch (tag) {
+ .add => &llvm.Builder.buildAdd,
+ .@"add nsw" => &llvm.Builder.buildNSWAdd,
+ .@"add nuw" => &llvm.Builder.buildNUWAdd,
+ .@"and" => &llvm.Builder.buildAnd,
+ .ashr => &llvm.Builder.buildAShr,
+ .@"ashr exact" => &llvm.Builder.buildAShrExact,
+ .fadd, .@"fadd fast" => &llvm.Builder.buildFAdd,
+ .fdiv, .@"fdiv fast" => &llvm.Builder.buildFDiv,
+ .fmul, .@"fmul fast" => &llvm.Builder.buildFMul,
+ .frem, .@"frem fast" => &llvm.Builder.buildFRem,
+ .fsub, .@"fsub fast" => &llvm.Builder.buildFSub,
+ .@"llvm.maxnum." => &llvm.Builder.buildMaxNum,
+ .@"llvm.minnum." => &llvm.Builder.buildMinNum,
+ .@"llvm.sadd.sat." => &llvm.Builder.buildSAddSat,
+ .@"llvm.smax." => &llvm.Builder.buildSMax,
+ .@"llvm.smin." => &llvm.Builder.buildSMin,
+ .@"llvm.smul.fix.sat." => &llvm.Builder.buildSMulFixSat,
+ .@"llvm.sshl.sat." => &llvm.Builder.buildSShlSat,
+ .@"llvm.ssub.sat." => &llvm.Builder.buildSSubSat,
+ .@"llvm.uadd.sat." => &llvm.Builder.buildUAddSat,
+ .@"llvm.umax." => &llvm.Builder.buildUMax,
+ .@"llvm.umin." => &llvm.Builder.buildUMin,
+ .@"llvm.umul.fix.sat." => &llvm.Builder.buildUMulFixSat,
+ .@"llvm.ushl.sat." => &llvm.Builder.buildUShlSat,
+ .@"llvm.usub.sat." => &llvm.Builder.buildUSubSat,
+ .lshr => &llvm.Builder.buildLShr,
+ .@"lshr exact" => &llvm.Builder.buildLShrExact,
+ .mul => &llvm.Builder.buildMul,
+ .@"mul nsw" => &llvm.Builder.buildNSWMul,
+ .@"mul nuw" => &llvm.Builder.buildNUWMul,
+ .@"or" => &llvm.Builder.buildOr,
+ .sdiv => &llvm.Builder.buildSDiv,
+ .@"sdiv exact" => &llvm.Builder.buildExactSDiv,
+ .shl => &llvm.Builder.buildShl,
+ .@"shl nsw" => &llvm.Builder.buildNSWShl,
+ .@"shl nuw" => &llvm.Builder.buildNUWShl,
+ .srem => &llvm.Builder.buildSRem,
+ .sub => &llvm.Builder.buildSub,
+ .@"sub nsw" => &llvm.Builder.buildNSWSub,
+ .@"sub nuw" => &llvm.Builder.buildNUWSub,
+ .udiv => &llvm.Builder.buildUDiv,
+ .@"udiv exact" => &llvm.Builder.buildExactUDiv,
+ .urem => &llvm.Builder.buildURem,
+ .xor => &llvm.Builder.buildXor,
+ else => unreachable,
+ }(self.llvm.builder, lhs.toLlvm(self), rhs.toLlvm(self), instruction.llvmName(self)));
+ }
+ return instruction.toValue();
+ }
+
+ pub fn extractElement(
+ self: *WipFunction,
+ val: Value,
+ index: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(val.typeOfWip(self).isVector(self.builder));
+ assert(index.typeOfWip(self).isInteger(self.builder));
+ try self.ensureUnusedExtraCapacity(1, Instruction.ExtractElement, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = .extractelement,
+ .data = self.addExtraAssumeCapacity(Instruction.ExtractElement{
+ .val = val,
+ .index = index,
+ }),
+ });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildExtractElement(
+ val.toLlvm(self),
+ index.toLlvm(self),
+ instruction.llvmName(self),
+ ),
+ );
+ return instruction.toValue();
+ }
+
+ pub fn insertElement(
+ self: *WipFunction,
+ val: Value,
+ elem: Value,
+ index: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(val.typeOfWip(self).scalarType(self.builder) == elem.typeOfWip(self));
+ assert(index.typeOfWip(self).isInteger(self.builder));
+ try self.ensureUnusedExtraCapacity(1, Instruction.InsertElement, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = .insertelement,
+ .data = self.addExtraAssumeCapacity(Instruction.InsertElement{
+ .val = val,
+ .elem = elem,
+ .index = index,
+ }),
+ });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildInsertElement(
+ val.toLlvm(self),
+ elem.toLlvm(self),
+ index.toLlvm(self),
+ instruction.llvmName(self),
+ ),
+ );
+ return instruction.toValue();
+ }
+
+ pub fn shuffleVector(
+ self: *WipFunction,
+ lhs: Value,
+ rhs: Value,
+ mask: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(lhs.typeOfWip(self).isVector(self.builder));
+ assert(lhs.typeOfWip(self) == rhs.typeOfWip(self));
+ assert(mask.typeOfWip(self).scalarType(self.builder).isInteger(self.builder));
+        try self.ensureUnusedExtraCapacity(1, Instruction.ShuffleVector, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = .shufflevector,
+ .data = self.addExtraAssumeCapacity(Instruction.ShuffleVector{
+ .lhs = lhs,
+ .rhs = rhs,
+ .mask = mask,
+ }),
+ });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildShuffleVector(
+ lhs.toLlvm(self),
+ rhs.toLlvm(self),
+ mask.toLlvm(self),
+ instruction.llvmName(self),
+ ),
+ );
+ return instruction.toValue();
+ }
+
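+    /// Splats `elem` across `ty` using the usual LLVM idiom: insert the scalar into a poison
+    /// single-element vector, then `shufflevector` it with an all-zero mask.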
+ pub fn splatVector(
+ self: *WipFunction,
+ ty: Type,
+ elem: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ const scalar_ty = try ty.changeLength(1, self.builder);
+ const mask_ty = try ty.changeScalar(.i32, self.builder);
+ const zero = try self.builder.intConst(.i32, 0);
+ const poison = try self.builder.poisonValue(scalar_ty);
+ const mask = try self.builder.splatValue(mask_ty, zero);
+ const scalar = try self.insertElement(poison, elem, zero.toValue(), name);
+ return self.shuffleVector(scalar, poison, mask, name);
+ }
+
+ pub fn extractValue(
+ self: *WipFunction,
+ val: Value,
+ indices: []const u32,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(indices.len > 0);
+ _ = val.typeOfWip(self).childTypeAt(indices, self.builder);
+ try self.ensureUnusedExtraCapacity(1, Instruction.ExtractValue, indices.len);
+ const instruction = try self.addInst(name, .{
+ .tag = .extractvalue,
+ .data = self.addExtraAssumeCapacity(Instruction.ExtractValue{
+ .val = val,
+ .indices_len = @intCast(indices.len),
+ }),
+ });
+ self.extra.appendSliceAssumeCapacity(indices);
+ if (self.builder.useLibLlvm()) {
+ const llvm_name = instruction.llvmName(self);
+ var cur = val.toLlvm(self);
+ for (indices) |index|
+ cur = self.llvm.builder.buildExtractValue(cur, @intCast(index), llvm_name);
+ self.llvm.instructions.appendAssumeCapacity(cur);
+ }
+ return instruction.toValue();
+ }
+
+ pub fn insertValue(
+ self: *WipFunction,
+ val: Value,
+ elem: Value,
+ indices: []const u32,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(indices.len > 0);
+ assert(val.typeOfWip(self).childTypeAt(indices, self.builder) == elem.typeOfWip(self));
+ try self.ensureUnusedExtraCapacity(1, Instruction.InsertValue, indices.len);
+ const instruction = try self.addInst(name, .{
+ .tag = .insertvalue,
+ .data = self.addExtraAssumeCapacity(Instruction.InsertValue{
+ .val = val,
+ .elem = elem,
+ .indices_len = @intCast(indices.len),
+ }),
+ });
+ self.extra.appendSliceAssumeCapacity(indices);
+ if (self.builder.useLibLlvm()) {
+ const ExpectedContents = [expected_gep_indices_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa);
+ const allocator = stack.get();
+
+ const llvm_name = instruction.llvmName(self);
+ const llvm_vals = try allocator.alloc(*llvm.Value, indices.len);
+ defer allocator.free(llvm_vals);
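+            // Rebuild the nested aggregate: walk down the index path extracting each intermediate
+            // value, then apply insertvalue from the innermost level back out.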
+ llvm_vals[0] = val.toLlvm(self);
+ for (llvm_vals[1..], llvm_vals[0 .. llvm_vals.len - 1], indices[0 .. indices.len - 1]) |
+ *cur_val,
+ prev_val,
+ index,
+ | cur_val.* = self.llvm.builder.buildExtractValue(prev_val, @intCast(index), llvm_name);
+
+ var depth: usize = llvm_vals.len;
+ var cur = elem.toLlvm(self);
+ while (depth > 0) {
+ depth -= 1;
+ cur = self.llvm.builder.buildInsertValue(
+ llvm_vals[depth],
+ cur,
+ @intCast(indices[depth]),
+ llvm_name,
+ );
+ }
+ self.llvm.instructions.appendAssumeCapacity(cur);
+ }
+ return instruction.toValue();
+ }
+
+ pub fn buildAggregate(
+ self: *WipFunction,
+ ty: Type,
+ elems: []const Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(ty.aggregateLen(self.builder) == elems.len);
+ var cur = try self.builder.poisonValue(ty);
+ for (elems, 0..) |elem, index|
+ cur = try self.insertValue(cur, elem, &[_]u32{@intCast(index)}, name);
+ return cur;
+ }
+
+ pub fn alloca(
+ self: *WipFunction,
+ kind: Instruction.Alloca.Kind,
+ ty: Type,
+ len: Value,
+ alignment: Alignment,
+ addr_space: AddrSpace,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(len == .none or len.typeOfWip(self).isInteger(self.builder));
+ _ = try self.builder.ptrType(addr_space);
+ try self.ensureUnusedExtraCapacity(1, Instruction.Alloca, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = switch (kind) {
+ .normal => .alloca,
+ .inalloca => .@"alloca inalloca",
+ },
+ .data = self.addExtraAssumeCapacity(Instruction.Alloca{
+ .type = ty,
+ .len = len,
+ .info = .{ .alignment = alignment, .addr_space = addr_space },
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ const llvm_instruction = self.llvm.builder.buildAllocaInAddressSpace(
+ ty.toLlvm(self.builder),
+ @intFromEnum(addr_space),
+ instruction.llvmName(self),
+ );
+ if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a));
+ self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
+ }
+ return instruction.toValue();
+ }
+
+ pub fn load(
+ self: *WipFunction,
+ kind: MemoryAccessKind,
+ ty: Type,
+ ptr: Value,
+ alignment: Alignment,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ return self.loadAtomic(kind, ty, ptr, .system, .none, alignment, name);
+ }
+
+ pub fn loadAtomic(
+ self: *WipFunction,
+ kind: MemoryAccessKind,
+ ty: Type,
+ ptr: Value,
+ scope: SyncScope,
+ ordering: AtomicOrdering,
+ alignment: Alignment,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(ptr.typeOfWip(self).isPointer(self.builder));
+ try self.ensureUnusedExtraCapacity(1, Instruction.Load, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = switch (ordering) {
+ .none => switch (kind) {
+ .normal => .load,
+ .@"volatile" => .@"load volatile",
+ },
+ else => switch (kind) {
+ .normal => .@"load atomic",
+ .@"volatile" => .@"load atomic volatile",
+ },
+ },
+ .data = self.addExtraAssumeCapacity(Instruction.Load{
+ .type = ty,
+ .ptr = ptr,
+ .info = .{ .scope = switch (ordering) {
+ .none => .system,
+ else => scope,
+ }, .ordering = ordering, .alignment = alignment },
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ const llvm_instruction = self.llvm.builder.buildLoad(
+ ty.toLlvm(self.builder),
+ ptr.toLlvm(self),
+ instruction.llvmName(self),
+ );
+ if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering)));
+ if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a));
+ self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
+ }
+ return instruction.toValue();
+ }
+
+ pub fn store(
+ self: *WipFunction,
+ kind: MemoryAccessKind,
+ val: Value,
+ ptr: Value,
+ alignment: Alignment,
+ ) Allocator.Error!Instruction.Index {
+ return self.storeAtomic(kind, val, ptr, .system, .none, alignment);
+ }
+
+ pub fn storeAtomic(
+ self: *WipFunction,
+ kind: MemoryAccessKind,
+ val: Value,
+ ptr: Value,
+ scope: SyncScope,
+ ordering: AtomicOrdering,
+ alignment: Alignment,
+ ) Allocator.Error!Instruction.Index {
+ assert(ptr.typeOfWip(self).isPointer(self.builder));
+ try self.ensureUnusedExtraCapacity(1, Instruction.Store, 0);
+ const instruction = try self.addInst(null, .{
+ .tag = switch (ordering) {
+ .none => switch (kind) {
+ .normal => .store,
+ .@"volatile" => .@"store volatile",
+ },
+ else => switch (kind) {
+ .normal => .@"store atomic",
+ .@"volatile" => .@"store atomic volatile",
+ },
+ },
+ .data = self.addExtraAssumeCapacity(Instruction.Store{
+ .val = val,
+ .ptr = ptr,
+ .info = .{ .scope = switch (ordering) {
+ .none => .system,
+ else => scope,
+ }, .ordering = ordering, .alignment = alignment },
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ const llvm_instruction = self.llvm.builder.buildStore(val.toLlvm(self), ptr.toLlvm(self));
+ switch (kind) {
+ .normal => {},
+ .@"volatile" => llvm_instruction.setVolatile(.True),
+ }
+ if (ordering != .none) llvm_instruction.setOrdering(@enumFromInt(@intFromEnum(ordering)));
+ if (alignment.toByteUnits()) |a| llvm_instruction.setAlignment(@intCast(a));
+ self.llvm.instructions.appendAssumeCapacity(llvm_instruction);
+ }
+ return instruction;
+ }
+
+ pub fn fence(
+ self: *WipFunction,
+ scope: SyncScope,
+ ordering: AtomicOrdering,
+ ) Allocator.Error!Instruction.Index {
+ assert(ordering != .none);
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(null, .{
+ .tag = .fence,
+ .data = @bitCast(MemoryAccessInfo{
+ .scope = scope,
+ .ordering = ordering,
+ .alignment = undefined,
+ }),
+ });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildFence(
+ @enumFromInt(@intFromEnum(ordering)),
+ llvm.Bool.fromBool(scope == .singlethread),
+ "",
+ ),
+ );
+ return instruction;
+ }
+
+ pub fn gep(
+ self: *WipFunction,
+ kind: Instruction.GetElementPtr.Kind,
+ ty: Type,
+ base: Value,
+ indices: []const Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ const base_ty = base.typeOfWip(self);
+ const base_is_vector = base_ty.isVector(self.builder);
+
+ const VectorInfo = struct {
+ kind: Type.Vector.Kind,
+ len: u32,
+
+ fn init(vector_ty: Type, builder: *const Builder) @This() {
+ return .{ .kind = vector_ty.vectorKind(builder), .len = vector_ty.vectorLen(builder) };
+ }
+ };
+ var vector_info: ?VectorInfo =
+ if (base_is_vector) VectorInfo.init(base_ty, self.builder) else null;
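+        // Every vector operand of the GEP (base or index) must agree in vector kind and length;
+        // remember the first one seen and assert that the rest match.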
+ for (indices) |index| {
+ const index_ty = index.typeOfWip(self);
+ switch (index_ty.tag(self.builder)) {
+ .integer => {},
+ .vector, .scalable_vector => {
+ const index_info = VectorInfo.init(index_ty, self.builder);
+ if (vector_info) |info|
+ assert(std.meta.eql(info, index_info))
+ else
+ vector_info = index_info;
+ },
+ else => unreachable,
+ }
+ }
+ if (!base_is_vector) if (vector_info) |info| switch (info.kind) {
+ inline else => |vector_kind| _ = try self.builder.vectorType(
+ vector_kind,
+ info.len,
+ base_ty,
+ ),
+ };
+
+ try self.ensureUnusedExtraCapacity(1, Instruction.GetElementPtr, indices.len);
+ const instruction = try self.addInst(name, .{
+ .tag = switch (kind) {
+ .normal => .getelementptr,
+ .inbounds => .@"getelementptr inbounds",
+ },
+ .data = self.addExtraAssumeCapacity(Instruction.GetElementPtr{
+ .type = ty,
+ .base = base,
+ .indices_len = @intCast(indices.len),
+ }),
+ });
+ self.extra.appendSliceAssumeCapacity(@ptrCast(indices));
+ if (self.builder.useLibLlvm()) {
+ const ExpectedContents = [expected_gep_indices_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.builder.gpa);
+ const allocator = stack.get();
+
+ const llvm_indices = try allocator.alloc(*llvm.Value, indices.len);
+ defer allocator.free(llvm_indices);
+ for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self);
+
+ self.llvm.instructions.appendAssumeCapacity(switch (kind) {
+ .normal => &llvm.Builder.buildGEP,
+ .inbounds => &llvm.Builder.buildInBoundsGEP,
+ }(
+ self.llvm.builder,
+ ty.toLlvm(self.builder),
+ base.toLlvm(self),
+ llvm_indices.ptr,
+ @intCast(llvm_indices.len),
+ instruction.llvmName(self),
+ ));
+ }
+ return instruction.toValue();
+ }
+
+ pub fn gepStruct(
+ self: *WipFunction,
+ ty: Type,
+ base: Value,
+ index: usize,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ assert(ty.isStruct(self.builder));
+ return self.gep(.inbounds, ty, base, &.{
+ try self.builder.intValue(.i32, 0), try self.builder.intValue(.i32, index),
+ }, name);
+ }
+
+ pub fn conv(
+ self: *WipFunction,
+ signedness: Instruction.Cast.Signedness,
+ val: Value,
+ ty: Type,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ const val_ty = val.typeOfWip(self);
+ if (val_ty == ty) return val;
+ return self.cast(self.builder.convTag(Instruction.Tag, signedness, val_ty, ty), val, ty, name);
+ }
+
+ pub fn cast(
+ self: *WipFunction,
+ tag: Instruction.Tag,
+ val: Value,
+ ty: Type,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ switch (tag) {
+ .addrspacecast,
+ .bitcast,
+ .fpext,
+ .fptosi,
+ .fptoui,
+ .fptrunc,
+ .inttoptr,
+ .ptrtoint,
+ .sext,
+ .sitofp,
+ .trunc,
+ .uitofp,
+ .zext,
+ => {},
+ else => unreachable,
+ }
+ if (val.typeOfWip(self) == ty) return val;
+ try self.ensureUnusedExtraCapacity(1, Instruction.Cast, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = tag,
+ .data = self.addExtraAssumeCapacity(Instruction.Cast{
+ .val = val,
+ .type = ty,
+ }),
+ });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(switch (tag) {
+ .addrspacecast => &llvm.Builder.buildAddrSpaceCast,
+ .bitcast => &llvm.Builder.buildBitCast,
+ .fpext => &llvm.Builder.buildFPExt,
+ .fptosi => &llvm.Builder.buildFPToSI,
+ .fptoui => &llvm.Builder.buildFPToUI,
+ .fptrunc => &llvm.Builder.buildFPTrunc,
+ .inttoptr => &llvm.Builder.buildIntToPtr,
+ .ptrtoint => &llvm.Builder.buildPtrToInt,
+ .sext => &llvm.Builder.buildSExt,
+ .sitofp => &llvm.Builder.buildSIToFP,
+ .trunc => &llvm.Builder.buildTrunc,
+ .uitofp => &llvm.Builder.buildUIToFP,
+ .zext => &llvm.Builder.buildZExt,
+ else => unreachable,
+ }(self.llvm.builder, val.toLlvm(self), ty.toLlvm(self.builder), instruction.llvmName(self)));
+ return instruction.toValue();
+ }
+
+ pub fn icmp(
+ self: *WipFunction,
+ cond: IntegerCondition,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ return self.cmpTag(switch (cond) {
+ inline else => |tag| @field(Instruction.Tag, "icmp " ++ @tagName(tag)),
+ }, @intFromEnum(cond), lhs, rhs, name);
+ }
+
+ pub fn fcmp(
+ self: *WipFunction,
+ cond: FloatCondition,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ return self.cmpTag(switch (cond) {
+ inline else => |tag| @field(Instruction.Tag, "fcmp " ++ @tagName(tag)),
+ }, @intFromEnum(cond), lhs, rhs, name);
+ }
+
+ pub fn fcmpFast(
+ self: *WipFunction,
+ cond: FloatCondition,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ return self.cmpTag(switch (cond) {
+ inline else => |tag| @field(Instruction.Tag, "fcmp fast " ++ @tagName(tag)),
+ }, @intFromEnum(cond), lhs, rhs, name);
+ }
+
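+    /// Phis are built in two steps: `phi`/`phiFast` reserve the instruction and
+    /// its incoming slots, then `finish` fills in the incoming values and blocks
+    /// once all predecessors are known.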
+ pub const WipPhi = struct {
+ block: Block.Index,
+ instruction: Instruction.Index,
+
+ pub fn toValue(self: WipPhi) Value {
+ return self.instruction.toValue();
+ }
+
+ pub fn finish(
+ self: WipPhi,
+ vals: []const Value,
+ blocks: []const Block.Index,
+ wip: *WipFunction,
+ ) if (build_options.have_llvm) Allocator.Error!void else void {
+ const incoming_len = self.block.ptrConst(wip).incoming;
+ assert(vals.len == incoming_len and blocks.len == incoming_len);
+ const instruction = wip.instructions.get(@intFromEnum(self.instruction));
+ var extra = wip.extraDataTrail(Instruction.Phi, instruction.data);
+ for (vals) |val| assert(val.typeOfWip(wip) == extra.data.type);
+ @memcpy(extra.trail.nextMut(incoming_len, Value, wip), vals);
+ @memcpy(extra.trail.nextMut(incoming_len, Block.Index, wip), blocks);
+ if (wip.builder.useLibLlvm()) {
+ const ExpectedContents = extern struct {
+ [expected_incoming_len]*llvm.Value,
+ [expected_incoming_len]*llvm.BasicBlock,
+ };
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), wip.builder.gpa);
+ const allocator = stack.get();
+
+ const llvm_vals = try allocator.alloc(*llvm.Value, incoming_len);
+ defer allocator.free(llvm_vals);
+ const llvm_blocks = try allocator.alloc(*llvm.BasicBlock, incoming_len);
+ defer allocator.free(llvm_blocks);
+
+ for (llvm_vals, vals) |*llvm_val, incoming_val| llvm_val.* = incoming_val.toLlvm(wip);
+ for (llvm_blocks, blocks) |*llvm_block, incoming_block|
+ llvm_block.* = incoming_block.toLlvm(wip);
+ self.instruction.toLlvm(wip)
+ .addIncoming(llvm_vals.ptr, llvm_blocks.ptr, @intCast(incoming_len));
+ }
+ }
+ };
+
+ pub fn phi(self: *WipFunction, ty: Type, name: []const u8) Allocator.Error!WipPhi {
+ return self.phiTag(.phi, ty, name);
+ }
+
+ pub fn phiFast(self: *WipFunction, ty: Type, name: []const u8) Allocator.Error!WipPhi {
+ return self.phiTag(.@"phi fast", ty, name);
+ }
+
+ pub fn select(
+ self: *WipFunction,
+ cond: Value,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ return self.selectTag(.select, cond, lhs, rhs, name);
+ }
+
+ pub fn selectFast(
+ self: *WipFunction,
+ cond: Value,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ return self.selectTag(.@"select fast", cond, lhs, rhs, name);
+ }
+
+ pub fn vaArg(self: *WipFunction, list: Value, ty: Type, name: []const u8) Allocator.Error!Value {
+ try self.ensureUnusedExtraCapacity(1, Instruction.VaArg, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = .va_arg,
+ .data = self.addExtraAssumeCapacity(Instruction.VaArg{
+ .list = list,
+ .type = ty,
+ }),
+ });
+ if (self.builder.useLibLlvm()) self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildVAArg(
+ list.toLlvm(self),
+ ty.toLlvm(self.builder),
+ instruction.llvmName(self),
+ ),
+ );
+ return instruction.toValue();
+ }
+
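+    /// Placeholder for instructions not yet modeled by this builder: the LLVM
+    /// value is supplied later via `finish`, so this path is only usable when the
+    /// LLVM library is in use.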
+ pub const WipUnimplemented = struct {
+ instruction: Instruction.Index,
+
+ pub fn finish(self: WipUnimplemented, val: *llvm.Value, wip: *WipFunction) Value {
+ assert(wip.builder.useLibLlvm());
+ wip.llvm.instructions.items[@intFromEnum(self.instruction)] = val;
+ return self.instruction.toValue();
+ }
+ };
+
+ pub fn unimplemented(
+ self: *WipFunction,
+ ty: Type,
+ name: []const u8,
+ ) Allocator.Error!WipUnimplemented {
+ try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = .unimplemented,
+ .data = @intFromEnum(ty),
+ });
+ if (self.builder.useLibLlvm()) _ = self.llvm.instructions.addOneAssumeCapacity();
+ return .{ .instruction = instruction };
+ }
+
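+    /// Commits the work-in-progress state to the owning `Function`: instructions
+    /// are renumbered in block order, operands are remapped to the new indices,
+    /// and the name and extra data arrays are rebuilt in their final layout.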
+ pub fn finish(self: *WipFunction) Allocator.Error!void {
+ const gpa = self.builder.gpa;
+ const function = self.function.ptr(self.builder);
+ const params_len = self.function.typeOf(self.builder).functionParameters(self.builder).len;
+ const final_instructions_len = self.blocks.items.len + self.instructions.len;
+
+ const blocks = try gpa.alloc(Function.Block, self.blocks.items.len);
+ errdefer gpa.free(blocks);
+
+ const instructions: struct {
+ items: []Instruction.Index,
+
+ fn map(instructions: @This(), val: Value) Value {
+ if (val == .none) return .none;
+ return switch (val.unwrap()) {
+ .instruction => |instruction| instructions.items[
+ @intFromEnum(instruction)
+ ].toValue(),
+ .constant => |constant| constant.toValue(),
+ };
+ }
+ } = .{ .items = try gpa.alloc(Instruction.Index, self.instructions.len) };
+ defer gpa.free(instructions.items);
+
+ const names = try gpa.alloc(String, final_instructions_len);
+ errdefer gpa.free(names);
+
+ const metadata =
+ if (self.builder.strip) null else try gpa.alloc(Metadata, final_instructions_len);
+ errdefer if (metadata) |new_metadata| gpa.free(new_metadata);
+
+ var wip_extra: struct {
+ index: Instruction.ExtraIndex = 0,
+ items: []u32,
+
+ fn addExtra(wip_extra: *@This(), extra: anytype) Instruction.ExtraIndex {
+ const result = wip_extra.index;
+ inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ const value = @field(extra, field.name);
+ wip_extra.items[wip_extra.index] = switch (field.type) {
+ u32 => value,
+ Alignment, AtomicOrdering, Block.Index, Type, Value => @intFromEnum(value),
+ MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ };
+ wip_extra.index += 1;
+ }
+ return result;
+ }
+
+ fn appendSlice(wip_extra: *@This(), slice: anytype) void {
+ if (@typeInfo(@TypeOf(slice)).Pointer.child == Value) @compileError("use appendValues");
+ const data: []const u32 = @ptrCast(slice);
+ @memcpy(wip_extra.items[wip_extra.index..][0..data.len], data);
+ wip_extra.index += @intCast(data.len);
+ }
+
+ fn appendValues(wip_extra: *@This(), vals: []const Value, ctx: anytype) void {
+ for (wip_extra.items[wip_extra.index..][0..vals.len], vals) |*extra, val|
+ extra.* = @intFromEnum(ctx.map(val));
+ wip_extra.index += @intCast(vals.len);
+ }
+
+ fn finish(wip_extra: *const @This()) []const u32 {
+ assert(wip_extra.index == wip_extra.items.len);
+ return wip_extra.items;
+ }
+ } = .{ .items = try gpa.alloc(u32, self.extra.items.len) };
+ errdefer gpa.free(wip_extra.items);
+
+ gpa.free(function.blocks);
+ function.blocks = &.{};
+ gpa.free(function.names[0..function.instructions.len]);
+ if (function.metadata) |old_metadata| gpa.free(old_metadata[0..function.instructions.len]);
+ function.metadata = null;
+ gpa.free(function.extra);
+ function.extra = &.{};
+
+ function.instructions.shrinkRetainingCapacity(0);
+ try function.instructions.setCapacity(gpa, final_instructions_len);
+ errdefer function.instructions.shrinkRetainingCapacity(0);
+
+ {
+ var final_instruction_index: Instruction.Index = @enumFromInt(0);
+ for (0..params_len) |param_index| {
+ instructions.items[param_index] = final_instruction_index;
+ final_instruction_index = @enumFromInt(@intFromEnum(final_instruction_index) + 1);
+ }
+ for (blocks, self.blocks.items) |*final_block, current_block| {
+ assert(current_block.incoming == current_block.branches);
+ final_block.instruction = final_instruction_index;
+ final_instruction_index = @enumFromInt(@intFromEnum(final_instruction_index) + 1);
+ for (current_block.instructions.items) |instruction| {
+ instructions.items[@intFromEnum(instruction)] = final_instruction_index;
+ final_instruction_index = @enumFromInt(@intFromEnum(final_instruction_index) + 1);
+ }
+ }
+ }
+
+ var wip_name: struct {
+ next_name: String = @enumFromInt(0),
+
+ fn map(wip_name: *@This(), old_name: String) String {
+ if (old_name != .empty) return old_name;
+
+ const new_name = wip_name.next_name;
+ wip_name.next_name = @enumFromInt(@intFromEnum(new_name) + 1);
+ return new_name;
+ }
+ } = .{};
+ for (0..params_len) |param_index| {
+ const old_argument_index: Instruction.Index = @enumFromInt(param_index);
+ const new_argument_index: Instruction.Index = @enumFromInt(function.instructions.len);
+ const argument = self.instructions.get(@intFromEnum(old_argument_index));
+ assert(argument.tag == .arg);
+ assert(argument.data == param_index);
+ function.instructions.appendAssumeCapacity(argument);
+ names[@intFromEnum(new_argument_index)] = wip_name.map(
+ if (self.builder.strip) .empty else self.names.items[@intFromEnum(old_argument_index)],
+ );
+ }
+ for (self.blocks.items) |current_block| {
+ const new_block_index: Instruction.Index = @enumFromInt(function.instructions.len);
+ function.instructions.appendAssumeCapacity(.{
+ .tag = .block,
+ .data = current_block.incoming,
+ });
+ names[@intFromEnum(new_block_index)] = wip_name.map(current_block.name);
+ for (current_block.instructions.items) |old_instruction_index| {
+ const new_instruction_index: Instruction.Index =
+ @enumFromInt(function.instructions.len);
+ var instruction = self.instructions.get(@intFromEnum(old_instruction_index));
+ switch (instruction.tag) {
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .@"add nuw nsw",
+ .@"and",
+ .ashr,
+ .@"ashr exact",
+ .fadd,
+ .@"fadd fast",
+ .@"fcmp false",
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ .fdiv,
+ .@"fdiv fast",
+ .fmul,
+ .@"fmul fast",
+ .frem,
+ .@"frem fast",
+ .fsub,
+ .@"fsub fast",
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ .@"llvm.maxnum.",
+ .@"llvm.minnum.",
+ .@"llvm.sadd.sat.",
+ .@"llvm.smax.",
+ .@"llvm.smin.",
+ .@"llvm.smul.fix.sat.",
+ .@"llvm.sshl.sat.",
+ .@"llvm.ssub.sat.",
+ .@"llvm.uadd.sat.",
+ .@"llvm.umax.",
+ .@"llvm.umin.",
+ .@"llvm.umul.fix.sat.",
+ .@"llvm.ushl.sat.",
+ .@"llvm.usub.sat.",
+ .lshr,
+ .@"lshr exact",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .@"mul nuw nsw",
+ .@"or",
+ .sdiv,
+ .@"sdiv exact",
+ .shl,
+ .@"shl nsw",
+ .@"shl nuw",
+ .@"shl nuw nsw",
+ .srem,
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .@"sub nuw nsw",
+ .udiv,
+ .@"udiv exact",
+ .urem,
+ .xor,
+ => {
+ const extra = self.extraData(Instruction.Binary, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.Binary{
+ .lhs = instructions.map(extra.lhs),
+ .rhs = instructions.map(extra.rhs),
+ });
+ },
+ .addrspacecast,
+ .bitcast,
+ .fpext,
+ .fptosi,
+ .fptoui,
+ .fptrunc,
+ .inttoptr,
+ .ptrtoint,
+ .sext,
+ .sitofp,
+ .trunc,
+ .uitofp,
+ .zext,
+ => {
+ const extra = self.extraData(Instruction.Cast, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.Cast{
+ .val = instructions.map(extra.val),
+ .type = extra.type,
+ });
+ },
+ .alloca,
+ .@"alloca inalloca",
+ => {
+ const extra = self.extraData(Instruction.Alloca, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.Alloca{
+ .type = extra.type,
+ .len = instructions.map(extra.len),
+ .info = extra.info,
+ });
+ },
+ .arg,
+ .block,
+ => unreachable,
+ .br,
+ .fence,
+ .@"ret void",
+ .unimplemented,
+ .@"unreachable",
+ => {},
+ .extractelement => {
+ const extra = self.extraData(Instruction.ExtractElement, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.ExtractElement{
+ .val = instructions.map(extra.val),
+ .index = instructions.map(extra.index),
+ });
+ },
+ .br_cond => {
+ const extra = self.extraData(Instruction.BrCond, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.BrCond{
+ .cond = instructions.map(extra.cond),
+ .then = extra.then,
+ .@"else" = extra.@"else",
+ });
+ },
+ .extractvalue => {
+ var extra = self.extraDataTrail(Instruction.ExtractValue, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, u32, self);
+ instruction.data = wip_extra.addExtra(Instruction.ExtractValue{
+ .val = instructions.map(extra.data.val),
+ .indices_len = extra.data.indices_len,
+ });
+ wip_extra.appendSlice(indices);
+ },
+ .fneg,
+ .@"fneg fast",
+ .ret,
+ => instruction.data = @intFromEnum(instructions.map(@enumFromInt(instruction.data))),
+ .getelementptr,
+ .@"getelementptr inbounds",
+ => {
+ var extra = self.extraDataTrail(Instruction.GetElementPtr, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, Value, self);
+ instruction.data = wip_extra.addExtra(Instruction.GetElementPtr{
+ .type = extra.data.type,
+ .base = instructions.map(extra.data.base),
+ .indices_len = extra.data.indices_len,
+ });
+ wip_extra.appendValues(indices, instructions);
+ },
+ .insertelement => {
+ const extra = self.extraData(Instruction.InsertElement, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.InsertElement{
+ .val = instructions.map(extra.val),
+ .elem = instructions.map(extra.elem),
+ .index = instructions.map(extra.index),
+ });
+ },
+ .insertvalue => {
+ var extra = self.extraDataTrail(Instruction.InsertValue, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, u32, self);
+ instruction.data = wip_extra.addExtra(Instruction.InsertValue{
+ .val = instructions.map(extra.data.val),
+ .elem = instructions.map(extra.data.elem),
+ .indices_len = extra.data.indices_len,
+ });
+ wip_extra.appendSlice(indices);
+ },
+ .load,
+ .@"load atomic",
+ .@"load atomic volatile",
+ .@"load volatile",
+ => {
+ const extra = self.extraData(Instruction.Load, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.Load{
+ .type = extra.type,
+ .ptr = instructions.map(extra.ptr),
+ .info = extra.info,
+ });
+ },
+ .phi,
+ .@"phi fast",
+ => {
+ const incoming_len = current_block.incoming;
+ var extra = self.extraDataTrail(Instruction.Phi, instruction.data);
+ const incoming_vals = extra.trail.next(incoming_len, Value, self);
+ const incoming_blocks = extra.trail.next(incoming_len, Block.Index, self);
+ instruction.data = wip_extra.addExtra(Instruction.Phi{
+ .type = extra.data.type,
+ });
+ wip_extra.appendValues(incoming_vals, instructions);
+ wip_extra.appendSlice(incoming_blocks);
+ },
+ .select,
+ .@"select fast",
+ => {
+ const extra = self.extraData(Instruction.Select, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.Select{
+ .cond = instructions.map(extra.cond),
+ .lhs = instructions.map(extra.lhs),
+ .rhs = instructions.map(extra.rhs),
+ });
+ },
+ .shufflevector => {
+ const extra = self.extraData(Instruction.ShuffleVector, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.ShuffleVector{
+ .lhs = instructions.map(extra.lhs),
+ .rhs = instructions.map(extra.rhs),
+ .mask = instructions.map(extra.mask),
+ });
+ },
+ .store,
+ .@"store atomic",
+ .@"store atomic volatile",
+ .@"store volatile",
+ => {
+ const extra = self.extraData(Instruction.Store, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.Store{
+ .val = instructions.map(extra.val),
+ .ptr = instructions.map(extra.ptr),
+ .info = extra.info,
+ });
+ },
+ .@"switch" => {
+ var extra = self.extraDataTrail(Instruction.Switch, instruction.data);
+ const case_vals = extra.trail.next(extra.data.cases_len, Constant, self);
+ const case_blocks = extra.trail.next(extra.data.cases_len, Block.Index, self);
+ instruction.data = wip_extra.addExtra(Instruction.Switch{
+ .val = instructions.map(extra.data.val),
+ .default = extra.data.default,
+ .cases_len = extra.data.cases_len,
+ });
+ wip_extra.appendSlice(case_vals);
+ wip_extra.appendSlice(case_blocks);
+ },
+ .va_arg => {
+ const extra = self.extraData(Instruction.VaArg, instruction.data);
+ instruction.data = wip_extra.addExtra(Instruction.VaArg{
+ .list = instructions.map(extra.list),
+ .type = extra.type,
+ });
+ },
+ }
+ function.instructions.appendAssumeCapacity(instruction);
+ names[@intFromEnum(new_instruction_index)] = wip_name.map(if (self.builder.strip)
+ if (old_instruction_index.hasResultWip(self)) .empty else .none
+ else
+ self.names.items[@intFromEnum(old_instruction_index)]);
+ }
+ }
+
+ assert(function.instructions.len == final_instructions_len);
+ function.extra = wip_extra.finish();
+ function.blocks = blocks;
+ function.names = names.ptr;
+ function.metadata = if (metadata) |new_metadata| new_metadata.ptr else null;
+ }
+
+ pub fn deinit(self: *WipFunction) void {
+ self.extra.deinit(self.builder.gpa);
+ self.instructions.deinit(self.builder.gpa);
+ for (self.blocks.items) |*b| b.instructions.deinit(self.builder.gpa);
+ self.blocks.deinit(self.builder.gpa);
+ if (self.builder.useLibLlvm()) self.llvm.builder.dispose();
+ self.* = undefined;
+ }
+
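+    /// Shared implementation of `icmp`, `fcmp`, and `fcmpFast`; `cond` carries the
+    /// raw predicate value that is forwarded to LLVM when the library is in use.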
+ fn cmpTag(
+ self: *WipFunction,
+ tag: Instruction.Tag,
+ cond: u32,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ switch (tag) {
+ .@"fcmp false",
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ => assert(lhs.typeOfWip(self) == rhs.typeOfWip(self)),
+ else => unreachable,
+ }
+ _ = try lhs.typeOfWip(self).changeScalar(.i1, self.builder);
+ try self.ensureUnusedExtraCapacity(1, Instruction.Binary, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = tag,
+ .data = self.addExtraAssumeCapacity(Instruction.Binary{
+ .lhs = lhs,
+ .rhs = rhs,
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ switch (tag) {
+ .@"fcmp false",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ => self.llvm.builder.setFastMath(false),
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ => self.llvm.builder.setFastMath(true),
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ => {},
+ else => unreachable,
+ }
+ self.llvm.instructions.appendAssumeCapacity(switch (tag) {
+ .@"fcmp false",
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ => self.llvm.builder.buildFCmp(
+ @enumFromInt(cond),
+ lhs.toLlvm(self),
+ rhs.toLlvm(self),
+ instruction.llvmName(self),
+ ),
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ => self.llvm.builder.buildICmp(
+ @enumFromInt(cond),
+ lhs.toLlvm(self),
+ rhs.toLlvm(self),
+ instruction.llvmName(self),
+ ),
+ else => unreachable,
+ });
+ }
+ return instruction.toValue();
+ }
+
+ fn phiTag(
+ self: *WipFunction,
+ tag: Instruction.Tag,
+ ty: Type,
+ name: []const u8,
+ ) Allocator.Error!WipPhi {
+ switch (tag) {
+ .phi, .@"phi fast" => assert(try ty.isSized(self.builder)),
+ else => unreachable,
+ }
+ const incoming = self.cursor.block.ptrConst(self).incoming;
+ assert(incoming > 0);
+ try self.ensureUnusedExtraCapacity(1, Instruction.Phi, incoming * 2);
+ const instruction = try self.addInst(name, .{
+ .tag = tag,
+ .data = self.addExtraAssumeCapacity(Instruction.Phi{ .type = ty }),
+ });
+ _ = self.extra.addManyAsSliceAssumeCapacity(incoming * 2);
+ if (self.builder.useLibLlvm()) {
+ switch (tag) {
+ .phi => self.llvm.builder.setFastMath(false),
+ .@"phi fast" => self.llvm.builder.setFastMath(true),
+ else => unreachable,
+ }
+ self.llvm.instructions.appendAssumeCapacity(
+ self.llvm.builder.buildPhi(ty.toLlvm(self.builder), instruction.llvmName(self)),
+ );
+ }
+ return .{ .block = self.cursor.block, .instruction = instruction };
+ }
+
+ fn selectTag(
+ self: *WipFunction,
+ tag: Instruction.Tag,
+ cond: Value,
+ lhs: Value,
+ rhs: Value,
+ name: []const u8,
+ ) Allocator.Error!Value {
+ switch (tag) {
+ .select, .@"select fast" => {
+ assert(cond.typeOfWip(self).scalarType(self.builder) == .i1);
+ assert(lhs.typeOfWip(self) == rhs.typeOfWip(self));
+ },
+ else => unreachable,
+ }
+ try self.ensureUnusedExtraCapacity(1, Instruction.Select, 0);
+ const instruction = try self.addInst(name, .{
+ .tag = tag,
+ .data = self.addExtraAssumeCapacity(Instruction.Select{
+ .cond = cond,
+ .lhs = lhs,
+ .rhs = rhs,
+ }),
+ });
+ if (self.builder.useLibLlvm()) {
+ switch (tag) {
+ .select => self.llvm.builder.setFastMath(false),
+ .@"select fast" => self.llvm.builder.setFastMath(true),
+ else => unreachable,
+ }
+ self.llvm.instructions.appendAssumeCapacity(self.llvm.builder.buildSelect(
+ cond.toLlvm(self),
+ lhs.toLlvm(self),
+ rhs.toLlvm(self),
+ instruction.llvmName(self),
+ ));
+ }
+ return instruction.toValue();
+ }
+
+ fn ensureUnusedExtraCapacity(
+ self: *WipFunction,
+ count: usize,
+ comptime Extra: type,
+ trail_len: usize,
+ ) Allocator.Error!void {
+ try self.extra.ensureUnusedCapacity(
+ self.builder.gpa,
+ count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ );
+ }
+
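+    /// Appends an instruction at the current cursor position, reserving a name
+    /// slot unless stripping and, when the LLVM library is in use, positioning the
+    /// LLVM builder before the next non-constant instruction in the block.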
+ fn addInst(
+ self: *WipFunction,
+ name: ?[]const u8,
+ instruction: Instruction,
+ ) Allocator.Error!Instruction.Index {
+ const block_instructions = &self.cursor.block.ptr(self).instructions;
+ try self.instructions.ensureUnusedCapacity(self.builder.gpa, 1);
+ if (!self.builder.strip) try self.names.ensureUnusedCapacity(self.builder.gpa, 1);
+ try block_instructions.ensureUnusedCapacity(self.builder.gpa, 1);
+ if (self.builder.useLibLlvm())
+ try self.llvm.instructions.ensureUnusedCapacity(self.builder.gpa, 1);
+ const final_name = if (name) |n|
+ if (self.builder.strip) .empty else try self.builder.string(n)
+ else
+ .none;
+
+ if (self.builder.useLibLlvm()) self.llvm.builder.positionBuilder(
+ self.cursor.block.toLlvm(self),
+ for (block_instructions.items[self.cursor.instruction..]) |instruction_index| {
+ const llvm_instruction =
+ self.llvm.instructions.items[@intFromEnum(instruction_index)];
+ // TODO: remove when constant propagation is implemented
+ if (!llvm_instruction.isConstant().toBool()) break llvm_instruction;
+ } else null,
+ );
+
+ const index: Instruction.Index = @enumFromInt(self.instructions.len);
+ self.instructions.appendAssumeCapacity(instruction);
+ if (!self.builder.strip) self.names.appendAssumeCapacity(final_name);
+ block_instructions.insertAssumeCapacity(self.cursor.instruction, index);
+ self.cursor.instruction += 1;
+ return index;
+ }
+
+ fn addExtraAssumeCapacity(self: *WipFunction, extra: anytype) Instruction.ExtraIndex {
+ const result: Instruction.ExtraIndex = @intCast(self.extra.items.len);
+ inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ const value = @field(extra, field.name);
+ self.extra.appendAssumeCapacity(switch (field.type) {
+ u32 => value,
+ Alignment, AtomicOrdering, Block.Index, Type, Value => @intFromEnum(value),
+ MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ });
+ }
+ return result;
+ }
+
+ const ExtraDataTrail = struct {
+ index: Instruction.ExtraIndex,
+
+ fn nextMut(self: *ExtraDataTrail, len: u32, comptime Item: type, wip: *WipFunction) []Item {
+ const items: []Item = @ptrCast(wip.extra.items[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+
+ fn next(
+ self: *ExtraDataTrail,
+ len: u32,
+ comptime Item: type,
+ wip: *const WipFunction,
+ ) []const Item {
+ const items: []const Item = @ptrCast(wip.extra.items[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+ };
+
+ fn extraDataTrail(
+ self: *const WipFunction,
+ comptime T: type,
+ index: Instruction.ExtraIndex,
+ ) struct { data: T, trail: ExtraDataTrail } {
+ var result: T = undefined;
+ const fields = @typeInfo(T).Struct.fields;
+ inline for (fields, self.extra.items[index..][0..fields.len]) |field, value|
+ @field(result, field.name) = switch (field.type) {
+ u32 => value,
+ Alignment, AtomicOrdering, Block.Index, Type, Value => @enumFromInt(value),
+ MemoryAccessInfo, Instruction.Alloca.Info => @bitCast(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ };
+ return .{
+ .data = result,
+ .trail = .{ .index = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) },
+ };
+ }
+
+ fn extraData(self: *const WipFunction, comptime T: type, index: Instruction.ExtraIndex) T {
+ return self.extraDataTrail(T, index).data;
+ }
+};
+
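+// Note: these explicit values appear to mirror LLVM's C API predicate enums
+// (LLVMRealPredicate here, LLVMIntPredicate for IntegerCondition below), which is
+// what lets `cmpTag` forward them with a plain `@enumFromInt`.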
+pub const FloatCondition = enum(u4) {
+ oeq = 1,
+ ogt = 2,
+ oge = 3,
+ olt = 4,
+ ole = 5,
+ one = 6,
+ ord = 7,
+ uno = 8,
+ ueq = 9,
+ ugt = 10,
+ uge = 11,
+ ult = 12,
+ ule = 13,
+ une = 14,
+};
+
+pub const IntegerCondition = enum(u6) {
+ eq = 32,
+ ne = 33,
+ ugt = 34,
+ uge = 35,
+ ult = 36,
+ ule = 37,
+ sgt = 38,
+ sge = 39,
+ slt = 40,
+ sle = 41,
+};
+
+pub const MemoryAccessKind = enum(u1) {
+ normal,
+ @"volatile",
+};
+
+pub const SyncScope = enum(u1) {
+ singlethread,
+ system,
+
+ pub fn format(
+ self: SyncScope,
+ comptime prefix: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .system) try writer.print(
+ \\{s} syncscope("{s}")
+ , .{ prefix, @tagName(self) });
+ }
+};
+
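+// The explicit values (with 3 unused) appear to follow LLVM's atomic ordering
+// encoding; `none` stands in for "not atomic".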
+pub const AtomicOrdering = enum(u3) {
+ none = 0,
+ unordered = 1,
+ monotonic = 2,
+ acquire = 4,
+ release = 5,
+ acq_rel = 6,
+ seq_cst = 7,
+
+ pub fn format(
+ self: AtomicOrdering,
+ comptime prefix: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (self != .none) try writer.print("{s} {s}", .{ prefix, @tagName(self) });
+ }
+};
+
+const MemoryAccessInfo = packed struct(u32) {
+ scope: SyncScope,
+ ordering: AtomicOrdering,
+ alignment: Alignment,
+ _: u22 = undefined,
+};
+
+pub const FastMath = packed struct(u32) {
+ nnan: bool = false,
+ ninf: bool = false,
+ nsz: bool = false,
+ arcp: bool = false,
+ contract: bool = false,
+ afn: bool = false,
+ reassoc: bool = false,
+
+ pub const fast = FastMath{
+ .nnan = true,
+ .ninf = true,
+ .nsz = true,
+ .arcp = true,
+ .contract = true,
+ .afn = true,
+        .reassoc = true,
+ };
+};
+
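+/// Interned constants. Values below `first_global` index into `constant_items`;
+/// values at or above it refer to globals (see `unwrap`).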
+pub const Constant = enum(u32) {
+ false,
+ true,
+ none,
+ no_init = 1 << 31,
+ _,
+
+ const first_global: Constant = @enumFromInt(1 << 30);
+
+ pub const Tag = enum(u6) {
+ positive_integer,
+ negative_integer,
+ half,
+ bfloat,
+ float,
+ double,
+ fp128,
+ x86_fp80,
+ ppc_fp128,
+ null,
+ none,
+ structure,
+ packed_structure,
+ array,
+ string,
+ string_null,
+ vector,
+ splat,
+ zeroinitializer,
+ undef,
+ poison,
+ blockaddress,
+ dso_local_equivalent,
+ no_cfi,
+ trunc,
+ zext,
+ sext,
+ fptrunc,
+ fpext,
+ fptoui,
+ fptosi,
+ uitofp,
+ sitofp,
+ ptrtoint,
+ inttoptr,
+ bitcast,
+ addrspacecast,
+ getelementptr,
+ @"getelementptr inbounds",
+ icmp,
+ fcmp,
+ extractelement,
+ insertelement,
+ shufflevector,
+ add,
+ @"add nsw",
+ @"add nuw",
+ sub,
+ @"sub nsw",
+ @"sub nuw",
+ mul,
+ @"mul nsw",
+ @"mul nuw",
+ shl,
+ lshr,
+ ashr,
+ @"and",
+ @"or",
+ xor,
+ };
+
+ pub const Item = struct {
+ tag: Tag,
+ data: ExtraIndex,
+
+ const ExtraIndex = u32;
+ };
+
+ pub const Integer = packed struct(u64) {
+ type: Type,
+ limbs_len: u32,
+
+ pub const limbs = @divExact(@bitSizeOf(Integer), @bitSizeOf(std.math.big.Limb));
+ };
+
+ pub const Double = struct {
+ lo: u32,
+ hi: u32,
+ };
+
+ pub const Fp80 = struct {
+ lo_lo: u32,
+ lo_hi: u32,
+ hi: u32,
+ };
+
+ pub const Fp128 = struct {
+ lo_lo: u32,
+ lo_hi: u32,
+ hi_lo: u32,
+ hi_hi: u32,
+ };
+
+ pub const Aggregate = struct {
+ type: Type,
+ //fields: [type.aggregateLen(builder)]Constant,
+ };
+
+ pub const Splat = extern struct {
+ type: Type,
+ value: Constant,
+ };
+
+ pub const BlockAddress = extern struct {
+ function: Function.Index,
+ block: Function.Block.Index,
+ };
+
+ pub const Cast = extern struct {
+ val: Constant,
+ type: Type,
+
+ pub const Signedness = enum { unsigned, signed, unneeded };
+ };
+
+ pub const GetElementPtr = struct {
+ type: Type,
+ base: Constant,
+ info: Info,
+ //indices: [info.indices_len]Constant,
+
+ pub const Kind = enum { normal, inbounds };
+ pub const InRangeIndex = enum(u16) { none = std.math.maxInt(u16), _ };
+ pub const Info = packed struct(u32) { indices_len: u16, inrange: InRangeIndex };
+ };
+
+ pub const Compare = extern struct {
+ cond: u32,
+ lhs: Constant,
+ rhs: Constant,
+ };
+
+ pub const ExtractElement = extern struct {
+ val: Constant,
+ index: Constant,
+ };
+
+ pub const InsertElement = extern struct {
+ val: Constant,
+ elem: Constant,
+ index: Constant,
+ };
+
+ pub const ShuffleVector = extern struct {
+ lhs: Constant,
+ rhs: Constant,
+ mask: Constant,
+ };
+
+ pub const Binary = extern struct {
+ lhs: Constant,
+ rhs: Constant,
+ };
+
+ pub fn unwrap(self: Constant) union(enum) {
+ constant: u30,
+ global: Global.Index,
+ } {
+ return if (@intFromEnum(self) < @intFromEnum(first_global))
+ .{ .constant = @intCast(@intFromEnum(self)) }
+ else
+ .{ .global = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_global)) };
+ }
+
+ pub fn toValue(self: Constant) Value {
+ return @enumFromInt(@intFromEnum(Value.first_constant) + @intFromEnum(self));
+ }
+
+ pub fn typeOf(self: Constant, builder: *Builder) Type {
+ switch (self.unwrap()) {
+ .constant => |constant| {
+ const item = builder.constant_items.get(constant);
+ return switch (item.tag) {
+ .positive_integer,
+ .negative_integer,
+ => @as(
+ *align(@alignOf(std.math.big.Limb)) Integer,
+ @ptrCast(builder.constant_limbs.items[item.data..][0..Integer.limbs]),
+ ).type,
+ .half => .half,
+ .bfloat => .bfloat,
+ .float => .float,
+ .double => .double,
+ .fp128 => .fp128,
+ .x86_fp80 => .x86_fp80,
+ .ppc_fp128 => .ppc_fp128,
+ .null,
+ .none,
+ .zeroinitializer,
+ .undef,
+ .poison,
+ => @enumFromInt(item.data),
+ .structure,
+ .packed_structure,
+ .array,
+ .vector,
+ => builder.constantExtraData(Aggregate, item.data).type,
+ .splat => builder.constantExtraData(Splat, item.data).type,
+ .string,
+ .string_null,
+ => builder.arrayTypeAssumeCapacity(
+ @as(String, @enumFromInt(item.data)).toSlice(builder).?.len +
+ @intFromBool(item.tag == .string_null),
+ .i8,
+ ),
+ .blockaddress => builder.ptrTypeAssumeCapacity(
+ builder.constantExtraData(BlockAddress, item.data)
+ .function.ptrConst(builder).global.ptrConst(builder).addr_space,
+ ),
+ .dso_local_equivalent,
+ .no_cfi,
+ => builder.ptrTypeAssumeCapacity(@as(Function.Index, @enumFromInt(item.data))
+ .ptrConst(builder).global.ptrConst(builder).addr_space),
+ .trunc,
+ .zext,
+ .sext,
+ .fptrunc,
+ .fpext,
+ .fptoui,
+ .fptosi,
+ .uitofp,
+ .sitofp,
+ .ptrtoint,
+ .inttoptr,
+ .bitcast,
+ .addrspacecast,
+ => builder.constantExtraData(Cast, item.data).type,
+ .getelementptr,
+ .@"getelementptr inbounds",
+ => {
+ var extra = builder.constantExtraDataTrail(GetElementPtr, item.data);
+ const indices =
+ extra.trail.next(extra.data.info.indices_len, Constant, builder);
+ const base_ty = extra.data.base.typeOf(builder);
+ if (!base_ty.isVector(builder)) for (indices) |index| {
+ const index_ty = index.typeOf(builder);
+ if (!index_ty.isVector(builder)) continue;
+ return index_ty.changeScalarAssumeCapacity(base_ty, builder);
+ };
+ return base_ty;
+ },
+ .icmp,
+ .fcmp,
+ => builder.constantExtraData(Compare, item.data).lhs.typeOf(builder)
+ .changeScalarAssumeCapacity(.i1, builder),
+ .extractelement => builder.constantExtraData(ExtractElement, item.data)
+ .val.typeOf(builder).childType(builder),
+ .insertelement => builder.constantExtraData(InsertElement, item.data)
+ .val.typeOf(builder),
+ .shufflevector => {
+ const extra = builder.constantExtraData(ShuffleVector, item.data);
+ return extra.lhs.typeOf(builder).changeLengthAssumeCapacity(
+ extra.mask.typeOf(builder).vectorLen(builder),
+ builder,
+ );
+ },
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .shl,
+ .lshr,
+ .ashr,
+ .@"and",
+ .@"or",
+ .xor,
+ => builder.constantExtraData(Binary, item.data).lhs.typeOf(builder),
+ };
+ },
+ .global => |global| return builder.ptrTypeAssumeCapacity(
+ global.ptrConst(builder).addr_space,
+ ),
+ }
+ }
+
+ pub fn isZeroInit(self: Constant, builder: *const Builder) bool {
+ switch (self.unwrap()) {
+ .constant => |constant| {
+ const item = builder.constant_items.get(constant);
+ return switch (item.tag) {
+ .positive_integer => {
+ const extra: *align(@alignOf(std.math.big.Limb)) Integer =
+ @ptrCast(builder.constant_limbs.items[item.data..][0..Integer.limbs]);
+ const limbs = builder.constant_limbs
+ .items[item.data + Integer.limbs ..][0..extra.limbs_len];
+ return std.mem.eql(std.math.big.Limb, limbs, &.{0});
+ },
+ .half, .bfloat, .float => item.data == 0,
+ .double => {
+ const extra = builder.constantExtraData(Constant.Double, item.data);
+ return extra.lo == 0 and extra.hi == 0;
+ },
+ .fp128, .ppc_fp128 => {
+ const extra = builder.constantExtraData(Constant.Fp128, item.data);
+ return extra.lo_lo == 0 and extra.lo_hi == 0 and
+ extra.hi_lo == 0 and extra.hi_hi == 0;
+ },
+ .x86_fp80 => {
+ const extra = builder.constantExtraData(Constant.Fp80, item.data);
+ return extra.lo_lo == 0 and extra.lo_hi == 0 and extra.hi == 0;
+ },
+ .vector => {
+ var extra = builder.constantExtraDataTrail(Aggregate, item.data);
+ const len: u32 = @intCast(extra.data.type.aggregateLen(builder));
+ const vals = extra.trail.next(len, Constant, builder);
+ for (vals) |val| if (!val.isZeroInit(builder)) return false;
+ return true;
+ },
+ .null, .zeroinitializer => true,
+ else => false,
+ };
+ },
+ .global => return false,
+ }
+ }
+
+ pub fn getBase(self: Constant, builder: *const Builder) Global.Index {
+ var cur = self;
+ while (true) switch (cur.unwrap()) {
+ .constant => |constant| {
+ const item = builder.constant_items.get(constant);
+ switch (item.tag) {
+ .ptrtoint,
+ .inttoptr,
+ .bitcast,
+ => cur = builder.constantExtraData(Cast, item.data).val,
+ .getelementptr => cur = builder.constantExtraData(GetElementPtr, item.data).base,
+ .add => {
+ const extra = builder.constantExtraData(Binary, item.data);
+ const lhs_base = extra.lhs.getBase(builder);
+ const rhs_base = extra.rhs.getBase(builder);
+ return if (lhs_base != .none and rhs_base != .none)
+ .none
+ else if (lhs_base != .none) lhs_base else rhs_base;
+ },
+ .sub => {
+ const extra = builder.constantExtraData(Binary, item.data);
+ if (extra.rhs.getBase(builder) != .none) return .none;
+ cur = extra.lhs;
+ },
+ else => return .none,
+ }
+ },
+ .global => |global| switch (global.ptrConst(builder).kind) {
+ .alias => |alias| cur = alias.ptrConst(builder).init,
+ .variable, .function => return global,
+ .replaced => unreachable,
+ },
+ };
+ }
+
+ const FormatData = struct {
+ constant: Constant,
+ builder: *Builder,
+ };
+ fn format(
+ data: FormatData,
+ comptime fmt_str: []const u8,
+ _: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ if (comptime std.mem.indexOfNone(u8, fmt_str, ", %")) |_|
+ @compileError("invalid format string: '" ++ fmt_str ++ "'");
+ if (comptime std.mem.indexOfScalar(u8, fmt_str, ',') != null) {
+ if (data.constant == .no_init) return;
+ try writer.writeByte(',');
+ }
+ if (comptime std.mem.indexOfScalar(u8, fmt_str, ' ') != null) {
+ if (data.constant == .no_init) return;
+ try writer.writeByte(' ');
+ }
+ if (comptime std.mem.indexOfScalar(u8, fmt_str, '%') != null)
+ try writer.print("{%} ", .{data.constant.typeOf(data.builder).fmt(data.builder)});
+ assert(data.constant != .no_init);
+ if (std.enums.tagName(Constant, data.constant)) |name| return writer.writeAll(name);
+ switch (data.constant.unwrap()) {
+ .constant => |constant| {
+ const item = data.builder.constant_items.get(constant);
+ switch (item.tag) {
+ .positive_integer,
+ .negative_integer,
+ => |tag| {
+ const extra: *align(@alignOf(std.math.big.Limb)) Integer =
+ @ptrCast(data.builder.constant_limbs.items[item.data..][0..Integer.limbs]);
+ const limbs = data.builder.constant_limbs
+ .items[item.data + Integer.limbs ..][0..extra.limbs_len];
+ const bigint = std.math.big.int.Const{
+ .limbs = limbs,
+ .positive = tag == .positive_integer,
+ };
+ const ExpectedContents = extern struct {
+ string: [(64 * 8 / std.math.log2(10)) + 2]u8,
+ limbs: [
+ std.math.big.int.calcToStringLimbsBufferLen(
+ 64 / @sizeOf(std.math.big.Limb),
+ 10,
+ )
+ ]std.math.big.Limb,
+ };
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), data.builder.gpa);
+ const allocator = stack.get();
+ const str = bigint.toStringAlloc(allocator, 10, undefined) catch
+ return writer.writeAll("...");
+ defer allocator.free(str);
+ try writer.writeAll(str);
+ },
+ .half,
+ .bfloat,
+ => |tag| try writer.print("0x{c}{X:0>4}", .{ @as(u8, switch (tag) {
+ .half => 'H',
+ .bfloat => 'R',
+ else => unreachable,
+ }), item.data >> switch (tag) {
+ .half => 0,
+ .bfloat => 16,
+ else => unreachable,
+ } }),
+ .float => try writer.print("0x{X:0>16}", .{
+ @as(u64, @bitCast(@as(f64, @as(f32, @bitCast(item.data))))),
+ }),
+ .double => {
+ const extra = data.builder.constantExtraData(Double, item.data);
+ try writer.print("0x{X:0>8}{X:0>8}", .{ extra.hi, extra.lo });
+ },
+ .fp128,
+ .ppc_fp128,
+ => |tag| {
+ const extra = data.builder.constantExtraData(Fp128, item.data);
+ try writer.print("0x{c}{X:0>8}{X:0>8}{X:0>8}{X:0>8}", .{
+ @as(u8, switch (tag) {
+ .fp128 => 'L',
+ .ppc_fp128 => 'M',
+ else => unreachable,
+ }),
+ extra.lo_hi,
+ extra.lo_lo,
+ extra.hi_hi,
+ extra.hi_lo,
+ });
+ },
+ .x86_fp80 => {
+ const extra = data.builder.constantExtraData(Fp80, item.data);
+ try writer.print("0xK{X:0>4}{X:0>8}{X:0>8}", .{
+ extra.hi, extra.lo_hi, extra.lo_lo,
+ });
+ },
+ .null,
+ .none,
+ .zeroinitializer,
+ .undef,
+ .poison,
+ => |tag| try writer.writeAll(@tagName(tag)),
+ .structure,
+ .packed_structure,
+ .array,
+ .vector,
+ => |tag| {
+ var extra = data.builder.constantExtraDataTrail(Aggregate, item.data);
+ const len: u32 = @intCast(extra.data.type.aggregateLen(data.builder));
+ const vals = extra.trail.next(len, Constant, data.builder);
+ try writer.writeAll(switch (tag) {
+ .structure => "{ ",
+ .packed_structure => "<{ ",
+ .array => "[",
+ .vector => "<",
+ else => unreachable,
+ });
+ for (vals, 0..) |val, index| {
+ if (index > 0) try writer.writeAll(", ");
+ try writer.print("{%}", .{val.fmt(data.builder)});
+ }
+ try writer.writeAll(switch (tag) {
+ .structure => " }",
+ .packed_structure => " }>",
+ .array => "]",
+ .vector => ">",
+ else => unreachable,
+ });
+ },
+ .splat => {
+ const extra = data.builder.constantExtraData(Splat, item.data);
+ const len = extra.type.vectorLen(data.builder);
+ try writer.writeByte('<');
+ for (0..len) |index| {
+ if (index > 0) try writer.writeAll(", ");
+ try writer.print("{%}", .{extra.value.fmt(data.builder)});
+ }
+ try writer.writeByte('>');
+ },
+ inline .string,
+ .string_null,
+ => |tag| try writer.print("c{\"" ++ switch (tag) {
+ .string => "",
+ .string_null => "@",
+ else => unreachable,
+ } ++ "}", .{@as(String, @enumFromInt(item.data)).fmt(data.builder)}),
+ .blockaddress => |tag| {
+ const extra = data.builder.constantExtraData(BlockAddress, item.data);
+ const function = extra.function.ptrConst(data.builder);
+ try writer.print("{s}({}, %{d})", .{
+ @tagName(tag),
+ function.global.fmt(data.builder),
+ @intFromEnum(extra.block), // TODO
+ });
+ },
+ .dso_local_equivalent,
+ .no_cfi,
+ => |tag| {
+ const function: Function.Index = @enumFromInt(item.data);
+ try writer.print("{s} {}", .{
+ @tagName(tag),
+ function.ptrConst(data.builder).global.fmt(data.builder),
+ });
+ },
+ .trunc,
+ .zext,
+ .sext,
+ .fptrunc,
+ .fpext,
+ .fptoui,
+ .fptosi,
+ .uitofp,
+ .sitofp,
+ .ptrtoint,
+ .inttoptr,
+ .bitcast,
+ .addrspacecast,
+ => |tag| {
+ const extra = data.builder.constantExtraData(Cast, item.data);
+ try writer.print("{s} ({%} to {%})", .{
+ @tagName(tag),
+ extra.val.fmt(data.builder),
+ extra.type.fmt(data.builder),
+ });
+ },
+ .getelementptr,
+ .@"getelementptr inbounds",
+ => |tag| {
+ var extra = data.builder.constantExtraDataTrail(GetElementPtr, item.data);
+ const indices =
+ extra.trail.next(extra.data.info.indices_len, Constant, data.builder);
+ try writer.print("{s} ({%}, {%}", .{
+ @tagName(tag),
+ extra.data.type.fmt(data.builder),
+ extra.data.base.fmt(data.builder),
+ });
+ for (indices) |index| try writer.print(", {%}", .{index.fmt(data.builder)});
+ try writer.writeByte(')');
+ },
+ inline .icmp,
+ .fcmp,
+ => |tag| {
+ const extra = data.builder.constantExtraData(Compare, item.data);
+ try writer.print("{s} {s} ({%}, {%})", .{
+ @tagName(tag),
+ @tagName(@as(switch (tag) {
+ .icmp => IntegerCondition,
+ .fcmp => FloatCondition,
+ else => unreachable,
+ }, @enumFromInt(extra.cond))),
+ extra.lhs.fmt(data.builder),
+ extra.rhs.fmt(data.builder),
+ });
+ },
+ .extractelement => |tag| {
+ const extra = data.builder.constantExtraData(ExtractElement, item.data);
+ try writer.print("{s} ({%}, {%})", .{
+ @tagName(tag),
+ extra.val.fmt(data.builder),
+ extra.index.fmt(data.builder),
+ });
+ },
+ .insertelement => |tag| {
+ const extra = data.builder.constantExtraData(InsertElement, item.data);
+ try writer.print("{s} ({%}, {%}, {%})", .{
+ @tagName(tag),
+ extra.val.fmt(data.builder),
+ extra.elem.fmt(data.builder),
+ extra.index.fmt(data.builder),
+ });
+ },
+ .shufflevector => |tag| {
+ const extra = data.builder.constantExtraData(ShuffleVector, item.data);
+ try writer.print("{s} ({%}, {%}, {%})", .{
+ @tagName(tag),
+ extra.lhs.fmt(data.builder),
+ extra.rhs.fmt(data.builder),
+ extra.mask.fmt(data.builder),
+ });
+ },
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .shl,
+ .lshr,
+ .ashr,
+ .@"and",
+ .@"or",
+ .xor,
+ => |tag| {
+ const extra = data.builder.constantExtraData(Binary, item.data);
+ try writer.print("{s} ({%}, {%})", .{
+ @tagName(tag),
+ extra.lhs.fmt(data.builder),
+ extra.rhs.fmt(data.builder),
+ });
+ },
+ }
+ },
+ .global => |global| try writer.print("{}", .{global.fmt(data.builder)}),
+ }
+ }
+ pub fn fmt(self: Constant, builder: *Builder) std.fmt.Formatter(format) {
+ return .{ .data = .{ .constant = self, .builder = builder } };
+ }
+
+ pub fn toLlvm(self: Constant, builder: *const Builder) *llvm.Value {
+ assert(builder.useLibLlvm());
+ return switch (self.unwrap()) {
+ .constant => |constant| builder.llvm.constants.items[constant],
+ .global => |global| global.toLlvm(builder),
+ };
+ }
+};
+
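+/// Either a function-local instruction or an interned `Constant`, distinguished
+/// by the `first_constant` threshold (see `unwrap`).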
+pub const Value = enum(u32) {
+ none = std.math.maxInt(u31),
+ _,
+
+ const first_constant: Value = @enumFromInt(1 << 31);
+
+ pub fn unwrap(self: Value) union(enum) {
+ instruction: Function.Instruction.Index,
+ constant: Constant,
+ } {
+ return if (@intFromEnum(self) < @intFromEnum(first_constant))
+ .{ .instruction = @enumFromInt(@intFromEnum(self)) }
+ else
+ .{ .constant = @enumFromInt(@intFromEnum(self) - @intFromEnum(first_constant)) };
+ }
+
+ pub fn typeOfWip(self: Value, wip: *const WipFunction) Type {
+ return switch (self.unwrap()) {
+ .instruction => |instruction| instruction.typeOfWip(wip),
+ .constant => |constant| constant.typeOf(wip.builder),
+ };
+ }
+
+ pub fn typeOf(self: Value, function: Function.Index, builder: *Builder) Type {
+ return switch (self.unwrap()) {
+ .instruction => |instruction| instruction.typeOf(function, builder),
+ .constant => |constant| constant.typeOf(builder),
+ };
+ }
+
+ pub fn toConst(self: Value) ?Constant {
+ return switch (self.unwrap()) {
+ .instruction => null,
+ .constant => |constant| constant,
+ };
+ }
+
+ const FormatData = struct {
+ value: Value,
+ function: Function.Index,
+ builder: *Builder,
+ };
+ fn format(
+ data: FormatData,
+ comptime fmt_str: []const u8,
+ fmt_opts: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ switch (data.value.unwrap()) {
+ .instruction => |instruction| try Function.Instruction.Index.format(.{
+ .instruction = instruction,
+ .function = data.function,
+ .builder = data.builder,
+ }, fmt_str, fmt_opts, writer),
+ .constant => |constant| try Constant.format(.{
+ .constant = constant,
+ .builder = data.builder,
+ }, fmt_str, fmt_opts, writer),
+ }
+ }
+ pub fn fmt(self: Value, function: Function.Index, builder: *Builder) std.fmt.Formatter(format) {
+ return .{ .data = .{ .value = self, .function = function, .builder = builder } };
+ }
+
+ pub fn toLlvm(self: Value, wip: *const WipFunction) *llvm.Value {
+ return switch (self.unwrap()) {
+ .instruction => |instruction| instruction.toLlvm(wip),
+ .constant => |constant| constant.toLlvm(wip.builder),
+ };
+ }
+};
+
+pub const Metadata = enum(u32) { _ };
+
+pub const InitError = error{
+ InvalidLlvmTriple,
+} || Allocator.Error;
+
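+/// Creates a `Builder`, pre-interning the empty string and the common simple,
+/// integer, and pointer types so later `AssumeCapacity` lookups for them cannot
+/// fail; also creates the LLVM context and module when the LLVM library is in use.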
+pub fn init(options: Options) InitError!Builder {
+ var self = Builder{
+ .gpa = options.allocator,
+ .use_lib_llvm = options.use_lib_llvm,
+ .strip = options.strip,
+
+ .llvm = undefined,
+
+ .source_filename = .none,
+ .data_layout = .none,
+ .target_triple = .none,
+
+ .string_map = .{},
+ .string_bytes = .{},
+ .string_indices = .{},
+
+ .types = .{},
+ .next_unnamed_type = @enumFromInt(0),
+ .next_unique_type_id = .{},
+ .type_map = .{},
+ .type_items = .{},
+ .type_extra = .{},
+
+ .globals = .{},
+ .next_unnamed_global = @enumFromInt(0),
+ .next_replaced_global = .none,
+ .next_unique_global_id = .{},
+ .aliases = .{},
+ .variables = .{},
+ .functions = .{},
+
+ .constant_map = .{},
+ .constant_items = .{},
+ .constant_extra = .{},
+ .constant_limbs = .{},
+ };
+ if (self.useLibLlvm()) self.llvm = .{ .context = llvm.Context.create() };
+ errdefer self.deinit();
+
+ try self.string_indices.append(self.gpa, 0);
+ assert(try self.string("") == .empty);
+
+ if (options.name.len > 0) self.source_filename = try self.string(options.name);
+ self.initializeLLVMTarget(options.target.cpu.arch);
+ if (self.useLibLlvm()) self.llvm.module = llvm.Module.createWithName(
+ (self.source_filename.toSlice(&self) orelse "").ptr,
+ self.llvm.context,
+ );
+
+ if (options.triple.len > 0) {
+ self.target_triple = try self.string(options.triple);
+
+ if (self.useLibLlvm()) {
+ var error_message: [*:0]const u8 = undefined;
+ var target: *llvm.Target = undefined;
+ if (llvm.Target.getFromTriple(
+ self.target_triple.toSlice(&self).?,
+ &target,
+ &error_message,
+ ).toBool()) {
+ defer llvm.disposeMessage(error_message);
+
+ log.err("LLVM failed to parse '{s}': {s}", .{
+ self.target_triple.toSlice(&self).?,
+ error_message,
+ });
+ return InitError.InvalidLlvmTriple;
+ }
+ self.llvm.target = target;
+ self.llvm.module.?.setTarget(self.target_triple.toSlice(&self).?);
+ }
+ }
+
+ {
+ const static_len = @typeInfo(Type).Enum.fields.len - 1;
+ try self.type_map.ensureTotalCapacity(self.gpa, static_len);
+ try self.type_items.ensureTotalCapacity(self.gpa, static_len);
+ if (self.useLibLlvm()) try self.llvm.types.ensureTotalCapacity(self.gpa, static_len);
+ inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| {
+ const result = self.getOrPutTypeNoExtraAssumeCapacity(
+ .{ .tag = .simple, .data = simple_field.value },
+ );
+ assert(result.new and result.type == @field(Type, simple_field.name));
+ if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
+ @field(llvm.Context, simple_field.name ++ "Type")(self.llvm.context),
+ );
+ }
+ inline for (.{ 1, 8, 16, 29, 32, 64, 80, 128 }) |bits|
+ assert(self.intTypeAssumeCapacity(bits) ==
+ @field(Type, std.fmt.comptimePrint("i{d}", .{bits})));
+ inline for (.{0}) |addr_space|
+ assert(self.ptrTypeAssumeCapacity(@enumFromInt(addr_space)) == .ptr);
+ }
+
+ assert(try self.intConst(.i1, 0) == .false);
+ assert(try self.intConst(.i1, 1) == .true);
+ assert(try self.noneConst(.token) == .none);
+
+ return self;
+}
+
+pub fn deinit(self: *Builder) void {
+ self.string_map.deinit(self.gpa);
+ self.string_bytes.deinit(self.gpa);
+ self.string_indices.deinit(self.gpa);
+
+ self.types.deinit(self.gpa);
+ self.next_unique_type_id.deinit(self.gpa);
+ self.type_map.deinit(self.gpa);
+ self.type_items.deinit(self.gpa);
+ self.type_extra.deinit(self.gpa);
+
+ self.globals.deinit(self.gpa);
+ self.next_unique_global_id.deinit(self.gpa);
+ self.aliases.deinit(self.gpa);
+ self.variables.deinit(self.gpa);
+ for (self.functions.items) |*function| function.deinit(self.gpa);
+ self.functions.deinit(self.gpa);
+
+ self.constant_map.deinit(self.gpa);
+ self.constant_items.deinit(self.gpa);
+ self.constant_extra.deinit(self.gpa);
+ self.constant_limbs.deinit(self.gpa);
+
+ if (self.useLibLlvm()) {
+ self.llvm.constants.deinit(self.gpa);
+ self.llvm.globals.deinit(self.gpa);
+ self.llvm.types.deinit(self.gpa);
+ if (self.llvm.di_builder) |di_builder| di_builder.dispose();
+ if (self.llvm.module) |module| module.dispose();
+ self.llvm.context.dispose();
+ }
+ self.* = undefined;
+}
+
+pub fn initializeLLVMTarget(self: *const Builder, arch: std.Target.Cpu.Arch) void {
+ if (!self.useLibLlvm()) return;
+ switch (arch) {
+ .aarch64, .aarch64_be, .aarch64_32 => {
+ llvm.LLVMInitializeAArch64Target();
+ llvm.LLVMInitializeAArch64TargetInfo();
+ llvm.LLVMInitializeAArch64TargetMC();
+ llvm.LLVMInitializeAArch64AsmPrinter();
+ llvm.LLVMInitializeAArch64AsmParser();
+ },
+ .amdgcn => {
+ llvm.LLVMInitializeAMDGPUTarget();
+ llvm.LLVMInitializeAMDGPUTargetInfo();
+ llvm.LLVMInitializeAMDGPUTargetMC();
+ llvm.LLVMInitializeAMDGPUAsmPrinter();
+ llvm.LLVMInitializeAMDGPUAsmParser();
+ },
+ .thumb, .thumbeb, .arm, .armeb => {
+ llvm.LLVMInitializeARMTarget();
+ llvm.LLVMInitializeARMTargetInfo();
+ llvm.LLVMInitializeARMTargetMC();
+ llvm.LLVMInitializeARMAsmPrinter();
+ llvm.LLVMInitializeARMAsmParser();
+ },
+ .avr => {
+ llvm.LLVMInitializeAVRTarget();
+ llvm.LLVMInitializeAVRTargetInfo();
+ llvm.LLVMInitializeAVRTargetMC();
+ llvm.LLVMInitializeAVRAsmPrinter();
+ llvm.LLVMInitializeAVRAsmParser();
+ },
+ .bpfel, .bpfeb => {
+ llvm.LLVMInitializeBPFTarget();
+ llvm.LLVMInitializeBPFTargetInfo();
+ llvm.LLVMInitializeBPFTargetMC();
+ llvm.LLVMInitializeBPFAsmPrinter();
+ llvm.LLVMInitializeBPFAsmParser();
+ },
+ .hexagon => {
+ llvm.LLVMInitializeHexagonTarget();
+ llvm.LLVMInitializeHexagonTargetInfo();
+ llvm.LLVMInitializeHexagonTargetMC();
+ llvm.LLVMInitializeHexagonAsmPrinter();
+ llvm.LLVMInitializeHexagonAsmParser();
+ },
+ .lanai => {
+ llvm.LLVMInitializeLanaiTarget();
+ llvm.LLVMInitializeLanaiTargetInfo();
+ llvm.LLVMInitializeLanaiTargetMC();
+ llvm.LLVMInitializeLanaiAsmPrinter();
+ llvm.LLVMInitializeLanaiAsmParser();
+ },
+ .mips, .mipsel, .mips64, .mips64el => {
+ llvm.LLVMInitializeMipsTarget();
+ llvm.LLVMInitializeMipsTargetInfo();
+ llvm.LLVMInitializeMipsTargetMC();
+ llvm.LLVMInitializeMipsAsmPrinter();
+ llvm.LLVMInitializeMipsAsmParser();
+ },
+ .msp430 => {
+ llvm.LLVMInitializeMSP430Target();
+ llvm.LLVMInitializeMSP430TargetInfo();
+ llvm.LLVMInitializeMSP430TargetMC();
+ llvm.LLVMInitializeMSP430AsmPrinter();
+ llvm.LLVMInitializeMSP430AsmParser();
+ },
+ .nvptx, .nvptx64 => {
+ llvm.LLVMInitializeNVPTXTarget();
+ llvm.LLVMInitializeNVPTXTargetInfo();
+ llvm.LLVMInitializeNVPTXTargetMC();
+ llvm.LLVMInitializeNVPTXAsmPrinter();
+ // There is no LLVMInitializeNVPTXAsmParser function available.
+ },
+ .powerpc, .powerpcle, .powerpc64, .powerpc64le => {
+ llvm.LLVMInitializePowerPCTarget();
+ llvm.LLVMInitializePowerPCTargetInfo();
+ llvm.LLVMInitializePowerPCTargetMC();
+ llvm.LLVMInitializePowerPCAsmPrinter();
+ llvm.LLVMInitializePowerPCAsmParser();
+ },
+ .riscv32, .riscv64 => {
+ llvm.LLVMInitializeRISCVTarget();
+ llvm.LLVMInitializeRISCVTargetInfo();
+ llvm.LLVMInitializeRISCVTargetMC();
+ llvm.LLVMInitializeRISCVAsmPrinter();
+ llvm.LLVMInitializeRISCVAsmParser();
+ },
+ .sparc, .sparc64, .sparcel => {
+ llvm.LLVMInitializeSparcTarget();
+ llvm.LLVMInitializeSparcTargetInfo();
+ llvm.LLVMInitializeSparcTargetMC();
+ llvm.LLVMInitializeSparcAsmPrinter();
+ llvm.LLVMInitializeSparcAsmParser();
+ },
+ .s390x => {
+ llvm.LLVMInitializeSystemZTarget();
+ llvm.LLVMInitializeSystemZTargetInfo();
+ llvm.LLVMInitializeSystemZTargetMC();
+ llvm.LLVMInitializeSystemZAsmPrinter();
+ llvm.LLVMInitializeSystemZAsmParser();
+ },
+ .wasm32, .wasm64 => {
+ llvm.LLVMInitializeWebAssemblyTarget();
+ llvm.LLVMInitializeWebAssemblyTargetInfo();
+ llvm.LLVMInitializeWebAssemblyTargetMC();
+ llvm.LLVMInitializeWebAssemblyAsmPrinter();
+ llvm.LLVMInitializeWebAssemblyAsmParser();
+ },
+ .x86, .x86_64 => {
+ llvm.LLVMInitializeX86Target();
+ llvm.LLVMInitializeX86TargetInfo();
+ llvm.LLVMInitializeX86TargetMC();
+ llvm.LLVMInitializeX86AsmPrinter();
+ llvm.LLVMInitializeX86AsmParser();
+ },
+ .xtensa => {
+ if (build_options.llvm_has_xtensa) {
+ llvm.LLVMInitializeXtensaTarget();
+ llvm.LLVMInitializeXtensaTargetInfo();
+ llvm.LLVMInitializeXtensaTargetMC();
+ llvm.LLVMInitializeXtensaAsmPrinter();
+ llvm.LLVMInitializeXtensaAsmParser();
+ }
+ },
+ .xcore => {
+ llvm.LLVMInitializeXCoreTarget();
+ llvm.LLVMInitializeXCoreTargetInfo();
+ llvm.LLVMInitializeXCoreTargetMC();
+ llvm.LLVMInitializeXCoreAsmPrinter();
+ // There is no LLVMInitializeXCoreAsmParser function.
+ },
+ .m68k => {
+ if (build_options.llvm_has_m68k) {
+ llvm.LLVMInitializeM68kTarget();
+ llvm.LLVMInitializeM68kTargetInfo();
+ llvm.LLVMInitializeM68kTargetMC();
+ llvm.LLVMInitializeM68kAsmPrinter();
+ llvm.LLVMInitializeM68kAsmParser();
+ }
+ },
+ .csky => {
+ if (build_options.llvm_has_csky) {
+ llvm.LLVMInitializeCSKYTarget();
+ llvm.LLVMInitializeCSKYTargetInfo();
+ llvm.LLVMInitializeCSKYTargetMC();
+ // There is no LLVMInitializeCSKYAsmPrinter function.
+ llvm.LLVMInitializeCSKYAsmParser();
+ }
+ },
+ .ve => {
+ llvm.LLVMInitializeVETarget();
+ llvm.LLVMInitializeVETargetInfo();
+ llvm.LLVMInitializeVETargetMC();
+ llvm.LLVMInitializeVEAsmPrinter();
+ llvm.LLVMInitializeVEAsmParser();
+ },
+ .arc => {
+ if (build_options.llvm_has_arc) {
+ llvm.LLVMInitializeARCTarget();
+ llvm.LLVMInitializeARCTargetInfo();
+ llvm.LLVMInitializeARCTargetMC();
+ llvm.LLVMInitializeARCAsmPrinter();
+ // There is no LLVMInitializeARCAsmParser function.
+ }
+ },
+
+ // LLVM backends that have no initialization functions.
+ .tce,
+ .tcele,
+ .r600,
+ .le32,
+ .le64,
+ .amdil,
+ .amdil64,
+ .hsail,
+ .hsail64,
+ .shave,
+ .spir,
+ .spir64,
+ .kalimba,
+ .renderscript32,
+ .renderscript64,
+ .dxil,
+ .loongarch32,
+ .loongarch64,
+ => {},
+
+ .spu_2 => unreachable, // LLVM does not support this backend
+ .spirv32 => unreachable, // LLVM does not support this backend
+ .spirv64 => unreachable, // LLVM does not support this backend
+ }
+}
+
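+/// Interns `bytes` into the builder's string table. If an identical byte
+/// sequence was interned before, the existing `String` index is returned;
+/// otherwise the bytes are appended with a trailing NUL and a new index is created.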
+pub fn string(self: *Builder, bytes: []const u8) Allocator.Error!String {
+ try self.string_bytes.ensureUnusedCapacity(self.gpa, bytes.len + 1);
+ try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
+ try self.string_map.ensureUnusedCapacity(self.gpa, 1);
+
+ const gop = self.string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ self.string_bytes.appendSliceAssumeCapacity(bytes);
+ self.string_bytes.appendAssumeCapacity(0);
+ self.string_indices.appendAssumeCapacity(@intCast(self.string_bytes.items.len));
+ }
+ return String.fromIndex(gop.index);
+}
+
+pub fn stringIfExists(self: *const Builder, bytes: []const u8) ?String {
+ return String.fromIndex(
+ self.string_map.getIndexAdapted(bytes, String.Adapter{ .builder = self }) orelse return null,
+ );
+}
+
+pub fn fmt(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) Allocator.Error!String {
+ try self.string_map.ensureUnusedCapacity(self.gpa, 1);
+ try self.string_bytes.ensureUnusedCapacity(self.gpa, std.fmt.count(fmt_str ++ .{0}, fmt_args));
+ try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
+ return self.fmtAssumeCapacity(fmt_str, fmt_args);
+}
+
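+/// Formats directly into `string_bytes`, then deduplicates: if the rendered
+/// text was already interned, the freshly written bytes are discarded and the
+/// existing `String` is returned. Capacity must already be reserved (see `fmt`).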
+pub fn fmtAssumeCapacity(self: *Builder, comptime fmt_str: []const u8, fmt_args: anytype) String {
+ const start = self.string_bytes.items.len;
+ self.string_bytes.writer(self.gpa).print(fmt_str ++ .{0}, fmt_args) catch unreachable;
+ const bytes: []const u8 = self.string_bytes.items[start .. self.string_bytes.items.len - 1];
+
+ const gop = self.string_map.getOrPutAssumeCapacityAdapted(bytes, String.Adapter{ .builder = self });
+ if (gop.found_existing) {
+ self.string_bytes.shrinkRetainingCapacity(start);
+ } else {
+ self.string_indices.appendAssumeCapacity(@intCast(self.string_bytes.items.len));
+ }
+ return String.fromIndex(gop.index);
+}
+
+pub fn fnType(
+ self: *Builder,
+ ret: Type,
+ params: []const Type,
+ kind: Type.Function.Kind,
+) Allocator.Error!Type {
+ try self.ensureUnusedTypeCapacity(1, Type.Function, params.len);
+ return switch (kind) {
+ inline else => |comptime_kind| self.fnTypeAssumeCapacity(ret, params, comptime_kind),
+ };
+}
+
+pub fn intType(self: *Builder, bits: u24) Allocator.Error!Type {
+ try self.ensureUnusedTypeCapacity(1, NoExtra, 0);
+ return self.intTypeAssumeCapacity(bits);
+}
+
+pub fn ptrType(self: *Builder, addr_space: AddrSpace) Allocator.Error!Type {
+ try self.ensureUnusedTypeCapacity(1, NoExtra, 0);
+ return self.ptrTypeAssumeCapacity(addr_space);
+}
+
+pub fn vectorType(
+ self: *Builder,
+ kind: Type.Vector.Kind,
+ len: u32,
+ child: Type,
+) Allocator.Error!Type {
+ try self.ensureUnusedTypeCapacity(1, Type.Vector, 0);
+ return switch (kind) {
+ inline else => |comptime_kind| self.vectorTypeAssumeCapacity(comptime_kind, len, child),
+ };
+}
+
+pub fn arrayType(self: *Builder, len: u64, child: Type) Allocator.Error!Type {
+ comptime assert(@sizeOf(Type.Array) >= @sizeOf(Type.Vector));
+ try self.ensureUnusedTypeCapacity(1, Type.Array, 0);
+ return self.arrayTypeAssumeCapacity(len, child);
+}
+
+pub fn structType(
+ self: *Builder,
+ kind: Type.Structure.Kind,
+ fields: []const Type,
+) Allocator.Error!Type {
+ try self.ensureUnusedTypeCapacity(1, Type.Structure, fields.len);
+ return switch (kind) {
+ inline else => |comptime_kind| self.structTypeAssumeCapacity(comptime_kind, fields),
+ };
+}
+
+pub fn opaqueType(self: *Builder, name: String) Allocator.Error!Type {
+ try self.string_map.ensureUnusedCapacity(self.gpa, 1);
+ if (name.toSlice(self)) |id| try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len +
+ comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)}));
+ try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
+ try self.types.ensureUnusedCapacity(self.gpa, 1);
+ try self.next_unique_type_id.ensureUnusedCapacity(self.gpa, 1);
+ try self.ensureUnusedTypeCapacity(1, Type.NamedStructure, 0);
+ return self.opaqueTypeAssumeCapacity(name);
+}
+
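+/// Fills in the field list of a named structure type previously created as
+/// opaque via `opaqueType`. When backed by libLLVM, the body is mirrored onto
+/// the corresponding LLVM struct type as well.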
+pub fn namedTypeSetBody(
+ self: *Builder,
+ named_type: Type,
+ body_type: Type,
+) if (build_options.have_llvm) Allocator.Error!void else void {
+ const named_item = self.type_items.items[@intFromEnum(named_type)];
+ self.type_extra.items[named_item.data + std.meta.fieldIndex(Type.NamedStructure, "body").?] =
+ @intFromEnum(body_type);
+ if (self.useLibLlvm()) {
+ const body_item = self.type_items.items[@intFromEnum(body_type)];
+ var body_extra = self.typeExtraDataTrail(Type.Structure, body_item.data);
+ const body_fields = body_extra.trail.next(body_extra.data.fields_len, Type, self);
+ const llvm_fields = try self.gpa.alloc(*llvm.Type, body_fields.len);
+ defer self.gpa.free(llvm_fields);
+ for (llvm_fields, body_fields) |*llvm_field, body_field| llvm_field.* = body_field.toLlvm(self);
+ self.llvm.types.items[@intFromEnum(named_type)].structSetBody(
+ llvm_fields.ptr,
+ @intCast(llvm_fields.len),
+ switch (body_item.tag) {
+ .structure => .False,
+ .packed_structure => .True,
+ else => unreachable,
+ },
+ );
+ }
+}
+
+pub fn addGlobal(self: *Builder, name: String, global: Global) Allocator.Error!Global.Index {
+ assert(!name.isAnon());
+ try self.ensureUnusedTypeCapacity(1, NoExtra, 0);
+ try self.ensureUnusedGlobalCapacity(name);
+ return self.addGlobalAssumeCapacity(name, global);
+}
+
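+/// An empty name is replaced by the next unnamed-global id. If the requested
+/// name is already taken, a ".N" suffix (counted per base name) is appended
+/// until an unused name is found.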
+pub fn addGlobalAssumeCapacity(self: *Builder, name: String, global: Global) Global.Index {
+ _ = self.ptrTypeAssumeCapacity(global.addr_space);
+ var id = name;
+ if (name == .empty) {
+ id = self.next_unnamed_global;
+ assert(id != self.next_replaced_global);
+ self.next_unnamed_global = @enumFromInt(@intFromEnum(id) + 1);
+ }
+ while (true) {
+ const global_gop = self.globals.getOrPutAssumeCapacity(id);
+ if (!global_gop.found_existing) {
+ global_gop.value_ptr.* = global;
+ global_gop.value_ptr.updateAttributes();
+ const index: Global.Index = @enumFromInt(global_gop.index);
+ index.updateName(self);
+ return index;
+ }
+
+ const unique_gop = self.next_unique_global_id.getOrPutAssumeCapacity(name);
+ if (!unique_gop.found_existing) unique_gop.value_ptr.* = 2;
+ id = self.fmtAssumeCapacity("{s}.{d}", .{ name.toSlice(self).?, unique_gop.value_ptr.* });
+ unique_gop.value_ptr.* += 1;
+ }
+}
+
+pub fn getGlobal(self: *const Builder, name: String) ?Global.Index {
+ return @enumFromInt(self.globals.getIndex(name) orelse return null);
+}
+
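+/// Creates an integer constant of type `ty` from any integer or comptime_int
+/// value, routing through `bigIntConst` via a stack-allocated big integer.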
+pub fn intConst(self: *Builder, ty: Type, value: anytype) Allocator.Error!Constant {
+ var limbs: [
+ switch (@typeInfo(@TypeOf(value))) {
+ .Int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits),
+ .ComptimeInt => std.math.big.int.calcLimbLen(value),
+ else => @compileError("intConst expected an integral value, got " ++
+ @typeName(@TypeOf(value))),
+ }
+ ]std.math.big.Limb = undefined;
+ return self.bigIntConst(ty, std.math.big.int.Mutable.init(&limbs, value).toConst());
+}
+
+pub fn intValue(self: *Builder, ty: Type, value: anytype) Allocator.Error!Value {
+ return (try self.intConst(ty, value)).toValue();
+}
+
+pub fn bigIntConst(self: *Builder, ty: Type, value: std.math.big.int.Const) Allocator.Error!Constant {
+ try self.constant_map.ensureUnusedCapacity(self.gpa, 1);
+ try self.constant_items.ensureUnusedCapacity(self.gpa, 1);
+ try self.constant_limbs.ensureUnusedCapacity(self.gpa, Constant.Integer.limbs + value.limbs.len);
+ if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, 1);
+ return self.bigIntConstAssumeCapacity(ty, value);
+}
+
+pub fn bigIntValue(self: *Builder, ty: Type, value: std.math.big.int.Const) Allocator.Error!Value {
+ return (try self.bigIntConst(ty, value)).toValue();
+}
+
+pub fn fpConst(self: *Builder, ty: Type, comptime val: comptime_float) Allocator.Error!Constant {
+ return switch (ty) {
+ .half => try self.halfConst(val),
+ .bfloat => try self.bfloatConst(val),
+ .float => try self.floatConst(val),
+ .double => try self.doubleConst(val),
+ .fp128 => try self.fp128Const(val),
+ .x86_fp80 => try self.x86_fp80Const(val),
+ .ppc_fp128 => try self.ppc_fp128Const(.{ val, -0.0 }),
+ else => unreachable,
+ };
+}
+
+pub fn fpValue(self: *Builder, ty: Type, comptime value: comptime_float) Allocator.Error!Value {
+ return (try self.fpConst(ty, value)).toValue();
+}
+
+pub fn nanConst(self: *Builder, ty: Type) Allocator.Error!Constant {
+ return switch (ty) {
+ .half => try self.halfConst(std.math.nan(f16)),
+ .bfloat => try self.bfloatConst(std.math.nan(f32)),
+ .float => try self.floatConst(std.math.nan(f32)),
+ .double => try self.doubleConst(std.math.nan(f64)),
+ .fp128 => try self.fp128Const(std.math.nan(f128)),
+ .x86_fp80 => try self.x86_fp80Const(std.math.nan(f80)),
+ .ppc_fp128 => try self.ppc_fp128Const(.{std.math.nan(f64)} ** 2),
+ else => unreachable,
+ };
+}
+
+pub fn nanValue(self: *Builder, ty: Type) Allocator.Error!Value {
+ return (try self.nanConst(ty)).toValue();
+}
+
+pub fn halfConst(self: *Builder, val: f16) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.halfConstAssumeCapacity(val);
+}
+
+pub fn halfValue(self: *Builder, value: f16) Allocator.Error!Value {
+ return (try self.halfConst(value)).toValue();
+}
+
+pub fn bfloatConst(self: *Builder, val: f32) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.bfloatConstAssumeCapacity(val);
+}
+
+pub fn bfloatValue(self: *Builder, value: f32) Allocator.Error!Value {
+ return (try self.bfloatConst(value)).toValue();
+}
+
+pub fn floatConst(self: *Builder, val: f32) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.floatConstAssumeCapacity(val);
+}
+
+pub fn floatValue(self: *Builder, value: f32) Allocator.Error!Value {
+ return (try self.floatConst(value)).toValue();
+}
+
+pub fn doubleConst(self: *Builder, val: f64) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Double, 0);
+ return self.doubleConstAssumeCapacity(val);
+}
+
+pub fn doubleValue(self: *Builder, value: f64) Allocator.Error!Value {
+ return (try self.doubleConst(value)).toValue();
+}
+
+pub fn fp128Const(self: *Builder, val: f128) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0);
+ return self.fp128ConstAssumeCapacity(val);
+}
+
+pub fn fp128Value(self: *Builder, value: f128) Allocator.Error!Value {
+ return (try self.fp128Const(value)).toValue();
+}
+
+pub fn x86_fp80Const(self: *Builder, val: f80) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Fp80, 0);
+ return self.x86_fp80ConstAssumeCapacity(val);
+}
+
+pub fn x86_fp80Value(self: *Builder, value: f80) Allocator.Error!Value {
+ return (try self.x86_fp80Const(value)).toValue();
+}
+
+pub fn ppc_fp128Const(self: *Builder, val: [2]f64) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0);
+ return self.ppc_fp128ConstAssumeCapacity(val);
+}
+
+pub fn ppc_fp128Value(self: *Builder, value: [2]f64) Allocator.Error!Value {
+ return (try self.ppc_fp128Const(value)).toValue();
+}
+
+pub fn nullConst(self: *Builder, ty: Type) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.nullConstAssumeCapacity(ty);
+}
+
+pub fn nullValue(self: *Builder, ty: Type) Allocator.Error!Value {
+ return (try self.nullConst(ty)).toValue();
+}
+
+pub fn noneConst(self: *Builder, ty: Type) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.noneConstAssumeCapacity(ty);
+}
+
+pub fn noneValue(self: *Builder, ty: Type) Allocator.Error!Value {
+ return (try self.noneConst(ty)).toValue();
+}
+
+pub fn structConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len);
+ return self.structConstAssumeCapacity(ty, vals);
+}
+
+pub fn structValue(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Value {
+ return (try self.structConst(ty, vals)).toValue();
+}
+
+pub fn arrayConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len);
+ return self.arrayConstAssumeCapacity(ty, vals);
+}
+
+pub fn arrayValue(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Value {
+ return (try self.arrayConst(ty, vals)).toValue();
+}
+
+pub fn stringConst(self: *Builder, val: String) Allocator.Error!Constant {
+ try self.ensureUnusedTypeCapacity(1, Type.Array, 0);
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.stringConstAssumeCapacity(val);
+}
+
+pub fn stringValue(self: *Builder, val: String) Allocator.Error!Value {
+ return (try self.stringConst(val)).toValue();
+}
+
+pub fn stringNullConst(self: *Builder, val: String) Allocator.Error!Constant {
+ try self.ensureUnusedTypeCapacity(1, Type.Array, 0);
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.stringNullConstAssumeCapacity(val);
+}
+
+pub fn stringNullValue(self: *Builder, val: String) Allocator.Error!Value {
+ return (try self.stringNullConst(val)).toValue();
+}
+
+pub fn vectorConst(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Aggregate, vals.len);
+ return self.vectorConstAssumeCapacity(ty, vals);
+}
+
+pub fn vectorValue(self: *Builder, ty: Type, vals: []const Constant) Allocator.Error!Value {
+ return (try self.vectorConst(ty, vals)).toValue();
+}
+
+pub fn splatConst(self: *Builder, ty: Type, val: Constant) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Splat, 0);
+ return self.splatConstAssumeCapacity(ty, val);
+}
+
+pub fn splatValue(self: *Builder, ty: Type, val: Constant) Allocator.Error!Value {
+ return (try self.splatConst(ty, val)).toValue();
+}
+
+pub fn zeroInitConst(self: *Builder, ty: Type) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Fp128, 0);
+ try self.constant_limbs.ensureUnusedCapacity(
+ self.gpa,
+ Constant.Integer.limbs + comptime std.math.big.int.calcLimbLen(0),
+ );
+ return self.zeroInitConstAssumeCapacity(ty);
+}
+
+pub fn zeroInitValue(self: *Builder, ty: Type) Allocator.Error!Value {
+ return (try self.zeroInitConst(ty)).toValue();
+}
+
+pub fn undefConst(self: *Builder, ty: Type) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.undefConstAssumeCapacity(ty);
+}
+
+pub fn undefValue(self: *Builder, ty: Type) Allocator.Error!Value {
+ return (try self.undefConst(ty)).toValue();
+}
+
+pub fn poisonConst(self: *Builder, ty: Type) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.poisonConstAssumeCapacity(ty);
+}
+
+pub fn poisonValue(self: *Builder, ty: Type) Allocator.Error!Value {
+ return (try self.poisonConst(ty)).toValue();
+}
+
+pub fn blockAddrConst(
+ self: *Builder,
+ function: Function.Index,
+ block: Function.Block.Index,
+) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.BlockAddress, 0);
+ return self.blockAddrConstAssumeCapacity(function, block);
+}
+
+pub fn blockAddrValue(
+ self: *Builder,
+ function: Function.Index,
+ block: Function.Block.Index,
+) Allocator.Error!Value {
+ return (try self.blockAddrConst(function, block)).toValue();
+}
+
+pub fn dsoLocalEquivalentConst(self: *Builder, function: Function.Index) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.dsoLocalEquivalentConstAssumeCapacity(function);
+}
+
+pub fn dsoLocalEquivalentValue(self: *Builder, function: Function.Index) Allocator.Error!Value {
+ return (try self.dsoLocalEquivalentConst(function)).toValue();
+}
+
+pub fn noCfiConst(self: *Builder, function: Function.Index) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, NoExtra, 0);
+ return self.noCfiConstAssumeCapacity(function);
+}
+
+pub fn noCfiValue(self: *Builder, function: Function.Index) Allocator.Error!Value {
+ return (try self.noCfiConst(function)).toValue();
+}
+
+pub fn convConst(
+ self: *Builder,
+ signedness: Constant.Cast.Signedness,
+ val: Constant,
+ ty: Type,
+) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Cast, 0);
+ return self.convConstAssumeCapacity(signedness, val, ty);
+}
+
+pub fn convValue(
+ self: *Builder,
+ signedness: Constant.Cast.Signedness,
+ val: Constant,
+ ty: Type,
+) Allocator.Error!Value {
+ return (try self.convConst(signedness, val, ty)).toValue();
+}
+
+pub fn castConst(self: *Builder, tag: Constant.Tag, val: Constant, ty: Type) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Cast, 0);
+ return self.castConstAssumeCapacity(tag, val, ty);
+}
+
+pub fn castValue(self: *Builder, tag: Constant.Tag, val: Constant, ty: Type) Allocator.Error!Value {
+ return (try self.castConst(tag, val, ty)).toValue();
+}
+
+pub fn gepConst(
+ self: *Builder,
+ comptime kind: Constant.GetElementPtr.Kind,
+ ty: Type,
+ base: Constant,
+ inrange: ?u16,
+ indices: []const Constant,
+) Allocator.Error!Constant {
+ try self.ensureUnusedTypeCapacity(1, Type.Vector, 0);
+ try self.ensureUnusedConstantCapacity(1, Constant.GetElementPtr, indices.len);
+ return self.gepConstAssumeCapacity(kind, ty, base, inrange, indices);
+}
+
+pub fn gepValue(
+ self: *Builder,
+ comptime kind: Constant.GetElementPtr.Kind,
+ ty: Type,
+ base: Constant,
+ inrange: ?u16,
+ indices: []const Constant,
+) Allocator.Error!Value {
+ return (try self.gepConst(kind, ty, base, inrange, indices)).toValue();
+}
+
+pub fn icmpConst(
+ self: *Builder,
+ cond: IntegerCondition,
+ lhs: Constant,
+ rhs: Constant,
+) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Compare, 0);
+ return self.icmpConstAssumeCapacity(cond, lhs, rhs);
+}
+
+pub fn icmpValue(
+ self: *Builder,
+ cond: IntegerCondition,
+ lhs: Constant,
+ rhs: Constant,
+) Allocator.Error!Value {
+ return (try self.icmpConst(cond, lhs, rhs)).toValue();
+}
+
+pub fn fcmpConst(
+ self: *Builder,
+ cond: FloatCondition,
+ lhs: Constant,
+ rhs: Constant,
+) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Compare, 0);
+ return self.fcmpConstAssumeCapacity(cond, lhs, rhs);
+}
+
+pub fn fcmpValue(
+ self: *Builder,
+ cond: FloatCondition,
+ lhs: Constant,
+ rhs: Constant,
+) Allocator.Error!Value {
+ return (try self.fcmpConst(cond, lhs, rhs)).toValue();
+}
+
+pub fn extractElementConst(self: *Builder, val: Constant, index: Constant) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.ExtractElement, 0);
+ return self.extractElementConstAssumeCapacity(val, index);
+}
+
+pub fn extractElementValue(self: *Builder, val: Constant, index: Constant) Allocator.Error!Value {
+ return (try self.extractElementConst(val, index)).toValue();
+}
+
+pub fn insertElementConst(
+ self: *Builder,
+ val: Constant,
+ elem: Constant,
+ index: Constant,
+) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.InsertElement, 0);
+ return self.insertElementConstAssumeCapacity(val, elem, index);
+}
+
+pub fn insertElementValue(
+ self: *Builder,
+ val: Constant,
+ elem: Constant,
+ index: Constant,
+) Allocator.Error!Value {
+ return (try self.insertElementConst(val, elem, index)).toValue();
+}
+
+pub fn shuffleVectorConst(
+ self: *Builder,
+ lhs: Constant,
+ rhs: Constant,
+ mask: Constant,
+) Allocator.Error!Constant {
+ try self.ensureUnusedTypeCapacity(1, Type.Array, 0);
+ try self.ensureUnusedConstantCapacity(1, Constant.ShuffleVector, 0);
+ return self.shuffleVectorConstAssumeCapacity(lhs, rhs, mask);
+}
+
+pub fn shuffleVectorValue(
+ self: *Builder,
+ lhs: Constant,
+ rhs: Constant,
+ mask: Constant,
+) Allocator.Error!Value {
+ return (try self.shuffleVectorConst(lhs, rhs, mask)).toValue();
+}
+
+pub fn binConst(
+ self: *Builder,
+ tag: Constant.Tag,
+ lhs: Constant,
+ rhs: Constant,
+) Allocator.Error!Constant {
+ try self.ensureUnusedConstantCapacity(1, Constant.Binary, 0);
+ return self.binConstAssumeCapacity(tag, lhs, rhs);
+}
+
+pub fn binValue(self: *Builder, tag: Constant.Tag, lhs: Constant, rhs: Constant) Allocator.Error!Value {
+ return (try self.binConst(tag, lhs, rhs)).toValue();
+}
+
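+/// Dumps the module: through LLVM's native dump when libLLVM is in use,
+/// otherwise via the self-hosted printer to stderr.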
+pub fn dump(self: *Builder) void {
+ if (self.useLibLlvm())
+ self.llvm.module.?.dump()
+ else
+ self.print(std.io.getStdErr().writer()) catch {};
+}
+
+pub fn printToFile(self: *Builder, path: []const u8) Allocator.Error!bool {
+ const path_z = try self.gpa.dupeZ(u8, path);
+ defer self.gpa.free(path_z);
+ return self.printToFileZ(path_z);
+}
+
+pub fn printToFileZ(self: *Builder, path: [*:0]const u8) bool {
+ if (self.useLibLlvm()) {
+ var error_message: [*:0]const u8 = undefined;
+ if (self.llvm.module.?.printModuleToFile(path, &error_message).toBool()) {
+ defer llvm.disposeMessage(error_message);
+ log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, error_message });
+ return false;
+ }
+ } else {
+ var file = std.fs.cwd().createFileZ(path, .{}) catch |err| {
+ log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
+ return false;
+ };
+ defer file.close();
+ self.print(file.writer()) catch |err| {
+ log.err("failed printing LLVM module to \"{s}\": {s}", .{ path, @errorName(err) });
+ return false;
+ };
+ }
+ return true;
+}
+
+pub fn print(self: *Builder, writer: anytype) (@TypeOf(writer).Error || Allocator.Error)!void {
+ var bw = std.io.bufferedWriter(writer);
+ try self.printUnbuffered(bw.writer());
+ try bw.flush();
+}
+
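+/// Renders the module as textual LLVM IR: module header, named types, global
+/// variables, then function declarations and definitions.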
+pub fn printUnbuffered(
+ self: *Builder,
+ writer: anytype,
+) (@TypeOf(writer).Error || Allocator.Error)!void {
+ if (self.source_filename != .none) try writer.print(
+ \\; ModuleID = '{s}'
+ \\source_filename = {"}
+ \\
+ , .{ self.source_filename.toSlice(self).?, self.source_filename.fmt(self) });
+ if (self.data_layout != .none) try writer.print(
+ \\target datalayout = {"}
+ \\
+ , .{self.data_layout.fmt(self)});
+ if (self.target_triple != .none) try writer.print(
+ \\target triple = {"}
+ \\
+ , .{self.target_triple.fmt(self)});
+ try writer.writeByte('\n');
+ for (self.types.keys(), self.types.values()) |id, ty| try writer.print(
+ \\%{} = type {}
+ \\
+ , .{ id.fmt(self), ty.fmt(self) });
+ try writer.writeByte('\n');
+ for (self.variables.items) |variable| {
+ if (variable.global.getReplacement(self) != .none) continue;
+ const global = variable.global.ptrConst(self);
+ try writer.print(
+ \\{} ={}{}{}{}{}{}{}{} {s} {%}{ }{,}
+ \\
+ , .{
+ variable.global.fmt(self),
+ global.linkage,
+ global.preemption,
+ global.visibility,
+ global.dll_storage_class,
+ variable.thread_local,
+ global.unnamed_addr,
+ global.addr_space,
+ global.externally_initialized,
+ @tagName(variable.mutability),
+ global.type.fmt(self),
+ variable.init.fmt(self),
+ variable.alignment,
+ });
+ }
+ try writer.writeByte('\n');
+ for (0.., self.functions.items) |function_i, function| {
+ const function_index: Function.Index = @enumFromInt(function_i);
+ if (function.global.getReplacement(self) != .none) continue;
+ const global = function.global.ptrConst(self);
+ const params_len = global.type.functionParameters(self).len;
+ try writer.print(
+ \\{s}{}{}{}{} {} {}(
+ , .{
+ if (function.instructions.len > 0) "define" else "declare",
+ global.linkage,
+ global.preemption,
+ global.visibility,
+ global.dll_storage_class,
+ global.type.functionReturn(self).fmt(self),
+ function.global.fmt(self),
+ });
+ for (0..params_len) |arg| {
+ if (arg > 0) try writer.writeAll(", ");
+ if (function.instructions.len > 0)
+ try writer.print("{%}", .{function.arg(@intCast(arg)).fmt(function_index, self)})
+ else
+ try writer.print("{%}", .{global.type.functionParameters(self)[arg].fmt(self)});
+ }
+ switch (global.type.functionKind(self)) {
+ .normal => {},
+ .vararg => {
+ if (params_len > 0) try writer.writeAll(", ");
+ try writer.writeAll("...");
+ },
+ }
+ try writer.print("){}{}", .{ global.unnamed_addr, function.alignment });
+ if (function.instructions.len > 0) {
+ var block_incoming_len: u32 = undefined;
+ try writer.writeAll(" {\n");
+ for (params_len..function.instructions.len) |instruction_i| {
+ const instruction_index: Function.Instruction.Index = @enumFromInt(instruction_i);
+ const instruction = function.instructions.get(@intFromEnum(instruction_index));
+ switch (instruction.tag) {
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .@"add nuw nsw",
+ .@"and",
+ .ashr,
+ .@"ashr exact",
+ .fadd,
+ .@"fadd fast",
+ .@"fcmp false",
+ .@"fcmp fast false",
+ .@"fcmp fast oeq",
+ .@"fcmp fast oge",
+ .@"fcmp fast ogt",
+ .@"fcmp fast ole",
+ .@"fcmp fast olt",
+ .@"fcmp fast one",
+ .@"fcmp fast ord",
+ .@"fcmp fast true",
+ .@"fcmp fast ueq",
+ .@"fcmp fast uge",
+ .@"fcmp fast ugt",
+ .@"fcmp fast ule",
+ .@"fcmp fast ult",
+ .@"fcmp fast une",
+ .@"fcmp fast uno",
+ .@"fcmp oeq",
+ .@"fcmp oge",
+ .@"fcmp ogt",
+ .@"fcmp ole",
+ .@"fcmp olt",
+ .@"fcmp one",
+ .@"fcmp ord",
+ .@"fcmp true",
+ .@"fcmp ueq",
+ .@"fcmp uge",
+ .@"fcmp ugt",
+ .@"fcmp ule",
+ .@"fcmp ult",
+ .@"fcmp une",
+ .@"fcmp uno",
+ .fdiv,
+ .@"fdiv fast",
+ .fmul,
+ .@"fmul fast",
+ .frem,
+ .@"frem fast",
+ .fsub,
+ .@"fsub fast",
+ .@"icmp eq",
+ .@"icmp ne",
+ .@"icmp sge",
+ .@"icmp sgt",
+ .@"icmp sle",
+ .@"icmp slt",
+ .@"icmp uge",
+ .@"icmp ugt",
+ .@"icmp ule",
+ .@"icmp ult",
+ .lshr,
+ .@"lshr exact",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .@"mul nuw nsw",
+ .@"or",
+ .sdiv,
+ .@"sdiv exact",
+ .srem,
+ .shl,
+ .@"shl nsw",
+ .@"shl nuw",
+ .@"shl nuw nsw",
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .@"sub nuw nsw",
+ .udiv,
+ .@"udiv exact",
+ .urem,
+ .xor,
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Binary, instruction.data);
+ try writer.print(" %{} = {s} {%}, {}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.lhs.fmt(function_index, self),
+ extra.rhs.fmt(function_index, self),
+ });
+ },
+ .addrspacecast,
+ .bitcast,
+ .fpext,
+ .fptosi,
+ .fptoui,
+ .fptrunc,
+ .inttoptr,
+ .ptrtoint,
+ .sext,
+ .sitofp,
+ .trunc,
+ .uitofp,
+ .zext,
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Cast, instruction.data);
+ try writer.print(" %{} = {s} {%} to {%}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.val.fmt(function_index, self),
+ extra.type.fmt(self),
+ });
+ },
+ .alloca,
+ .@"alloca inalloca",
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Alloca, instruction.data);
+ try writer.print(" %{} = {s} {%}{,%}{,}{,}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.type.fmt(self),
+ extra.len.fmt(function_index, self),
+ extra.info.alignment,
+ extra.info.addr_space,
+ });
+ },
+ .arg => unreachable,
+ .block => {
+ block_incoming_len = instruction.data;
+ const name = instruction_index.name(&function);
+ if (@intFromEnum(instruction_index) > params_len) try writer.writeByte('\n');
+ try writer.print("{}:\n", .{name.fmt(self)});
+ },
+ .br => |tag| {
+ const target: Function.Block.Index = @enumFromInt(instruction.data);
+ try writer.print(" {s} {%}\n", .{
+ @tagName(tag), target.toInst(&function).fmt(function_index, self),
+ });
+ },
+ .br_cond => {
+ const extra = function.extraData(Function.Instruction.BrCond, instruction.data);
+ try writer.print(" br {%}, {%}, {%}\n", .{
+ extra.cond.fmt(function_index, self),
+ extra.then.toInst(&function).fmt(function_index, self),
+ extra.@"else".toInst(&function).fmt(function_index, self),
+ });
+ },
+ .extractelement => |tag| {
+ const extra =
+ function.extraData(Function.Instruction.ExtractElement, instruction.data);
+ try writer.print(" %{} = {s} {%}, {%}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.val.fmt(function_index, self),
+ extra.index.fmt(function_index, self),
+ });
+ },
+ .extractvalue => |tag| {
+ var extra =
+ function.extraDataTrail(Function.Instruction.ExtractValue, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, u32, &function);
+ try writer.print(" %{} = {s} {%}", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.data.val.fmt(function_index, self),
+ });
+ for (indices) |index| try writer.print(", {d}", .{index});
+ try writer.writeByte('\n');
+ },
+ .fence => |tag| {
+ const info: MemoryAccessInfo = @bitCast(instruction.data);
+ try writer.print(" {s}{}{}\n", .{ @tagName(tag), info.scope, info.ordering });
+ },
+ .fneg,
+ .@"fneg fast",
+ .ret,
+ => |tag| {
+ const val: Value = @enumFromInt(instruction.data);
+ try writer.print(" {s} {%}\n", .{
+ @tagName(tag),
+ val.fmt(function_index, self),
+ });
+ },
+ .getelementptr,
+ .@"getelementptr inbounds",
+ => |tag| {
+ var extra = function.extraDataTrail(
+ Function.Instruction.GetElementPtr,
+ instruction.data,
+ );
+ const indices = extra.trail.next(extra.data.indices_len, Value, &function);
+ try writer.print(" %{} = {s} {%}, {%}", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.data.type.fmt(self),
+ extra.data.base.fmt(function_index, self),
+ });
+ for (indices) |index| try writer.print(", {%}", .{
+ index.fmt(function_index, self),
+ });
+ try writer.writeByte('\n');
+ },
+ .insertelement => |tag| {
+ const extra =
+ function.extraData(Function.Instruction.InsertElement, instruction.data);
+ try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.val.fmt(function_index, self),
+ extra.elem.fmt(function_index, self),
+ extra.index.fmt(function_index, self),
+ });
+ },
+ .insertvalue => |tag| {
+ var extra =
+ function.extraDataTrail(Function.Instruction.InsertValue, instruction.data);
+ const indices = extra.trail.next(extra.data.indices_len, u32, &function);
+ try writer.print(" %{} = {s} {%}, {%}", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.data.val.fmt(function_index, self),
+ extra.data.elem.fmt(function_index, self),
+ });
+ for (indices) |index| try writer.print(", {d}", .{index});
+ try writer.writeByte('\n');
+ },
+ .@"llvm.maxnum.",
+ .@"llvm.minnum.",
+ .@"llvm.sadd.sat.",
+ .@"llvm.smax.",
+ .@"llvm.smin.",
+ .@"llvm.smul.fix.sat.",
+ .@"llvm.sshl.sat.",
+ .@"llvm.ssub.sat.",
+ .@"llvm.uadd.sat.",
+ .@"llvm.umax.",
+ .@"llvm.umin.",
+ .@"llvm.umul.fix.sat.",
+ .@"llvm.ushl.sat.",
+ .@"llvm.usub.sat.",
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Binary, instruction.data);
+ const ty = instruction_index.typeOf(function_index, self);
+ try writer.print(" %{} = call {%} @{s}{m}({%}, {%})\n", .{
+ instruction_index.name(&function).fmt(self),
+ ty.fmt(self),
+ @tagName(tag),
+ ty.fmt(self),
+ extra.lhs.fmt(function_index, self),
+ extra.rhs.fmt(function_index, self),
+ });
+ },
+ .load,
+ .@"load atomic",
+ .@"load atomic volatile",
+ .@"load volatile",
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Load, instruction.data);
+ try writer.print(" %{} = {s} {%}, {%}{}{}{,}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.type.fmt(self),
+ extra.ptr.fmt(function_index, self),
+ extra.info.scope,
+ extra.info.ordering,
+ extra.info.alignment,
+ });
+ },
+ .phi,
+ .@"phi fast",
+ => |tag| {
+ var extra = function.extraDataTrail(Function.Instruction.Phi, instruction.data);
+ const vals = extra.trail.next(block_incoming_len, Value, &function);
+ const blocks =
+ extra.trail.next(block_incoming_len, Function.Block.Index, &function);
+ try writer.print(" %{} = {s} {%} ", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ vals[0].typeOf(function_index, self).fmt(self),
+ });
+ for (0.., vals, blocks) |incoming_index, incoming_val, incoming_block| {
+ if (incoming_index > 0) try writer.writeAll(", ");
+ try writer.print("[ {}, {} ]", .{
+ incoming_val.fmt(function_index, self),
+ incoming_block.toInst(&function).fmt(function_index, self),
+ });
+ }
+ try writer.writeByte('\n');
+ },
+ .@"ret void",
+ .@"unreachable",
+ => |tag| try writer.print(" {s}\n", .{@tagName(tag)}),
+ .select,
+ .@"select fast",
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Select, instruction.data);
+ try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.cond.fmt(function_index, self),
+ extra.lhs.fmt(function_index, self),
+ extra.rhs.fmt(function_index, self),
+ });
+ },
+ .shufflevector => |tag| {
+ const extra =
+ function.extraData(Function.Instruction.ShuffleVector, instruction.data);
+ try writer.print(" %{} = {s} {%}, {%}, {%}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.lhs.fmt(function_index, self),
+ extra.rhs.fmt(function_index, self),
+ extra.mask.fmt(function_index, self),
+ });
+ },
+ .store,
+ .@"store atomic",
+ .@"store atomic volatile",
+ .@"store volatile",
+ => |tag| {
+ const extra = function.extraData(Function.Instruction.Store, instruction.data);
+ try writer.print(" {s} {%}, {%}{}{}{,}\n", .{
+ @tagName(tag),
+ extra.val.fmt(function_index, self),
+ extra.ptr.fmt(function_index, self),
+ extra.info.scope,
+ extra.info.ordering,
+ extra.info.alignment,
+ });
+ },
+ .@"switch" => |tag| {
+ var extra =
+ function.extraDataTrail(Function.Instruction.Switch, instruction.data);
+ const vals = extra.trail.next(extra.data.cases_len, Constant, &function);
+ const blocks =
+ extra.trail.next(extra.data.cases_len, Function.Block.Index, &function);
+ try writer.print(" {s} {%}, {%} [", .{
+ @tagName(tag),
+ extra.data.val.fmt(function_index, self),
+ extra.data.default.toInst(&function).fmt(function_index, self),
+ });
+ for (vals, blocks) |case_val, case_block| try writer.print(" {%}, {%}\n", .{
+ case_val.fmt(self),
+ case_block.toInst(&function).fmt(function_index, self),
+ });
+ try writer.writeAll(" ]\n");
+ },
+ .unimplemented => |tag| {
+ const ty: Type = @enumFromInt(instruction.data);
+ try writer.writeAll(" ");
+ switch (ty) {
+ .none, .void => {},
+ else => try writer.print("%{} = ", .{
+ instruction_index.name(&function).fmt(self),
+ }),
+ }
+ try writer.print("{s} {%}\n", .{ @tagName(tag), ty.fmt(self) });
+ },
+ .va_arg => |tag| {
+ const extra = function.extraData(Function.Instruction.VaArg, instruction.data);
+ try writer.print(" %{} = {s} {%}, {%}\n", .{
+ instruction_index.name(&function).fmt(self),
+ @tagName(tag),
+ extra.list.fmt(function_index, self),
+ extra.type.fmt(self),
+ });
+ },
+ }
+ }
+ try writer.writeByte('}');
+ }
+ try writer.writeAll("\n\n");
+ }
+}
+
+pub inline fn useLibLlvm(self: *const Builder) bool {
+ return build_options.have_llvm and self.use_lib_llvm;
+}
+
+const NoExtra = struct {};
+
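+/// Returns true if `id` contains only characters allowed in an unquoted LLVM
+/// identifier (`$`, `-`, `.`, `_`, letters, digits) and does not start with a digit.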
+fn isValidIdentifier(id: []const u8) bool {
+ for (id, 0..) |character, index| switch (character) {
+ '$', '-', '.', 'A'...'Z', '_', 'a'...'z' => {},
+ '0'...'9' => if (index == 0) return false,
+ else => return false,
+ };
+ return true;
+}
+
+fn ensureUnusedGlobalCapacity(self: *Builder, name: String) Allocator.Error!void {
+ if (self.useLibLlvm()) try self.llvm.globals.ensureUnusedCapacity(self.gpa, 1);
+ try self.string_map.ensureUnusedCapacity(self.gpa, 1);
+ if (name.toSlice(self)) |id| try self.string_bytes.ensureUnusedCapacity(self.gpa, id.len +
+ comptime std.fmt.count("{d}" ++ .{0}, .{std.math.maxInt(u32)}));
+ try self.string_indices.ensureUnusedCapacity(self.gpa, 1);
+ try self.globals.ensureUnusedCapacity(self.gpa, 1);
+ try self.next_unique_global_id.ensureUnusedCapacity(self.gpa, 1);
+}
+
+fn fnTypeAssumeCapacity(
+ self: *Builder,
+ ret: Type,
+ params: []const Type,
+ comptime kind: Type.Function.Kind,
+) if (build_options.have_llvm) Allocator.Error!Type else Type {
+ const tag: Type.Tag = switch (kind) {
+ .normal => .function,
+ .vararg => .vararg_function,
+ };
+ const Key = struct { ret: Type, params: []const Type };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Key) u32 {
+ var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag)));
+ hasher.update(std.mem.asBytes(&key.ret));
+ hasher.update(std.mem.sliceAsBytes(key.params));
+ return @truncate(hasher.final());
+ }
+ pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ const rhs_data = ctx.builder.type_items.items[rhs_index];
+ var rhs_extra = ctx.builder.typeExtraDataTrail(Type.Function, rhs_data.data);
+ const rhs_params = rhs_extra.trail.next(rhs_extra.data.params_len, Type, ctx.builder);
+ return rhs_data.tag == tag and lhs_key.ret == rhs_extra.data.ret and
+ std.mem.eql(Type, lhs_key.params, rhs_params);
+ }
+ };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(
+ Key{ .ret = ret, .params = params },
+ Adapter{ .builder = self },
+ );
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addTypeExtraAssumeCapacity(Type.Function{
+ .ret = ret,
+ .params_len = @intCast(params.len),
+ }),
+ });
+ self.type_extra.appendSliceAssumeCapacity(@ptrCast(params));
+ if (self.useLibLlvm()) {
+ const llvm_params = try self.gpa.alloc(*llvm.Type, params.len);
+ defer self.gpa.free(llvm_params);
+ for (llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(self);
+ self.llvm.types.appendAssumeCapacity(llvm.functionType(
+ ret.toLlvm(self),
+ llvm_params.ptr,
+ @intCast(llvm_params.len),
+ switch (kind) {
+ .normal => .False,
+ .vararg => .True,
+ },
+ ));
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn intTypeAssumeCapacity(self: *Builder, bits: u24) Type {
+ assert(bits > 0);
+ const result = self.getOrPutTypeNoExtraAssumeCapacity(.{ .tag = .integer, .data = bits });
+ if (self.useLibLlvm() and result.new)
+ self.llvm.types.appendAssumeCapacity(self.llvm.context.intType(bits));
+ return result.type;
+}
+
+fn ptrTypeAssumeCapacity(self: *Builder, addr_space: AddrSpace) Type {
+ const result = self.getOrPutTypeNoExtraAssumeCapacity(
+ .{ .tag = .pointer, .data = @intFromEnum(addr_space) },
+ );
+ if (self.useLibLlvm() and result.new)
+ self.llvm.types.appendAssumeCapacity(self.llvm.context.pointerType(@intFromEnum(addr_space)));
+ return result.type;
+}
+
+fn vectorTypeAssumeCapacity(
+ self: *Builder,
+ comptime kind: Type.Vector.Kind,
+ len: u32,
+ child: Type,
+) Type {
+ assert(child.isFloatingPoint() or child.isInteger(self) or child.isPointer(self));
+ const tag: Type.Tag = switch (kind) {
+ .normal => .vector,
+ .scalable => .scalable_vector,
+ };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Type.Vector) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(tag)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Type.Vector, _: void, rhs_index: usize) bool {
+ const rhs_data = ctx.builder.type_items.items[rhs_index];
+ return rhs_data.tag == tag and
+ std.meta.eql(lhs_key, ctx.builder.typeExtraData(Type.Vector, rhs_data.data));
+ }
+ };
+ const data = Type.Vector{ .len = len, .child = child };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addTypeExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(switch (kind) {
+ .normal => llvm.Type.vectorType,
+ .scalable => llvm.Type.scalableVectorType,
+ }(child.toLlvm(self), @intCast(len)));
+ }
+ return @enumFromInt(gop.index);
+}
+
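+/// Array lengths that fit in 32 bits are stored compactly as `.small_array`,
+/// reusing the `Type.Vector` extra data; longer arrays use the full 64-bit
+/// `Type.Array` encoding.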
+fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type {
+ if (std.math.cast(u32, len)) |small_len| {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Type.Vector) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Type.Tag.small_array)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Type.Vector, _: void, rhs_index: usize) bool {
+ const rhs_data = ctx.builder.type_items.items[rhs_index];
+ return rhs_data.tag == .small_array and
+ std.meta.eql(lhs_key, ctx.builder.typeExtraData(Type.Vector, rhs_data.data));
+ }
+ };
+ const data = Type.Vector{ .len = small_len, .child = child };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(.{
+ .tag = .small_array,
+ .data = self.addTypeExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
+ child.toLlvm(self).arrayType(@intCast(len)),
+ );
+ }
+ return @enumFromInt(gop.index);
+ } else {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Type.Array) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Type.Tag.array)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Type.Array, _: void, rhs_index: usize) bool {
+ const rhs_data = ctx.builder.type_items.items[rhs_index];
+ return rhs_data.tag == .array and
+ std.meta.eql(lhs_key, ctx.builder.typeExtraData(Type.Array, rhs_data.data));
+ }
+ };
+ const data = Type.Array{
+ .len_lo = @truncate(len),
+ .len_hi = @intCast(len >> 32),
+ .child = child,
+ };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(.{
+ .tag = .array,
+ .data = self.addTypeExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
+ child.toLlvm(self).arrayType(@intCast(len)),
+ );
+ }
+ return @enumFromInt(gop.index);
+ }
+}
+
+fn structTypeAssumeCapacity(
+ self: *Builder,
+ comptime kind: Type.Structure.Kind,
+ fields: []const Type,
+) if (build_options.have_llvm) Allocator.Error!Type else Type {
+ const tag: Type.Tag = switch (kind) {
+ .normal => .structure,
+ .@"packed" => .packed_structure,
+ };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: []const Type) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(tag)),
+ std.mem.sliceAsBytes(key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: []const Type, _: void, rhs_index: usize) bool {
+ const rhs_data = ctx.builder.type_items.items[rhs_index];
+ var rhs_extra = ctx.builder.typeExtraDataTrail(Type.Structure, rhs_data.data);
+ const rhs_fields = rhs_extra.trail.next(rhs_extra.data.fields_len, Type, ctx.builder);
+ return rhs_data.tag == tag and std.mem.eql(Type, lhs_key, rhs_fields);
+ }
+ };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(fields, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addTypeExtraAssumeCapacity(Type.Structure{
+ .fields_len = @intCast(fields.len),
+ }),
+ });
+ self.type_extra.appendSliceAssumeCapacity(@ptrCast(fields));
+ if (self.useLibLlvm()) {
+ const ExpectedContents = [expected_fields_len]*llvm.Type;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ const llvm_fields = try allocator.alloc(*llvm.Type, fields.len);
+ defer allocator.free(llvm_fields);
+ for (llvm_fields, fields) |*llvm_field, field| llvm_field.* = field.toLlvm(self);
+
+ self.llvm.types.appendAssumeCapacity(self.llvm.context.structType(
+ llvm_fields.ptr,
+ @intCast(llvm_fields.len),
+ switch (kind) {
+ .normal => .False,
+ .@"packed" => .True,
+ },
+ ));
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
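+/// Creates a named structure type with no body. An empty name takes the next
+/// anonymous type id; a name collision gets a ".N" suffix, mirroring the
+/// scheme used for globals above.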
+fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: String) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Type.Tag.named_structure)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: String, _: void, rhs_index: usize) bool {
+ const rhs_data = ctx.builder.type_items.items[rhs_index];
+ return rhs_data.tag == .named_structure and
+ lhs_key == ctx.builder.typeExtraData(Type.NamedStructure, rhs_data.data).id;
+ }
+ };
+ var id = name;
+ if (name == .empty) {
+ id = self.next_unnamed_type;
+ assert(id != .none);
+ self.next_unnamed_type = @enumFromInt(@intFromEnum(id) + 1);
+ } else assert(!name.isAnon());
+ while (true) {
+ const type_gop = self.types.getOrPutAssumeCapacity(id);
+ if (!type_gop.found_existing) {
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(id, Adapter{ .builder = self });
+ assert(!gop.found_existing);
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(.{
+ .tag = .named_structure,
+ .data = self.addTypeExtraAssumeCapacity(Type.NamedStructure{
+ .id = id,
+ .body = .none,
+ }),
+ });
+ const result: Type = @enumFromInt(gop.index);
+ type_gop.value_ptr.* = result;
+ if (self.useLibLlvm()) self.llvm.types.appendAssumeCapacity(
+ self.llvm.context.structCreateNamed(id.toSlice(self) orelse ""),
+ );
+ return result;
+ }
+
+ const unique_gop = self.next_unique_type_id.getOrPutAssumeCapacity(name);
+ if (!unique_gop.found_existing) unique_gop.value_ptr.* = 2;
+ id = self.fmtAssumeCapacity("{s}.{d}", .{ name.toSlice(self).?, unique_gop.value_ptr.* });
+ unique_gop.value_ptr.* += 1;
+ }
+}
+
+fn ensureUnusedTypeCapacity(
+ self: *Builder,
+ count: usize,
+ comptime Extra: type,
+ trail_len: usize,
+) Allocator.Error!void {
+ try self.type_map.ensureUnusedCapacity(self.gpa, count);
+ try self.type_items.ensureUnusedCapacity(self.gpa, count);
+ try self.type_extra.ensureUnusedCapacity(
+ self.gpa,
+ count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ );
+ if (self.useLibLlvm()) try self.llvm.types.ensureUnusedCapacity(self.gpa, count);
+}
+
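+/// Interns a type item that carries no extra data, reporting whether a new
+/// entry was created so callers can mirror it into libLLVM.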
+fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { new: bool, type: Type } {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Type.Item) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Type.Tag.simple)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Type.Item, _: void, rhs_index: usize) bool {
+ const lhs_bits: u32 = @bitCast(lhs_key);
+ const rhs_bits: u32 = @bitCast(ctx.builder.type_items.items[rhs_index]);
+ return lhs_bits == rhs_bits;
+ }
+ };
+ const gop = self.type_map.getOrPutAssumeCapacityAdapted(item, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.type_items.appendAssumeCapacity(item);
+ }
+ return .{ .new = !gop.found_existing, .type = @enumFromInt(gop.index) };
+}
+
+fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.Item.ExtraIndex {
+ const result: Type.Item.ExtraIndex = @intCast(self.type_extra.items.len);
+ inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ const value = @field(extra, field.name);
+ self.type_extra.appendAssumeCapacity(switch (field.type) {
+ u32 => value,
+ String, Type => @intFromEnum(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ });
+ }
+ return result;
+}
+
+const TypeExtraDataTrail = struct {
+ index: Type.Item.ExtraIndex,
+
+ fn nextMut(self: *TypeExtraDataTrail, len: u32, comptime Item: type, builder: *Builder) []Item {
+ const items: []Item = @ptrCast(builder.type_extra.items[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+
+ fn next(
+ self: *TypeExtraDataTrail,
+ len: u32,
+ comptime Item: type,
+ builder: *const Builder,
+ ) []const Item {
+ const items: []const Item = @ptrCast(builder.type_extra.items[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+};
+
+fn typeExtraDataTrail(
+ self: *const Builder,
+ comptime T: type,
+ index: Type.Item.ExtraIndex,
+) struct { data: T, trail: TypeExtraDataTrail } {
+ var result: T = undefined;
+ const fields = @typeInfo(T).Struct.fields;
+ inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, value|
+ @field(result, field.name) = switch (field.type) {
+ u32 => value,
+ String, Type => @enumFromInt(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ };
+ return .{
+ .data = result,
+ .trail = .{ .index = index + @as(Type.Item.ExtraIndex, @intCast(fields.len)) },
+ };
+}
+
+fn typeExtraData(self: *const Builder, comptime T: type, index: Type.Item.ExtraIndex) T {
+ return self.typeExtraDataTrail(T, index).data;
+}
+
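+/// Interns an arbitrary-precision integer constant. Values that only fit the
+/// type unsigned are first truncated to the canonical signed two's-complement
+/// form; with libLLVM the limbs are additionally repacked into 64-bit words.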
+fn bigIntConstAssumeCapacity(
+ self: *Builder,
+ ty: Type,
+ value: std.math.big.int.Const,
+) if (build_options.have_llvm) Allocator.Error!Constant else Constant {
+ const type_item = self.type_items.items[@intFromEnum(ty)];
+ assert(type_item.tag == .integer);
+ const bits = type_item.data;
+
+ const ExpectedContents = extern struct {
+ limbs: [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb,
+ llvm_limbs: if (build_options.have_llvm) [64 / @sizeOf(u64)]u64 else void,
+ };
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ var limbs: []std.math.big.Limb = &.{};
+ defer allocator.free(limbs);
+ const canonical_value = if (value.fitsInTwosComp(.signed, bits)) value else canon: {
+ assert(value.fitsInTwosComp(.unsigned, bits));
+ limbs = try allocator.alloc(std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits));
+ var temp_value = std.math.big.int.Mutable.init(limbs, 0);
+ temp_value.truncate(value, .signed, bits);
+ break :canon temp_value.toConst();
+ };
+ assert(canonical_value.fitsInTwosComp(.signed, bits));
+
+ const ExtraPtr = *align(@alignOf(std.math.big.Limb)) Constant.Integer;
+ const Key = struct { tag: Constant.Tag, type: Type, limbs: []const std.math.big.Limb };
+ const tag: Constant.Tag = switch (canonical_value.positive) {
+ true => .positive_integer,
+ false => .negative_integer,
+ };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Key) u32 {
+ var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
+ hasher.update(std.mem.asBytes(&key.type));
+ hasher.update(std.mem.sliceAsBytes(key.limbs));
+ return @truncate(hasher.final());
+ }
+ pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra: ExtraPtr =
+ @ptrCast(ctx.builder.constant_limbs.items[rhs_data..][0..Constant.Integer.limbs]);
+ const rhs_limbs = ctx.builder.constant_limbs
+ .items[rhs_data + Constant.Integer.limbs ..][0..rhs_extra.limbs_len];
+ return lhs_key.type == rhs_extra.type and
+ std.mem.eql(std.math.big.Limb, lhs_key.limbs, rhs_limbs);
+ }
+ };
+
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(
+ Key{ .tag = tag, .type = ty, .limbs = canonical_value.limbs },
+ Adapter{ .builder = self },
+ );
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = @intCast(self.constant_limbs.items.len),
+ });
+ const extra: ExtraPtr =
+ @ptrCast(self.constant_limbs.addManyAsArrayAssumeCapacity(Constant.Integer.limbs));
+ extra.* = .{ .type = ty, .limbs_len = @intCast(canonical_value.limbs.len) };
+ self.constant_limbs.appendSliceAssumeCapacity(canonical_value.limbs);
+ if (self.useLibLlvm()) {
+ const llvm_type = ty.toLlvm(self);
+ if (canonical_value.to(c_longlong)) |small| {
+ self.llvm.constants.appendAssumeCapacity(llvm_type.constInt(@bitCast(small), .True));
+ } else |_| if (canonical_value.to(c_ulonglong)) |small| {
+ self.llvm.constants.appendAssumeCapacity(llvm_type.constInt(small, .False));
+ } else |_| {
+ const llvm_limbs = try allocator.alloc(u64, std.math.divCeil(
+ usize,
+ if (canonical_value.positive) canonical_value.bitCountAbs() else bits,
+ @bitSizeOf(u64),
+ ) catch unreachable);
+ defer allocator.free(llvm_limbs);
+ var limb_index: usize = 0;
+ var borrow: std.math.big.Limb = 0;
+ for (llvm_limbs) |*result_limb| {
+ var llvm_limb: u64 = 0;
+ inline for (0..Constant.Integer.limbs) |shift| {
+ const limb = if (limb_index < canonical_value.limbs.len)
+ canonical_value.limbs[limb_index]
+ else
+ 0;
+ limb_index += 1;
+ llvm_limb |= @as(u64, limb) << shift * @bitSizeOf(std.math.big.Limb);
+ }
+ if (!canonical_value.positive) {
+ const overflow = @subWithOverflow(borrow, llvm_limb);
+ llvm_limb = overflow[0];
+ borrow -%= overflow[1];
+ assert(borrow == 0 or borrow == std.math.maxInt(u64));
+ }
+ result_limb.* = llvm_limb;
+ }
+ self.llvm.constants.appendAssumeCapacity(
+ llvm_type.constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs.ptr),
+ );
+ }
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn halfConstAssumeCapacity(self: *Builder, val: f16) Constant {
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .half, .data = @as(u16, @bitCast(val)) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
+ if (std.math.isSignalNan(val))
+ Type.i16.toLlvm(self).constInt(@as(u16, @bitCast(val)), .False)
+ .constBitCast(Type.half.toLlvm(self))
+ else
+ Type.half.toLlvm(self).constReal(val),
+ );
+ return result.constant;
+}
+
+fn bfloatConstAssumeCapacity(self: *Builder, val: f32) Constant {
+ assert(@as(u16, @truncate(@as(u32, @bitCast(val)))) == 0);
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .bfloat, .data = @bitCast(val) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
+ if (std.math.isSignalNan(val))
+ Type.i16.toLlvm(self).constInt(@as(u32, @bitCast(val)) >> 16, .False)
+ .constBitCast(Type.bfloat.toLlvm(self))
+ else
+ Type.bfloat.toLlvm(self).constReal(val),
+ );
+ return result.constant;
+}
+
+fn floatConstAssumeCapacity(self: *Builder, val: f32) Constant {
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .float, .data = @bitCast(val) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
+ if (std.math.isSignalNan(val))
+ Type.i32.toLlvm(self).constInt(@as(u32, @bitCast(val)), .False)
+ .constBitCast(Type.float.toLlvm(self))
+ else
+ Type.float.toLlvm(self).constReal(val),
+ );
+ return result.constant;
+}
+
+fn doubleConstAssumeCapacity(self: *Builder, val: f64) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: f64) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.double)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: f64, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .double) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Double, rhs_data);
+ return @as(u64, @bitCast(lhs_key)) == @as(u64, rhs_extra.hi) << 32 | rhs_extra.lo;
+ }
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .double,
+ .data = self.addConstantExtraAssumeCapacity(Constant.Double{
+ .lo = @truncate(@as(u64, @bitCast(val))),
+ .hi = @intCast(@as(u64, @bitCast(val)) >> 32),
+ }),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ if (std.math.isSignalNan(val))
+ Type.i64.toLlvm(self).constInt(@as(u64, @bitCast(val)), .False)
+ .constBitCast(Type.double.toLlvm(self))
+ else
+ Type.double.toLlvm(self).constReal(val),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn fp128ConstAssumeCapacity(self: *Builder, val: f128) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: f128) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.fp128)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: f128, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .fp128) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Fp128, rhs_data);
+ return @as(u128, @bitCast(lhs_key)) == @as(u128, rhs_extra.hi_hi) << 96 |
+ @as(u128, rhs_extra.hi_lo) << 64 | @as(u128, rhs_extra.lo_hi) << 32 | rhs_extra.lo_lo;
+ }
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .fp128,
+ .data = self.addConstantExtraAssumeCapacity(Constant.Fp128{
+ .lo_lo = @truncate(@as(u128, @bitCast(val))),
+ .lo_hi = @truncate(@as(u128, @bitCast(val)) >> 32),
+ .hi_lo = @truncate(@as(u128, @bitCast(val)) >> 64),
+ .hi_hi = @intCast(@as(u128, @bitCast(val)) >> 96),
+ }),
+ });
+ if (self.useLibLlvm()) {
+ const llvm_limbs = [_]u64{
+ @truncate(@as(u128, @bitCast(val))),
+ @intCast(@as(u128, @bitCast(val)) >> 64),
+ };
+ self.llvm.constants.appendAssumeCapacity(
+ Type.i128.toLlvm(self)
+ .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs)
+ .constBitCast(Type.fp128.toLlvm(self)),
+ );
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn x86_fp80ConstAssumeCapacity(self: *Builder, val: f80) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: f80) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.x86_fp80)),
+ std.mem.asBytes(&key)[0..10],
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: f80, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .x86_fp80) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Fp80, rhs_data);
+ return @as(u80, @bitCast(lhs_key)) == @as(u80, rhs_extra.hi) << 64 |
+ @as(u80, rhs_extra.lo_hi) << 32 | rhs_extra.lo_lo;
+ }
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .x86_fp80,
+ .data = self.addConstantExtraAssumeCapacity(Constant.Fp80{
+ .lo_lo = @truncate(@as(u80, @bitCast(val))),
+ .lo_hi = @truncate(@as(u80, @bitCast(val)) >> 32),
+ .hi = @intCast(@as(u80, @bitCast(val)) >> 64),
+ }),
+ });
+ if (self.useLibLlvm()) {
+ const llvm_limbs = [_]u64{
+ @truncate(@as(u80, @bitCast(val))),
+ @intCast(@as(u80, @bitCast(val)) >> 64),
+ };
+ self.llvm.constants.appendAssumeCapacity(
+ Type.i80.toLlvm(self)
+ .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), &llvm_limbs)
+ .constBitCast(Type.x86_fp80.toLlvm(self)),
+ );
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
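+/// Interns a ppc_fp128 constant, represented as a pair of f64 values stored in the
+/// same four-limb `Constant.Fp128` layout used for fp128.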
+fn ppc_fp128ConstAssumeCapacity(self: *Builder, val: [2]f64) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: [2]f64) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.ppc_fp128)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: [2]f64, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .ppc_fp128) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Fp128, rhs_data);
+ return @as(u64, @bitCast(lhs_key[0])) == @as(u64, rhs_extra.lo_hi) << 32 | rhs_extra.lo_lo and
+ @as(u64, @bitCast(lhs_key[1])) == @as(u64, rhs_extra.hi_hi) << 32 | rhs_extra.hi_lo;
+ }
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(val, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .ppc_fp128,
+ .data = self.addConstantExtraAssumeCapacity(Constant.Fp128{
+ .lo_lo = @truncate(@as(u64, @bitCast(val[0]))),
+ .lo_hi = @intCast(@as(u64, @bitCast(val[0])) >> 32),
+ .hi_lo = @truncate(@as(u64, @bitCast(val[1]))),
+ .hi_hi = @intCast(@as(u64, @bitCast(val[1])) >> 32),
+ }),
+ });
+ if (self.useLibLlvm()) {
+ const llvm_limbs: *const [2]u64 = @ptrCast(&val);
+ self.llvm.constants.appendAssumeCapacity(
+ Type.i128.toLlvm(self)
+ .constIntOfArbitraryPrecision(@intCast(llvm_limbs.len), llvm_limbs)
+ .constBitCast(Type.ppc_fp128.toLlvm(self)),
+ );
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn nullConstAssumeCapacity(self: *Builder, ty: Type) Constant {
+ assert(self.type_items.items[@intFromEnum(ty)].tag == .pointer);
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .null, .data = @intFromEnum(ty) },
+ );
+ if (self.useLibLlvm() and result.new)
+ self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull());
+ return result.constant;
+}
+
+fn noneConstAssumeCapacity(self: *Builder, ty: Type) Constant {
+ assert(ty == .token);
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .none, .data = @intFromEnum(ty) },
+ );
+ if (self.useLibLlvm() and result.new)
+ self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull());
+ return result.constant;
+}
+
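+/// Interns a struct constant. Field types are asserted against the struct body,
+/// an all-zero-init aggregate folds to `zeroinitializer`, and the LLVM mirror is
+/// built from the field values via a stack-fallback allocation.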
+fn structConstAssumeCapacity(
+ self: *Builder,
+ ty: Type,
+ vals: []const Constant,
+) if (build_options.have_llvm) Allocator.Error!Constant else Constant {
+ const type_item = self.type_items.items[@intFromEnum(ty)];
+ var extra = self.typeExtraDataTrail(Type.Structure, switch (type_item.tag) {
+ .structure, .packed_structure => type_item.data,
+ .named_structure => data: {
+ const body_ty = self.typeExtraData(Type.NamedStructure, type_item.data).body;
+ const body_item = self.type_items.items[@intFromEnum(body_ty)];
+ switch (body_item.tag) {
+ .structure, .packed_structure => break :data body_item.data,
+ else => unreachable,
+ }
+ },
+ else => unreachable,
+ });
+ const fields = extra.trail.next(extra.data.fields_len, Type, self);
+ for (fields, vals) |field, val| assert(field == val.typeOf(self));
+
+ for (vals) |val| {
+ if (!val.isZeroInit(self)) break;
+ } else return self.zeroInitConstAssumeCapacity(ty);
+
+ const tag: Constant.Tag = switch (ty.unnamedTag(self)) {
+ .structure => .structure,
+ .packed_structure => .packed_structure,
+ else => unreachable,
+ };
+ const result = self.getOrPutConstantAggregateAssumeCapacity(tag, ty, vals);
+ if (self.useLibLlvm() and result.new) {
+ const ExpectedContents = [expected_fields_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ const llvm_vals = try allocator.alloc(*llvm.Value, vals.len);
+ defer allocator.free(llvm_vals);
+ for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self);
+
+ self.llvm.constants.appendAssumeCapacity(
+ ty.toLlvm(self).constNamedStruct(llvm_vals.ptr, @intCast(llvm_vals.len)),
+ );
+ }
+ return result.constant;
+}
+
+fn arrayConstAssumeCapacity(
+ self: *Builder,
+ ty: Type,
+ vals: []const Constant,
+) if (build_options.have_llvm) Allocator.Error!Constant else Constant {
+ const type_item = self.type_items.items[@intFromEnum(ty)];
+ const type_extra: struct { len: u64, child: Type } = switch (type_item.tag) {
+ inline .small_array, .array => |kind| extra: {
+ const extra = self.typeExtraData(switch (kind) {
+ .small_array => Type.Vector,
+ .array => Type.Array,
+ else => unreachable,
+ }, type_item.data);
+ break :extra .{ .len = extra.length(), .child = extra.child };
+ },
+ else => unreachable,
+ };
+ assert(type_extra.len == vals.len);
+ for (vals) |val| assert(type_extra.child == val.typeOf(self));
+
+ for (vals) |val| {
+ if (!val.isZeroInit(self)) break;
+ } else return self.zeroInitConstAssumeCapacity(ty);
+
+ const result = self.getOrPutConstantAggregateAssumeCapacity(.array, ty, vals);
+ if (self.useLibLlvm() and result.new) {
+ const ExpectedContents = [expected_fields_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ const llvm_vals = try allocator.alloc(*llvm.Value, vals.len);
+ defer allocator.free(llvm_vals);
+ for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self);
+
+ self.llvm.constants.appendAssumeCapacity(
+ type_extra.child.toLlvm(self).constArray(llvm_vals.ptr, @intCast(llvm_vals.len)),
+ );
+ }
+ return result.constant;
+}
+
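+/// Interns a string constant as an `[len x i8]` array constant; an all-zero string
+/// folds to `zeroinitializer`.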
+fn stringConstAssumeCapacity(self: *Builder, val: String) Constant {
+ const slice = val.toSlice(self).?;
+ const ty = self.arrayTypeAssumeCapacity(slice.len, .i8);
+ if (std.mem.allEqual(u8, slice, 0)) return self.zeroInitConstAssumeCapacity(ty);
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .string, .data = @intFromEnum(val) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
+ self.llvm.context.constString(slice.ptr, @intCast(slice.len), .True),
+ );
+ return result.constant;
+}
+
+fn stringNullConstAssumeCapacity(self: *Builder, val: String) Constant {
+ const slice = val.toSlice(self).?;
+ const ty = self.arrayTypeAssumeCapacity(slice.len + 1, .i8);
+ if (std.mem.allEqual(u8, slice, 0)) return self.zeroInitConstAssumeCapacity(ty);
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .string_null, .data = @intFromEnum(val) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(
+ self.llvm.context.constString(slice.ptr, @intCast(slice.len + 1), .True),
+ );
+ return result.constant;
+}
+
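+/// Interns a vector constant, folding to a splat when all elements are identical
+/// and to `zeroinitializer` when all elements are zero-init.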
+fn vectorConstAssumeCapacity(
+ self: *Builder,
+ ty: Type,
+ vals: []const Constant,
+) if (build_options.have_llvm) Allocator.Error!Constant else Constant {
+ assert(ty.isVector(self));
+ assert(ty.vectorLen(self) == vals.len);
+ for (vals) |val| assert(ty.childType(self) == val.typeOf(self));
+
+ for (vals[1..]) |val| {
+ if (vals[0] != val) break;
+ } else return self.splatConstAssumeCapacity(ty, vals[0]);
+ for (vals) |val| {
+ if (!val.isZeroInit(self)) break;
+ } else return self.zeroInitConstAssumeCapacity(ty);
+
+ const result = self.getOrPutConstantAggregateAssumeCapacity(.vector, ty, vals);
+ if (self.useLibLlvm() and result.new) {
+ const ExpectedContents = [expected_fields_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ const llvm_vals = try allocator.alloc(*llvm.Value, vals.len);
+ defer allocator.free(llvm_vals);
+ for (llvm_vals, vals) |*llvm_val, val| llvm_val.* = val.toLlvm(self);
+
+ self.llvm.constants.appendAssumeCapacity(
+ llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)),
+ );
+ }
+ return result.constant;
+}
+
+fn splatConstAssumeCapacity(
+ self: *Builder,
+ ty: Type,
+ val: Constant,
+) if (build_options.have_llvm) Allocator.Error!Constant else Constant {
+ assert(ty.scalarType(self) == val.typeOf(self));
+
+ if (!ty.isVector(self)) return val;
+ if (val.isZeroInit(self)) return self.zeroInitConstAssumeCapacity(ty);
+
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.Splat) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.splat)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.Splat, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .splat) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Splat, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.Splat{ .type = ty, .value = val };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .splat,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) {
+ const ExpectedContents = [expected_fields_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ const llvm_vals = try allocator.alloc(*llvm.Value, ty.vectorLen(self));
+ defer allocator.free(llvm_vals);
+ @memset(llvm_vals, val.toLlvm(self));
+
+ self.llvm.constants.appendAssumeCapacity(
+ llvm.constVector(llvm_vals.ptr, @intCast(llvm_vals.len)),
+ );
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
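+/// Returns the canonical zero value of `ty`: the corresponding scalar constant for
+/// floats, integers, pointers, `i1`, and `token`, or a `zeroinitializer` constant
+/// for aggregate and target types.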
+fn zeroInitConstAssumeCapacity(self: *Builder, ty: Type) Constant {
+ switch (ty) {
+ inline .half,
+ .bfloat,
+ .float,
+ .double,
+ .fp128,
+ .x86_fp80,
+ => |tag| return @field(Builder, @tagName(tag) ++ "ConstAssumeCapacity")(self, 0.0),
+ .ppc_fp128 => return self.ppc_fp128ConstAssumeCapacity(.{ 0.0, 0.0 }),
+ .token => return .none,
+ .i1 => return .false,
+ else => switch (self.type_items.items[@intFromEnum(ty)].tag) {
+ .simple,
+ .function,
+ .vararg_function,
+ => unreachable,
+ .integer => {
+ var limbs: [std.math.big.int.calcLimbLen(0)]std.math.big.Limb = undefined;
+ const bigint = std.math.big.int.Mutable.init(&limbs, 0);
+ return self.bigIntConstAssumeCapacity(ty, bigint.toConst()) catch unreachable;
+ },
+ .pointer => return self.nullConstAssumeCapacity(ty),
+ .target,
+ .vector,
+ .scalable_vector,
+ .small_array,
+ .array,
+ .structure,
+ .packed_structure,
+ .named_structure,
+ => {},
+ },
+ }
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .zeroinitializer, .data = @intFromEnum(ty) },
+ );
+ if (self.useLibLlvm() and result.new)
+ self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).constNull());
+ return result.constant;
+}
+
+fn undefConstAssumeCapacity(self: *Builder, ty: Type) Constant {
+ switch (self.type_items.items[@intFromEnum(ty)].tag) {
+ .simple => switch (ty) {
+ .void, .label => unreachable,
+ else => {},
+ },
+ .function, .vararg_function => unreachable,
+ else => {},
+ }
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .undef, .data = @intFromEnum(ty) },
+ );
+ if (self.useLibLlvm() and result.new)
+ self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getUndef());
+ return result.constant;
+}
+
+fn poisonConstAssumeCapacity(self: *Builder, ty: Type) Constant {
+ switch (self.type_items.items[@intFromEnum(ty)].tag) {
+ .simple => switch (ty) {
+ .void, .label => unreachable,
+ else => {},
+ },
+ .function, .vararg_function => unreachable,
+ else => {},
+ }
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .poison, .data = @intFromEnum(ty) },
+ );
+ if (self.useLibLlvm() and result.new)
+ self.llvm.constants.appendAssumeCapacity(ty.toLlvm(self).getPoison());
+ return result.constant;
+}
+
+fn blockAddrConstAssumeCapacity(
+ self: *Builder,
+ function: Function.Index,
+ block: Function.Block.Index,
+) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.BlockAddress) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.blockaddress)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.BlockAddress, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .blockaddress) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.BlockAddress, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.BlockAddress{ .function = function, .block = block };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .blockaddress,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ function.toLlvm(self).blockAddress(block.toValue(self, function).toLlvm(self, function)),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn dsoLocalEquivalentConstAssumeCapacity(self: *Builder, function: Function.Index) Constant {
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .dso_local_equivalent, .data = @intFromEnum(function) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(undefined);
+ return result.constant;
+}
+
+fn noCfiConstAssumeCapacity(self: *Builder, function: Function.Index) Constant {
+ const result = self.getOrPutConstantNoExtraAssumeCapacity(
+ .{ .tag = .no_cfi, .data = @intFromEnum(function) },
+ );
+ if (self.useLibLlvm() and result.new) self.llvm.constants.appendAssumeCapacity(undefined);
+ return result.constant;
+}
+
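+/// Picks the cast opcode needed to convert between two scalar types based on their
+/// kind (float, integer, pointer), relative bit width, and the requested signedness.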
+fn convTag(
+ self: *Builder,
+ comptime Tag: type,
+ signedness: Constant.Cast.Signedness,
+ val_ty: Type,
+ ty: Type,
+) Tag {
+ assert(val_ty != ty);
+ return switch (val_ty.scalarTag(self)) {
+ .simple => switch (ty.scalarTag(self)) {
+ .simple => switch (std.math.order(val_ty.scalarBits(self), ty.scalarBits(self))) {
+ .lt => .fpext,
+ .eq => unreachable,
+ .gt => .fptrunc,
+ },
+ .integer => switch (signedness) {
+ .unsigned => .fptoui,
+ .signed => .fptosi,
+ .unneeded => unreachable,
+ },
+ else => unreachable,
+ },
+ .integer => switch (ty.scalarTag(self)) {
+ .simple => switch (signedness) {
+ .unsigned => .uitofp,
+ .signed => .sitofp,
+ .unneeded => unreachable,
+ },
+ .integer => switch (std.math.order(val_ty.scalarBits(self), ty.scalarBits(self))) {
+ .lt => switch (signedness) {
+ .unsigned => .zext,
+ .signed => .sext,
+ .unneeded => unreachable,
+ },
+ .eq => unreachable,
+ .gt => .trunc,
+ },
+ .pointer => .inttoptr,
+ else => unreachable,
+ },
+ .pointer => switch (ty.scalarTag(self)) {
+ .integer => .ptrtoint,
+ .pointer => .addrspacecast,
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+}
+
+fn convConstAssumeCapacity(
+ self: *Builder,
+ signedness: Constant.Cast.Signedness,
+ val: Constant,
+ ty: Type,
+) Constant {
+ const val_ty = val.typeOf(self);
+ if (val_ty == ty) return val;
+ return self.castConstAssumeCapacity(self.convTag(Constant.Tag, signedness, val_ty, ty), val, ty);
+}
+
+fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, val: Constant, ty: Type) Constant {
+ const Key = struct { tag: Constant.Tag, cast: Constant.Cast };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Key) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ std.hash.uint32(@intFromEnum(key.tag)),
+ std.mem.asBytes(&key.cast),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Cast, rhs_data);
+ return std.meta.eql(lhs_key.cast, rhs_extra);
+ }
+ };
+ const data = Key{ .tag = tag, .cast = .{ .val = val, .type = ty } };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addConstantExtraAssumeCapacity(data.cast),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(switch (tag) {
+ .trunc => &llvm.Value.constTrunc,
+ .zext => &llvm.Value.constZExt,
+ .sext => &llvm.Value.constSExt,
+ .fptrunc => &llvm.Value.constFPTrunc,
+ .fpext => &llvm.Value.constFPExt,
+ .fptoui => &llvm.Value.constFPToUI,
+ .fptosi => &llvm.Value.constFPToSI,
+ .uitofp => &llvm.Value.constUIToFP,
+ .sitofp => &llvm.Value.constSIToFP,
+ .ptrtoint => &llvm.Value.constPtrToInt,
+ .inttoptr => &llvm.Value.constIntToPtr,
+ .bitcast => &llvm.Value.constBitCast,
+ else => unreachable,
+ }(val.toLlvm(self), ty.toLlvm(self)));
+ }
+ return @enumFromInt(gop.index);
+}
+
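+/// Interns a constant GEP. Index types are validated (and a vector result type is
+/// reserved when the indices are vectors but the base is not); the indices are
+/// stored as a trailing slice after the `Constant.GetElementPtr` extra data.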
+fn gepConstAssumeCapacity(
+ self: *Builder,
+ comptime kind: Constant.GetElementPtr.Kind,
+ ty: Type,
+ base: Constant,
+ inrange: ?u16,
+ indices: []const Constant,
+) if (build_options.have_llvm) Allocator.Error!Constant else Constant {
+ const tag: Constant.Tag = switch (kind) {
+ .normal => .getelementptr,
+ .inbounds => .@"getelementptr inbounds",
+ };
+ const base_ty = base.typeOf(self);
+ const base_is_vector = base_ty.isVector(self);
+
+ const VectorInfo = struct {
+ kind: Type.Vector.Kind,
+ len: u32,
+
+ fn init(vector_ty: Type, builder: *const Builder) @This() {
+ return .{ .kind = vector_ty.vectorKind(builder), .len = vector_ty.vectorLen(builder) };
+ }
+ };
+ var vector_info: ?VectorInfo = if (base_is_vector) VectorInfo.init(base_ty, self) else null;
+ for (indices) |index| {
+ const index_ty = index.typeOf(self);
+ switch (index_ty.tag(self)) {
+ .integer => {},
+ .vector, .scalable_vector => {
+ const index_info = VectorInfo.init(index_ty, self);
+ if (vector_info) |info|
+ assert(std.meta.eql(info, index_info))
+ else
+ vector_info = index_info;
+ },
+ else => unreachable,
+ }
+ }
+ if (!base_is_vector) if (vector_info) |info| switch (info.kind) {
+ inline else => |vector_kind| _ = self.vectorTypeAssumeCapacity(vector_kind, info.len, base_ty),
+ };
+
+ const Key = struct {
+ type: Type,
+ base: Constant,
+ inrange: Constant.GetElementPtr.InRangeIndex,
+ indices: []const Constant,
+ };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Key) u32 {
+ var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag)));
+ hasher.update(std.mem.asBytes(&key.type));
+ hasher.update(std.mem.asBytes(&key.base));
+ hasher.update(std.mem.asBytes(&key.inrange));
+ hasher.update(std.mem.sliceAsBytes(key.indices));
+ return @truncate(hasher.final());
+ }
+ pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != tag) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ var rhs_extra = ctx.builder.constantExtraDataTrail(Constant.GetElementPtr, rhs_data);
+ const rhs_indices =
+ rhs_extra.trail.next(rhs_extra.data.info.indices_len, Constant, ctx.builder);
+ return lhs_key.type == rhs_extra.data.type and lhs_key.base == rhs_extra.data.base and
+ lhs_key.inrange == rhs_extra.data.info.inrange and
+ std.mem.eql(Constant, lhs_key.indices, rhs_indices);
+ }
+ };
+ const data = Key{
+ .type = ty,
+ .base = base,
+ .inrange = if (inrange) |index| @enumFromInt(index) else .none,
+ .indices = indices,
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addConstantExtraAssumeCapacity(Constant.GetElementPtr{
+ .type = ty,
+ .base = base,
+ .info = .{ .indices_len = @intCast(indices.len), .inrange = data.inrange },
+ }),
+ });
+ self.constant_extra.appendSliceAssumeCapacity(@ptrCast(indices));
+ if (self.useLibLlvm()) {
+ const ExpectedContents = [expected_gep_indices_len]*llvm.Value;
+ var stack align(@alignOf(ExpectedContents)) =
+ std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+ const allocator = stack.get();
+
+ const llvm_indices = try allocator.alloc(*llvm.Value, indices.len);
+ defer allocator.free(llvm_indices);
+ for (llvm_indices, indices) |*llvm_index, index| llvm_index.* = index.toLlvm(self);
+
+ self.llvm.constants.appendAssumeCapacity(switch (kind) {
+ .normal => llvm.Type.constGEP,
+ .inbounds => llvm.Type.constInBoundsGEP,
+ }(ty.toLlvm(self), base.toLlvm(self), llvm_indices.ptr, @intCast(llvm_indices.len)));
+ }
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn icmpConstAssumeCapacity(
+ self: *Builder,
+ cond: IntegerCondition,
+ lhs: Constant,
+ rhs: Constant,
+) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.Compare) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+                comptime std.hash.uint32(@intFromEnum(Constant.Tag.icmp)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.Compare, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .icmp) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Compare, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.Compare{ .cond = @intFromEnum(cond), .lhs = lhs, .rhs = rhs };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .icmp,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ llvm.constICmp(@enumFromInt(@intFromEnum(cond)), lhs.toLlvm(self), rhs.toLlvm(self)),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn fcmpConstAssumeCapacity(
+ self: *Builder,
+ cond: FloatCondition,
+ lhs: Constant,
+ rhs: Constant,
+) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.Compare) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+                comptime std.hash.uint32(@intFromEnum(Constant.Tag.fcmp)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.Compare, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .fcmp) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Compare, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.Compare{ .cond = @intFromEnum(cond), .lhs = lhs, .rhs = rhs };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .fcmp,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ llvm.constFCmp(@enumFromInt(@intFromEnum(cond)), lhs.toLlvm(self), rhs.toLlvm(self)),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn extractElementConstAssumeCapacity(
+ self: *Builder,
+ val: Constant,
+ index: Constant,
+) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.ExtractElement) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.extractelement)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.ExtractElement, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .extractelement) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.ExtractElement, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.ExtractElement{ .val = val, .index = index };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .extractelement,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ val.toLlvm(self).constExtractElement(index.toLlvm(self)),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn insertElementConstAssumeCapacity(
+ self: *Builder,
+ val: Constant,
+ elem: Constant,
+ index: Constant,
+) Constant {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.InsertElement) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.insertelement)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.InsertElement, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .insertelement) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.InsertElement, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.InsertElement{ .val = val, .elem = elem, .index = index };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .insertelement,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ val.toLlvm(self).constInsertElement(elem.toLlvm(self), index.toLlvm(self)),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn shuffleVectorConstAssumeCapacity(
+ self: *Builder,
+ lhs: Constant,
+ rhs: Constant,
+ mask: Constant,
+) Constant {
+    assert(lhs.typeOf(self).isVector(self));
+ assert(lhs.typeOf(self) == rhs.typeOf(self));
+ assert(mask.typeOf(self).scalarType(self).isInteger(self));
+ _ = lhs.typeOf(self).changeLengthAssumeCapacity(mask.typeOf(self).vectorLen(self), self);
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.ShuffleVector) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ comptime std.hash.uint32(@intFromEnum(Constant.Tag.shufflevector)),
+ std.mem.asBytes(&key),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.ShuffleVector, _: void, rhs_index: usize) bool {
+ if (ctx.builder.constant_items.items(.tag)[rhs_index] != .shufflevector) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.ShuffleVector, rhs_data);
+ return std.meta.eql(lhs_key, rhs_extra);
+ }
+ };
+ const data = Constant.ShuffleVector{ .lhs = lhs, .rhs = rhs, .mask = mask };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = .shufflevector,
+ .data = self.addConstantExtraAssumeCapacity(data),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(
+ lhs.toLlvm(self).constShuffleVector(rhs.toLlvm(self), mask.toLlvm(self)),
+ );
+ }
+ return @enumFromInt(gop.index);
+}
+
+fn binConstAssumeCapacity(
+ self: *Builder,
+ tag: Constant.Tag,
+ lhs: Constant,
+ rhs: Constant,
+) Constant {
+ switch (tag) {
+ .add,
+ .@"add nsw",
+ .@"add nuw",
+ .sub,
+ .@"sub nsw",
+ .@"sub nuw",
+ .mul,
+ .@"mul nsw",
+ .@"mul nuw",
+ .shl,
+ .lshr,
+ .ashr,
+ .@"and",
+ .@"or",
+ .xor,
+ => {},
+ else => unreachable,
+ }
+ const Key = struct { tag: Constant.Tag, bin: Constant.Binary };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Key) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ std.hash.uint32(@intFromEnum(key.tag)),
+ std.mem.asBytes(&key.bin),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ const rhs_extra = ctx.builder.constantExtraData(Constant.Binary, rhs_data);
+ return std.meta.eql(lhs_key.bin, rhs_extra);
+ }
+ };
+ const data = Key{ .tag = tag, .bin = .{ .lhs = lhs, .rhs = rhs } };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(data, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addConstantExtraAssumeCapacity(data.bin),
+ });
+ if (self.useLibLlvm()) self.llvm.constants.appendAssumeCapacity(switch (tag) {
+ .add => &llvm.Value.constAdd,
+ .sub => &llvm.Value.constSub,
+ .mul => &llvm.Value.constMul,
+ .shl => &llvm.Value.constShl,
+ .lshr => &llvm.Value.constLShr,
+ .ashr => &llvm.Value.constAShr,
+ .@"and" => &llvm.Value.constAnd,
+ .@"or" => &llvm.Value.constOr,
+ .xor => &llvm.Value.constXor,
+ else => unreachable,
+ }(lhs.toLlvm(self), rhs.toLlvm(self)));
+ }
+ return @enumFromInt(gop.index);
+}
+
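+/// Reserves capacity for `count` new constants in the map, the item list, the extra
+/// data (including `trail_len` trailing elements each), and the LLVM mirror list.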
+fn ensureUnusedConstantCapacity(
+ self: *Builder,
+ count: usize,
+ comptime Extra: type,
+ trail_len: usize,
+) Allocator.Error!void {
+ try self.constant_map.ensureUnusedCapacity(self.gpa, count);
+ try self.constant_items.ensureUnusedCapacity(self.gpa, count);
+ try self.constant_extra.ensureUnusedCapacity(
+ self.gpa,
+ count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ );
+ if (self.useLibLlvm()) try self.llvm.constants.ensureUnusedCapacity(self.gpa, count);
+}
+
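+/// Looks up or inserts a constant whose payload fits entirely in `Constant.Item.data`,
+/// reporting whether a new entry was created so the caller can mirror it into LLVM.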
+fn getOrPutConstantNoExtraAssumeCapacity(
+ self: *Builder,
+ item: Constant.Item,
+) struct { new: bool, constant: Constant } {
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Constant.Item) u32 {
+ return @truncate(std.hash.Wyhash.hash(
+ std.hash.uint32(@intFromEnum(key.tag)),
+ std.mem.asBytes(&key.data),
+ ));
+ }
+ pub fn eql(ctx: @This(), lhs_key: Constant.Item, _: void, rhs_index: usize) bool {
+ return std.meta.eql(lhs_key, ctx.builder.constant_items.get(rhs_index));
+ }
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(item, Adapter{ .builder = self });
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(item);
+ }
+ return .{ .new = !gop.found_existing, .constant = @enumFromInt(gop.index) };
+}
+
+fn getOrPutConstantAggregateAssumeCapacity(
+ self: *Builder,
+ tag: Constant.Tag,
+ ty: Type,
+ vals: []const Constant,
+) struct { new: bool, constant: Constant } {
+ switch (tag) {
+ .structure, .packed_structure, .array, .vector => {},
+ else => unreachable,
+ }
+ const Key = struct { tag: Constant.Tag, type: Type, vals: []const Constant };
+ const Adapter = struct {
+ builder: *const Builder,
+ pub fn hash(_: @This(), key: Key) u32 {
+ var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
+ hasher.update(std.mem.asBytes(&key.type));
+ hasher.update(std.mem.sliceAsBytes(key.vals));
+ return @truncate(hasher.final());
+ }
+ pub fn eql(ctx: @This(), lhs_key: Key, _: void, rhs_index: usize) bool {
+ if (lhs_key.tag != ctx.builder.constant_items.items(.tag)[rhs_index]) return false;
+ const rhs_data = ctx.builder.constant_items.items(.data)[rhs_index];
+ var rhs_extra = ctx.builder.constantExtraDataTrail(Constant.Aggregate, rhs_data);
+ if (lhs_key.type != rhs_extra.data.type) return false;
+ const rhs_vals = rhs_extra.trail.next(@intCast(lhs_key.vals.len), Constant, ctx.builder);
+ return std.mem.eql(Constant, lhs_key.vals, rhs_vals);
+ }
+ };
+ const gop = self.constant_map.getOrPutAssumeCapacityAdapted(
+ Key{ .tag = tag, .type = ty, .vals = vals },
+ Adapter{ .builder = self },
+ );
+ if (!gop.found_existing) {
+ gop.key_ptr.* = {};
+ gop.value_ptr.* = {};
+ self.constant_items.appendAssumeCapacity(.{
+ .tag = tag,
+ .data = self.addConstantExtraAssumeCapacity(Constant.Aggregate{ .type = ty }),
+ });
+ self.constant_extra.appendSliceAssumeCapacity(@ptrCast(vals));
+ }
+ return .{ .new = !gop.found_existing, .constant = @enumFromInt(gop.index) };
+}
+
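+/// Serializes an extra-data struct into `constant_extra`, one u32 per field, and
+/// returns the index of the first element written.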
+fn addConstantExtraAssumeCapacity(self: *Builder, extra: anytype) Constant.Item.ExtraIndex {
+ const result: Constant.Item.ExtraIndex = @intCast(self.constant_extra.items.len);
+ inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ const value = @field(extra, field.name);
+ self.constant_extra.appendAssumeCapacity(switch (field.type) {
+ u32 => value,
+ Type, Constant, Function.Index, Function.Block.Index => @intFromEnum(value),
+ Constant.GetElementPtr.Info => @bitCast(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ });
+ }
+ return result;
+}
+
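+/// Cursor over the variable-length data that follows a constant's fixed extra
+/// fields in `constant_extra`.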
+const ConstantExtraDataTrail = struct {
+ index: Constant.Item.ExtraIndex,
+
+ fn nextMut(self: *ConstantExtraDataTrail, len: u32, comptime Item: type, builder: *Builder) []Item {
+ const items: []Item = @ptrCast(builder.constant_extra.items[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+
+ fn next(
+ self: *ConstantExtraDataTrail,
+ len: u32,
+ comptime Item: type,
+ builder: *const Builder,
+ ) []const Item {
+ const items: []const Item = @ptrCast(builder.constant_extra.items[self.index..][0..len]);
+ self.index += @intCast(len);
+ return items;
+ }
+};
+
+fn constantExtraDataTrail(
+ self: *const Builder,
+ comptime T: type,
+ index: Constant.Item.ExtraIndex,
+) struct { data: T, trail: ConstantExtraDataTrail } {
+ var result: T = undefined;
+ const fields = @typeInfo(T).Struct.fields;
+ inline for (fields, self.constant_extra.items[index..][0..fields.len]) |field, value|
+ @field(result, field.name) = switch (field.type) {
+ u32 => value,
+ Type, Constant, Function.Index, Function.Block.Index => @enumFromInt(value),
+ Constant.GetElementPtr.Info => @bitCast(value),
+ else => @compileError("bad field type: " ++ @typeName(field.type)),
+ };
+ return .{
+ .data = result,
+ .trail = .{ .index = index + @as(Constant.Item.ExtraIndex, @intCast(fields.len)) },
+ };
+}
+
+fn constantExtraData(self: *const Builder, comptime T: type, index: Constant.Item.ExtraIndex) T {
+ return self.constantExtraDataTrail(T, index).data;
+}
+
+const assert = std.debug.assert;
+const build_options = @import("build_options");
+const builtin = @import("builtin");
+const llvm = if (build_options.have_llvm)
+ @import("bindings.zig")
+else
+ @compileError("LLVM unavailable");
+const log = std.log.scoped(.llvm);
+const std = @import("std");
+
+const Allocator = std.mem.Allocator;
+const Builder = @This();
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index b093588e80..3b99ae1fe1 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -40,21 +40,42 @@ pub const Context = opaque {
pub const halfType = LLVMHalfTypeInContext;
extern fn LLVMHalfTypeInContext(C: *Context) *Type;
+ pub const bfloatType = LLVMBFloatTypeInContext;
+ extern fn LLVMBFloatTypeInContext(C: *Context) *Type;
+
pub const floatType = LLVMFloatTypeInContext;
extern fn LLVMFloatTypeInContext(C: *Context) *Type;
pub const doubleType = LLVMDoubleTypeInContext;
extern fn LLVMDoubleTypeInContext(C: *Context) *Type;
- pub const x86FP80Type = LLVMX86FP80TypeInContext;
- extern fn LLVMX86FP80TypeInContext(C: *Context) *Type;
-
pub const fp128Type = LLVMFP128TypeInContext;
extern fn LLVMFP128TypeInContext(C: *Context) *Type;
+ pub const x86_fp80Type = LLVMX86FP80TypeInContext;
+ extern fn LLVMX86FP80TypeInContext(C: *Context) *Type;
+
+ pub const ppc_fp128Type = LLVMPPCFP128TypeInContext;
+ extern fn LLVMPPCFP128TypeInContext(C: *Context) *Type;
+
+ pub const x86_amxType = LLVMX86AMXTypeInContext;
+ extern fn LLVMX86AMXTypeInContext(C: *Context) *Type;
+
+ pub const x86_mmxType = LLVMX86MMXTypeInContext;
+ extern fn LLVMX86MMXTypeInContext(C: *Context) *Type;
+
pub const voidType = LLVMVoidTypeInContext;
extern fn LLVMVoidTypeInContext(C: *Context) *Type;
+ pub const labelType = LLVMLabelTypeInContext;
+ extern fn LLVMLabelTypeInContext(C: *Context) *Type;
+
+ pub const tokenType = LLVMTokenTypeInContext;
+ extern fn LLVMTokenTypeInContext(C: *Context) *Type;
+
+ pub const metadataType = LLVMMetadataTypeInContext;
+ extern fn LLVMMetadataTypeInContext(C: *Context) *Type;
+
pub const structType = LLVMStructTypeInContext;
extern fn LLVMStructTypeInContext(
C: *Context,
@@ -114,9 +135,6 @@ pub const Value = opaque {
pub const getNextInstruction = LLVMGetNextInstruction;
extern fn LLVMGetNextInstruction(Inst: *Value) ?*Value;
- pub const typeOf = LLVMTypeOf;
- extern fn LLVMTypeOf(Val: *Value) *Type;
-
pub const setGlobalConstant = LLVMSetGlobalConstant;
extern fn LLVMSetGlobalConstant(GlobalVar: *Value, IsConstant: Bool) void;
@@ -147,36 +165,135 @@ pub const Value = opaque {
pub const setAliasee = LLVMAliasSetAliasee;
extern fn LLVMAliasSetAliasee(Alias: *Value, Aliasee: *Value) void;
- pub const constBitCast = LLVMConstBitCast;
- extern fn LLVMConstBitCast(ConstantVal: *Value, ToType: *Type) *Value;
+ pub const constZExtOrBitCast = LLVMConstZExtOrBitCast;
+ extern fn LLVMConstZExtOrBitCast(ConstantVal: *Value, ToType: *Type) *Value;
- pub const constIntToPtr = LLVMConstIntToPtr;
- extern fn LLVMConstIntToPtr(ConstantVal: *Value, ToType: *Type) *Value;
+ pub const constNeg = LLVMConstNeg;
+ extern fn LLVMConstNeg(ConstantVal: *Value) *Value;
- pub const constPtrToInt = LLVMConstPtrToInt;
- extern fn LLVMConstPtrToInt(ConstantVal: *Value, ToType: *Type) *Value;
+ pub const constNSWNeg = LLVMConstNSWNeg;
+ extern fn LLVMConstNSWNeg(ConstantVal: *Value) *Value;
- pub const constShl = LLVMConstShl;
- extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value;
+ pub const constNUWNeg = LLVMConstNUWNeg;
+ extern fn LLVMConstNUWNeg(ConstantVal: *Value) *Value;
+
+ pub const constNot = LLVMConstNot;
+ extern fn LLVMConstNot(ConstantVal: *Value) *Value;
+
+ pub const constAdd = LLVMConstAdd;
+ extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constNSWAdd = LLVMConstNSWAdd;
+ extern fn LLVMConstNSWAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constNUWAdd = LLVMConstNUWAdd;
+ extern fn LLVMConstNUWAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constSub = LLVMConstSub;
+ extern fn LLVMConstSub(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constNSWSub = LLVMConstNSWSub;
+ extern fn LLVMConstNSWSub(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constNUWSub = LLVMConstNUWSub;
+ extern fn LLVMConstNUWSub(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constMul = LLVMConstMul;
+ extern fn LLVMConstMul(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constNSWMul = LLVMConstNSWMul;
+ extern fn LLVMConstNSWMul(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constNUWMul = LLVMConstNUWMul;
+ extern fn LLVMConstNUWMul(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constAnd = LLVMConstAnd;
+ extern fn LLVMConstAnd(LHSConstant: *Value, RHSConstant: *Value) *Value;
pub const constOr = LLVMConstOr;
extern fn LLVMConstOr(LHSConstant: *Value, RHSConstant: *Value) *Value;
+ pub const constXor = LLVMConstXor;
+ extern fn LLVMConstXor(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constShl = LLVMConstShl;
+ extern fn LLVMConstShl(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constLShr = LLVMConstLShr;
+ extern fn LLVMConstLShr(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constAShr = LLVMConstAShr;
+ extern fn LLVMConstAShr(LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+ pub const constTrunc = LLVMConstTrunc;
+ extern fn LLVMConstTrunc(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constSExt = LLVMConstSExt;
+ extern fn LLVMConstSExt(ConstantVal: *Value, ToType: *Type) *Value;
+
pub const constZExt = LLVMConstZExt;
extern fn LLVMConstZExt(ConstantVal: *Value, ToType: *Type) *Value;
- pub const constZExtOrBitCast = LLVMConstZExtOrBitCast;
- extern fn LLVMConstZExtOrBitCast(ConstantVal: *Value, ToType: *Type) *Value;
+ pub const constFPTrunc = LLVMConstFPTrunc;
+ extern fn LLVMConstFPTrunc(ConstantVal: *Value, ToType: *Type) *Value;
- pub const constNot = LLVMConstNot;
- extern fn LLVMConstNot(ConstantVal: *Value) *Value;
+ pub const constFPExt = LLVMConstFPExt;
+ extern fn LLVMConstFPExt(ConstantVal: *Value, ToType: *Type) *Value;
- pub const constAdd = LLVMConstAdd;
- extern fn LLVMConstAdd(LHSConstant: *Value, RHSConstant: *Value) *Value;
+ pub const constUIToFP = LLVMConstUIToFP;
+ extern fn LLVMConstUIToFP(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constSIToFP = LLVMConstSIToFP;
+ extern fn LLVMConstSIToFP(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constFPToUI = LLVMConstFPToUI;
+ extern fn LLVMConstFPToUI(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constFPToSI = LLVMConstFPToSI;
+ extern fn LLVMConstFPToSI(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constPtrToInt = LLVMConstPtrToInt;
+ extern fn LLVMConstPtrToInt(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constIntToPtr = LLVMConstIntToPtr;
+ extern fn LLVMConstIntToPtr(ConstantVal: *Value, ToType: *Type) *Value;
+
+ pub const constBitCast = LLVMConstBitCast;
+ extern fn LLVMConstBitCast(ConstantVal: *Value, ToType: *Type) *Value;
pub const constAddrSpaceCast = LLVMConstAddrSpaceCast;
extern fn LLVMConstAddrSpaceCast(ConstantVal: *Value, ToType: *Type) *Value;
+ pub const constSelect = LLVMConstSelect;
+ extern fn LLVMConstSelect(
+ ConstantCondition: *Value,
+ ConstantIfTrue: *Value,
+ ConstantIfFalse: *Value,
+ ) *Value;
+
+ pub const constExtractElement = LLVMConstExtractElement;
+ extern fn LLVMConstExtractElement(VectorConstant: *Value, IndexConstant: *Value) *Value;
+
+ pub const constInsertElement = LLVMConstInsertElement;
+ extern fn LLVMConstInsertElement(
+ VectorConstant: *Value,
+ ElementValueConstant: *Value,
+ IndexConstant: *Value,
+ ) *Value;
+
+ pub const constShuffleVector = LLVMConstShuffleVector;
+ extern fn LLVMConstShuffleVector(
+ VectorAConstant: *Value,
+ VectorBConstant: *Value,
+ MaskConstant: *Value,
+ ) *Value;
+
+ pub const isConstant = LLVMIsConstant;
+ extern fn LLVMIsConstant(Val: *Value) Bool;
+
+ pub const blockAddress = LLVMBlockAddress;
+ extern fn LLVMBlockAddress(F: *Value, BB: *BasicBlock) *Value;
+
pub const setWeak = LLVMSetWeak;
extern fn LLVMSetWeak(CmpXchgInst: *Value, IsWeak: Bool) void;
@@ -186,6 +303,9 @@ pub const Value = opaque {
pub const setVolatile = LLVMSetVolatile;
extern fn LLVMSetVolatile(MemoryAccessInst: *Value, IsVolatile: Bool) void;
+ pub const setAtomicSingleThread = LLVMSetAtomicSingleThread;
+ extern fn LLVMSetAtomicSingleThread(AtomicInst: *Value, SingleThread: Bool) void;
+
pub const setAlignment = LLVMSetAlignment;
extern fn LLVMSetAlignment(V: *Value, Bytes: c_uint) void;
@@ -231,17 +351,9 @@ pub const Value = opaque {
pub const addCase = LLVMAddCase;
extern fn LLVMAddCase(Switch: *Value, OnVal: *Value, Dest: *BasicBlock) void;
- pub inline fn isPoison(Val: *Value) bool {
- return LLVMIsPoison(Val).toBool();
- }
- extern fn LLVMIsPoison(Val: *Value) Bool;
-
pub const replaceAllUsesWith = LLVMReplaceAllUsesWith;
extern fn LLVMReplaceAllUsesWith(OldVal: *Value, NewVal: *Value) void;
- pub const globalGetValueType = LLVMGlobalGetValueType;
- extern fn LLVMGlobalGetValueType(Global: *Value) *Type;
-
pub const getLinkage = LLVMGetLinkage;
extern fn LLVMGetLinkage(Global: *Value) Linkage;
@@ -259,6 +371,9 @@ pub const Value = opaque {
pub const attachMetaData = ZigLLVMAttachMetaData;
extern fn ZigLLVMAttachMetaData(GlobalVar: *Value, DIG: *DIGlobalVariableExpression) void;
+
+ pub const dump = LLVMDumpValue;
+ extern fn LLVMDumpValue(Val: *Value) void;
};
pub const Type = opaque {
@@ -290,12 +405,18 @@ pub const Type = opaque {
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *Type) *Value;
+ pub const getPoison = LLVMGetPoison;
+ extern fn LLVMGetPoison(Ty: *Type) *Value;
+
pub const arrayType = LLVMArrayType;
extern fn LLVMArrayType(ElementType: *Type, ElementCount: c_uint) *Type;
pub const vectorType = LLVMVectorType;
extern fn LLVMVectorType(ElementType: *Type, ElementCount: c_uint) *Type;
+ pub const scalableVectorType = LLVMScalableVectorType;
+ extern fn LLVMScalableVectorType(ElementType: *Type, ElementCount: c_uint) *Type;
+
pub const structSetBody = LLVMStructSetBody;
extern fn LLVMStructSetBody(
StructTy: *Type,
@@ -304,23 +425,13 @@ pub const Type = opaque {
Packed: Bool,
) void;
- pub const structGetTypeAtIndex = LLVMStructGetTypeAtIndex;
- extern fn LLVMStructGetTypeAtIndex(StructTy: *Type, i: c_uint) *Type;
-
- pub const getTypeKind = LLVMGetTypeKind;
- extern fn LLVMGetTypeKind(Ty: *Type) TypeKind;
-
- pub const getElementType = LLVMGetElementType;
- extern fn LLVMGetElementType(Ty: *Type) *Type;
-
- pub const countStructElementTypes = LLVMCountStructElementTypes;
- extern fn LLVMCountStructElementTypes(StructTy: *Type) c_uint;
-
- pub const isOpaqueStruct = LLVMIsOpaqueStruct;
- extern fn LLVMIsOpaqueStruct(StructTy: *Type) Bool;
-
- pub const isSized = LLVMTypeIsSized;
- extern fn LLVMTypeIsSized(Ty: *Type) Bool;
+ pub const constGEP = LLVMConstGEP2;
+ extern fn LLVMConstGEP2(
+ Ty: *Type,
+ ConstantVal: *Value,
+ ConstantIndices: [*]const *Value,
+ NumIndices: c_uint,
+ ) *Value;
pub const constInBoundsGEP = LLVMConstInBoundsGEP2;
extern fn LLVMConstInBoundsGEP2(
@@ -329,6 +440,9 @@ pub const Type = opaque {
ConstantIndices: [*]const *Value,
NumIndices: c_uint,
) *Value;
+
+ pub const dump = LLVMDumpType;
+ extern fn LLVMDumpType(Ty: *Type) void;
};
pub const Module = opaque {
@@ -439,15 +553,18 @@ pub const VerifierFailureAction = enum(c_int) {
ReturnStatus,
};
-pub const constNeg = LLVMConstNeg;
-extern fn LLVMConstNeg(ConstantVal: *Value) *Value;
-
pub const constVector = LLVMConstVector;
extern fn LLVMConstVector(
ScalarConstantVals: [*]*Value,
Size: c_uint,
) *Value;
+pub const constICmp = LLVMConstICmp;
+extern fn LLVMConstICmp(Predicate: IntPredicate, LHSConstant: *Value, RHSConstant: *Value) *Value;
+
+pub const constFCmp = LLVMConstFCmp;
+extern fn LLVMConstFCmp(Predicate: RealPredicate, LHSConstant: *Value, RHSConstant: *Value) *Value;
+
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
@@ -484,7 +601,7 @@ pub const Builder = opaque {
extern fn LLVMPositionBuilder(
Builder: *Builder,
Block: *BasicBlock,
- Instr: *Value,
+ Instr: ?*Value,
) void;
pub const positionBuilderAtEnd = LLVMPositionBuilderAtEnd;
@@ -678,6 +795,16 @@ pub const Builder = opaque {
pub const buildBitCast = LLVMBuildBitCast;
extern fn LLVMBuildBitCast(*Builder, Val: *Value, DestTy: *Type, Name: [*:0]const u8) *Value;
+ pub const buildGEP = LLVMBuildGEP2;
+ extern fn LLVMBuildGEP2(
+ B: *Builder,
+ Ty: *Type,
+ Pointer: *Value,
+ Indices: [*]const *Value,
+ NumIndices: c_uint,
+ Name: [*:0]const u8,
+ ) *Value;
+
pub const buildInBoundsGEP = LLVMBuildInBoundsGEP2;
extern fn LLVMBuildInBoundsGEP2(
B: *Builder,
@@ -731,14 +858,6 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *Value;
- pub const buildVectorSplat = LLVMBuildVectorSplat;
- extern fn LLVMBuildVectorSplat(
- *Builder,
- ElementCount: c_uint,
- EltVal: *Value,
- Name: [*:0]const u8,
- ) *Value;
-
pub const buildPtrToInt = LLVMBuildPtrToInt;
extern fn LLVMBuildPtrToInt(
*Builder,
@@ -755,15 +874,6 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *Value;
- pub const buildStructGEP = LLVMBuildStructGEP2;
- extern fn LLVMBuildStructGEP2(
- B: *Builder,
- Ty: *Type,
- Pointer: *Value,
- Idx: c_uint,
- Name: [*:0]const u8,
- ) *Value;
-
pub const buildTrunc = LLVMBuildTrunc;
extern fn LLVMBuildTrunc(
*Builder,
@@ -1019,9 +1129,6 @@ pub const RealPredicate = enum(c_uint) {
pub const BasicBlock = opaque {
pub const deleteBasicBlock = LLVMDeleteBasicBlock;
extern fn LLVMDeleteBasicBlock(BB: *BasicBlock) void;
-
- pub const getFirstInstruction = LLVMGetFirstInstruction;
- extern fn LLVMGetFirstInstruction(BB: *BasicBlock) ?*Value;
};
pub const TargetMachine = opaque {
@@ -1071,6 +1178,9 @@ pub const TargetData = opaque {
pub const abiSizeOfType = LLVMABISizeOfType;
extern fn LLVMABISizeOfType(TD: *TargetData, Ty: *Type) c_ulonglong;
+
+ pub const stringRep = LLVMCopyStringRepOfTargetData;
+ extern fn LLVMCopyStringRepOfTargetData(TD: *TargetData) [*:0]const u8;
};
pub const CodeModel = enum(c_int) {
@@ -1440,29 +1550,6 @@ pub const AtomicRMWBinOp = enum(c_int) {
FMin,
};
-pub const TypeKind = enum(c_int) {
- Void,
- Half,
- Float,
- Double,
- X86_FP80,
- FP128,
- PPC_FP128,
- Label,
- Integer,
- Function,
- Struct,
- Array,
- Pointer,
- Vector,
- Metadata,
- X86_MMX,
- Token,
- ScalableVector,
- BFloat,
- X86_AMX,
-};
-
pub const CallConv = enum(c_uint) {
C = 0,
Fast = 8,
@@ -1588,6 +1675,13 @@ pub const address_space = struct {
pub const constant_buffer_14: c_uint = 22;
pub const constant_buffer_15: c_uint = 23;
};
+
+    // See llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.h
+ pub const wasm = struct {
+ pub const variable: c_uint = 1;
+ pub const externref: c_uint = 10;
+ pub const funcref: c_uint = 20;
+ };
};
pub const DIEnumerator = opaque {};
diff --git a/src/link.zig b/src/link.zig
index eb6c085663..262718e5af 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -110,6 +110,7 @@ pub const Options = struct {
/// other objects.
/// Otherwise (depending on `use_lld`) this link code directly outputs and updates the final binary.
use_llvm: bool,
+ use_lib_llvm: bool,
link_libc: bool,
link_libcpp: bool,
link_libunwind: bool,
diff --git a/src/main.zig b/src/main.zig
index c92c69e173..134b566bdc 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -439,6 +439,8 @@ const usage_build_generic =
\\ -fno-unwind-tables Never produce unwind table entries
\\ -fLLVM Force using LLVM as the codegen backend
\\ -fno-LLVM Prevent using LLVM as the codegen backend
+ \\ -flibLLVM Force using the LLVM API in the codegen backend
+ \\ -fno-libLLVM Prevent using the LLVM API in the codegen backend
\\ -fClang Force using Clang as the C/C++ compilation backend
\\ -fno-Clang Prevent using Clang as the C/C++ compilation backend
\\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
@@ -821,6 +823,7 @@ fn buildOutputType(
var stack_size_override: ?u64 = null;
var image_base_override: ?u64 = null;
var use_llvm: ?bool = null;
+ var use_lib_llvm: ?bool = null;
var use_lld: ?bool = null;
var use_clang: ?bool = null;
var link_eh_frame_hdr = false;
@@ -1261,6 +1264,10 @@ fn buildOutputType(
use_llvm = true;
} else if (mem.eql(u8, arg, "-fno-LLVM")) {
use_llvm = false;
+ } else if (mem.eql(u8, arg, "-flibLLVM")) {
+ use_lib_llvm = true;
+ } else if (mem.eql(u8, arg, "-fno-libLLVM")) {
+ use_lib_llvm = false;
} else if (mem.eql(u8, arg, "-fLLD")) {
use_lld = true;
} else if (mem.eql(u8, arg, "-fno-LLD")) {
@@ -3119,6 +3126,7 @@ fn buildOutputType(
.want_tsan = want_tsan,
.want_compiler_rt = want_compiler_rt,
.use_llvm = use_llvm,
+ .use_lib_llvm = use_lib_llvm,
.use_lld = use_lld,
.use_clang = use_clang,
.hash_style = hash_style,
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index b04356bf4d..26ea04aca6 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -560,10 +560,6 @@ LLVMValueRef ZigLLVMBuildUShlSat(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRe
return wrap(call_inst);
}
-LLVMValueRef LLVMBuildVectorSplat(LLVMBuilderRef B, unsigned elem_count, LLVMValueRef V, const char *Name) {
- return wrap(unwrap(B)->CreateVectorSplat(elem_count, unwrap(V), Name));
-}
-
void ZigLLVMFnSetSubprogram(LLVMValueRef fn, ZigLLVMDISubprogram *subprogram) {
assert( isa<Function>(unwrap(fn)) );
Function *unwrapped_function = reinterpret_cast<Function*>(unwrap(fn));