Diffstat (limited to 'src/codegen/llvm.zig')
-rw-r--r--   src/codegen/llvm.zig   8224
1 file changed, 4363 insertions, 3861 deletions
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 4960414499..57842ef1e0 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -7,7 +7,11 @@ const math = std.math; const native_endian = builtin.cpu.arch.endian(); const DW = std.dwarf; -const llvm = @import("llvm/bindings.zig"); +const Builder = @import("llvm/Builder.zig"); +const llvm = if (build_options.have_llvm or true) + @import("llvm/bindings.zig") +else + @compileError("LLVM unavailable"); const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); @@ -34,7 +38,7 @@ const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev; const Error = error{ OutOfMemory, CodegenFail }; -pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 { +pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 { var llvm_triple = std.ArrayList(u8).init(allocator); defer llvm_triple.deinit(); @@ -207,7 +211,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 { }; try llvm_triple.appendSlice(llvm_abi); - return llvm_triple.toOwnedSliceSentinel(0); + return llvm_triple.toOwnedSlice(); } pub fn targetOs(os_tag: std.Target.Os.Tag) llvm.OSType { @@ -327,17 +331,363 @@ pub fn supportsTailCall(target: std.Target) bool { } } -/// TODO can this be done with simpler logic / different API binding? -fn deleteLlvmGlobal(llvm_global: *llvm.Value) void { - if (llvm_global.globalGetValueType().getTypeKind() == .Function) { - llvm_global.deleteFunction(); - return; +const DataLayoutBuilder = struct { + target: std.Target, + + pub fn format( + self: DataLayoutBuilder, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + const is_aarch64_windows = self.target.cpu.arch == .aarch64 and self.target.os.tag == .windows; + try writer.writeByte(switch (self.target.cpu.arch.endian()) { + .Little => 'e', + .Big => 'E', + }); + switch (self.target.cpu.arch) { + .amdgcn, + .nvptx, + .nvptx64, + => {}, + .avr => try writer.writeAll("-P1"), + else => try writer.print("-m:{c}", .{@as(u8, switch (self.target.cpu.arch) { + .mips, .mipsel => 'm', // Mips mangling: Private symbols get a $ prefix. + else => switch (self.target.ofmt) { + .elf => 'e', // ELF mangling: Private symbols get a `.L` prefix. + //.goff => 'l', // GOFF mangling: Private symbols get a `@` prefix. + .macho => 'o', // Mach-O mangling: Private symbols get `L` prefix. + // Other symbols get a `_` prefix. + .coff => switch (self.target.os.tag) { + .windows => switch (self.target.cpu.arch) { + .x86 => 'x', // Windows x86 COFF mangling: Private symbols get the usual + // prefix. Regular C symbols get a `_` prefix. Functions with `__stdcall`, + //`__fastcall`, and `__vectorcall` have custom mangling that appends `@N` + // where N is the number of bytes used to pass parameters. C++ symbols + // starting with `?` are not mangled in any way. + else => 'w', // Windows COFF mangling: Similar to x, except that normal C + // symbols do not receive a `_` prefix. + }, + else => 'e', + }, + //.xcoff => 'a', // XCOFF mangling: Private symbols get a `L..` prefix. + else => 'e', + }, + })}), + } + var any_non_integral = false; + const ptr_bit_width = self.target.ptrBitWidth(); + var default_info = struct { size: u16, abi: u16, pref: u16, idx: u16 }{ + .size = 64, + .abi = 64, + .pref = 64, + .idx = 64, + }; + const addr_space_info = llvmAddrSpaceInfo(self.target); + for (addr_space_info, 0..) 
|info, i| { + assert((info.llvm == .default) == (i == 0)); + if (info.non_integral) { + assert(info.llvm != .default); + any_non_integral = true; + } + const size = info.size orelse ptr_bit_width; + const abi = info.abi orelse ptr_bit_width; + const pref = info.pref orelse abi; + const idx = info.idx orelse size; + const matches_default = + size == default_info.size and + abi == default_info.abi and + pref == default_info.pref and + idx == default_info.idx; + if (info.llvm == .default) default_info = .{ + .size = size, + .abi = abi, + .pref = pref, + .idx = idx, + }; + if (self.target.cpu.arch == .aarch64_32) continue; + if (!info.force_in_data_layout and matches_default and + self.target.cpu.arch != .riscv64 and !is_aarch64_windows and + self.target.cpu.arch != .bpfeb and self.target.cpu.arch != .bpfel) continue; + try writer.writeAll("-p"); + if (info.llvm != .default) try writer.print("{d}", .{@intFromEnum(info.llvm)}); + try writer.print(":{d}:{d}", .{ size, abi }); + if (pref != abi or idx != size or self.target.cpu.arch == .hexagon) { + try writer.print(":{d}", .{pref}); + if (idx != size) try writer.print(":{d}", .{idx}); + } + } + if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) + try writer.writeAll("-Fi8"); // for thumb interwork + if (self.target.cpu.arch != .hexagon) { + if (self.target.cpu.arch == .s390x) try self.typeAlignment(.integer, 1, 8, 8, false, writer); + try self.typeAlignment(.integer, 8, 8, 8, false, writer); + try self.typeAlignment(.integer, 16, 16, 16, false, writer); + try self.typeAlignment(.integer, 32, if (is_aarch64_windows) 0 else 32, 32, false, writer); + try self.typeAlignment(.integer, 64, 32, 64, false, writer); + try self.typeAlignment(.integer, 128, 32, 64, false, writer); + if (backendSupportsF16(self.target)) try self.typeAlignment(.float, 16, 16, 16, false, writer); + try self.typeAlignment(.float, 32, 32, 32, false, writer); + try self.typeAlignment(.float, 64, 64, 64, false, writer); + if (backendSupportsF80(self.target)) try self.typeAlignment(.float, 80, 0, 0, false, writer); + try self.typeAlignment(.float, 128, 128, 128, false, writer); + } + switch (self.target.cpu.arch) { + .amdgcn => { + try self.typeAlignment(.vector, 16, 16, 16, false, writer); + try self.typeAlignment(.vector, 24, 32, 32, false, writer); + try self.typeAlignment(.vector, 32, 32, 32, false, writer); + try self.typeAlignment(.vector, 48, 64, 64, false, writer); + try self.typeAlignment(.vector, 96, 128, 128, false, writer); + try self.typeAlignment(.vector, 192, 256, 256, false, writer); + try self.typeAlignment(.vector, 256, 256, 256, false, writer); + try self.typeAlignment(.vector, 512, 512, 512, false, writer); + try self.typeAlignment(.vector, 1024, 1024, 1024, false, writer); + try self.typeAlignment(.vector, 2048, 2048, 2048, false, writer); + }, + .ve => {}, + else => { + try self.typeAlignment(.vector, 16, 32, 32, false, writer); + try self.typeAlignment(.vector, 32, 32, 32, false, writer); + try self.typeAlignment(.vector, 64, 64, 64, false, writer); + try self.typeAlignment(.vector, 128, 128, 128, true, writer); + }, + } + if (self.target.os.tag != .windows and self.target.cpu.arch != .avr) + try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); + for (@as([]const u24, switch (self.target.cpu.arch) { + .avr => &.{8}, + .msp430 => &.{ 8, 16 }, + .arm, + .armeb, + .mips, + .mipsel, + .powerpc, + .powerpcle, + .riscv32, + .sparc, + .sparcel, + .thumb, + .thumbeb, + => &.{32}, + .aarch64, + .aarch64_be, + .aarch64_32, + .amdgcn, + .bpfeb, 
+ .bpfel, + .mips64, + .mips64el, + .powerpc64, + .powerpc64le, + .riscv64, + .s390x, + .sparc64, + .ve, + .wasm32, + .wasm64, + => &.{ 32, 64 }, + .hexagon => &.{ 16, 32 }, + .x86 => &.{ 8, 16, 32 }, + .nvptx, + .nvptx64, + => &.{ 16, 32, 64 }, + .x86_64 => &.{ 8, 16, 32, 64 }, + else => &.{}, + }), 0..) |natural, index| switch (index) { + 0 => try writer.print("-n{d}", .{natural}), + else => try writer.print(":{d}", .{natural}), + }; + if (self.target.cpu.arch == .hexagon) { + try self.typeAlignment(.integer, 64, 64, 64, true, writer); + try self.typeAlignment(.integer, 32, 32, 32, true, writer); + try self.typeAlignment(.integer, 16, 16, 16, true, writer); + try self.typeAlignment(.integer, 1, 8, 8, true, writer); + try self.typeAlignment(.float, 32, 32, 32, true, writer); + try self.typeAlignment(.float, 64, 64, 64, true, writer); + } + if (self.target.os.tag == .windows or self.target.cpu.arch == .avr) + try self.typeAlignment(.aggregate, 0, 0, 64, false, writer); + const stack_abi = self.target.stackAlignment() * 8; + if (self.target.os.tag == .windows or self.target.cpu.arch == .msp430 or + stack_abi != ptr_bit_width) + try writer.print("-S{d}", .{stack_abi}); + switch (self.target.cpu.arch) { + .hexagon, .ve => { + try self.typeAlignment(.vector, 32, 128, 128, true, writer); + try self.typeAlignment(.vector, 64, 128, 128, true, writer); + try self.typeAlignment(.vector, 128, 128, 128, true, writer); + }, + else => {}, + } + if (self.target.cpu.arch != .amdgcn) { + try self.typeAlignment(.vector, 256, 128, 128, true, writer); + try self.typeAlignment(.vector, 512, 128, 128, true, writer); + try self.typeAlignment(.vector, 1024, 128, 128, true, writer); + try self.typeAlignment(.vector, 2048, 128, 128, true, writer); + try self.typeAlignment(.vector, 4096, 128, 128, true, writer); + try self.typeAlignment(.vector, 8192, 128, 128, true, writer); + try self.typeAlignment(.vector, 16384, 128, 128, true, writer); + } + const alloca_addr_space = llvmAllocaAddressSpace(self.target); + if (alloca_addr_space != .default) try writer.print("-A{d}", .{@intFromEnum(alloca_addr_space)}); + const global_addr_space = llvmDefaultGlobalAddressSpace(self.target); + if (global_addr_space != .default) try writer.print("-G{d}", .{@intFromEnum(global_addr_space)}); + if (any_non_integral) { + try writer.writeAll("-ni"); + for (addr_space_info) |info| if (info.non_integral) + try writer.print(":{d}", .{@intFromEnum(info.llvm)}); + } + } + + fn typeAlignment( + self: DataLayoutBuilder, + kind: enum { integer, vector, float, aggregate }, + size: u24, + default_abi: u24, + default_pref: u24, + default_force_pref: bool, + writer: anytype, + ) @TypeOf(writer).Error!void { + var abi = default_abi; + var pref = default_pref; + var force_abi = false; + var force_pref = default_force_pref; + if (kind == .float and size == 80) { + abi = 128; + pref = 128; + } + for (@as([]const std.Target.CType, switch (kind) { + .integer => &.{ .char, .short, .int, .long, .longlong }, + .float => &.{ .float, .double, .longdouble }, + .vector, .aggregate => &.{}, + })) |cty| { + if (self.target.c_type_bit_size(cty) != size) continue; + abi = self.target.c_type_alignment(cty) * 8; + pref = self.target.c_type_preferred_alignment(cty) * 8; + break; + } + switch (kind) { + .integer => { + if (self.target.ptrBitWidth() <= 16 and size >= 128) return; + abi = @min(abi, self.target.maxIntAlignment() * 8); + switch (self.target.os.tag) { + .linux => switch (self.target.cpu.arch) { + .aarch64, + .aarch64_be, + .aarch64_32, + .mips, + .mipsel, 
+ => pref = @max(pref, 32), + else => {}, + }, + else => {}, + } + switch (self.target.cpu.arch) { + .aarch64, + .aarch64_be, + .aarch64_32, + .bpfeb, + .bpfel, + .nvptx, + .nvptx64, + .riscv64, + => if (size == 128) { + abi = size; + pref = size; + }, + .hexagon => force_abi = true, + .mips64, + .mips64el, + => if (size <= 32) { + pref = 32; + }, + .s390x => if (size <= 16) { + pref = 16; + }, + .ve => if (size == 64) { + abi = size; + pref = size; + }, + else => {}, + } + }, + .vector => if (self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) { + switch (size) { + 128 => abi = 64, + else => {}, + } + } else if ((self.target.cpu.arch.isPPC64() and (size == 256 or size == 512)) or + (self.target.cpu.arch.isNvptx() and (size == 16 or size == 32))) + { + force_abi = true; + abi = size; + pref = size; + } else if (self.target.cpu.arch == .amdgcn and size <= 2048) { + force_abi = true; + } else if (self.target.cpu.arch == .hexagon and + ((size >= 32 and size <= 64) or (size >= 512 and size <= 2048))) + { + abi = size; + pref = size; + force_pref = true; + } else if (self.target.cpu.arch == .s390x and size == 128) { + abi = 64; + pref = 64; + force_pref = false; + } else if (self.target.cpu.arch == .ve and (size >= 64 and size <= 16384)) { + abi = 64; + pref = 64; + force_abi = true; + force_pref = true; + }, + .float => switch (self.target.cpu.arch) { + .avr, .msp430, .sparc64 => if (size != 32 and size != 64) return, + .hexagon => if (size == 32 or size == 64) { + force_abi = true; + }, + .aarch64_32 => if (size == 128) { + abi = size; + pref = size; + }, + .ve => if (size == 64) { + abi = size; + pref = size; + }, + else => {}, + }, + .aggregate => if (self.target.os.tag == .windows or + self.target.cpu.arch.isARM() or self.target.cpu.arch.isThumb()) + { + pref = @min(pref, self.target.ptrBitWidth()); + } else if (self.target.cpu.arch == .hexagon) { + abi = 0; + pref = 0; + } else if (self.target.cpu.arch == .s390x) { + abi = 8; + pref = 16; + } else if (self.target.cpu.arch == .msp430) { + abi = 8; + pref = 8; + }, + } + if (kind != .vector and self.target.cpu.arch == .avr) { + force_abi = true; + abi = 8; + pref = 8; + } + if (!force_abi and abi == default_abi and pref == default_pref) return; + try writer.print("-{c}", .{@tagName(kind)[0]}); + if (size != 0) try writer.print("{d}", .{size}); + try writer.print(":{d}", .{abi}); + if (pref != abi or force_pref) try writer.print(":{d}", .{pref}); } - return llvm_global.deleteGlobal(); -} +}; pub const Object = struct { gpa: Allocator, + builder: Builder, + module: *Module, llvm_module: *llvm.Module, di_builder: ?*llvm.DIBuilder, @@ -347,7 +697,6 @@ pub const Object = struct { /// - *Module.Decl (Non-Fn) => *DIGlobalVariable di_map: std.AutoHashMapUnmanaged(*const anyopaque, *llvm.DINode), di_compile_unit: ?*llvm.DICompileUnit, - context: *llvm.Context, target_machine: *llvm.TargetMachine, target_data: *llvm.TargetData, target: std.Target, @@ -359,9 +708,9 @@ pub const Object = struct { /// version of the name and incorrectly get function not found in the llvm module. /// * it works for functions not all globals. /// Therefore, this table keeps track of the mapping. - decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), + decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Global.Index), /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. 
- named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), + named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, Builder.Function.Index), /// Maps Zig types to LLVM types. The table memory is backed by the GPA of /// the compiler. /// TODO when InternPool garbage collection is implemented, this map needs @@ -371,16 +720,16 @@ pub const Object = struct { /// The LLVM global table which holds the names corresponding to Zig errors. /// Note that the values are not added until flushModule, when all errors in /// the compilation are known. - error_name_table: ?*llvm.Value, + error_name_table: Builder.Variable.Index, /// This map is usually very close to empty. It tracks only the cases when a /// second extern Decl could not be emitted with the correct name due to a /// name collision. extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void), /// Memoizes a null `?usize` value. - null_opt_addr: ?*llvm.Value, + null_opt_usize: Builder.Constant, - pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type); + pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, Builder.Type); /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we /// want to iterate over it while adding entries to it. @@ -394,138 +743,137 @@ pub const Object = struct { } pub fn init(gpa: Allocator, options: link.Options) !Object { - const context = llvm.Context.create(); - errdefer context.dispose(); - - initializeLLVMTarget(options.target.cpu.arch); - - const llvm_module = llvm.Module.createWithName(options.root_name.ptr, context); - errdefer llvm_module.dispose(); - const llvm_target_triple = try targetTriple(gpa, options.target); defer gpa.free(llvm_target_triple); - var error_message: [*:0]const u8 = undefined; - var target: *llvm.Target = undefined; - if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) { - defer llvm.disposeMessage(error_message); - - log.err("LLVM failed to parse '{s}': {s}", .{ llvm_target_triple, error_message }); - return error.InvalidLlvmTriple; - } + var builder = try Builder.init(.{ + .allocator = gpa, + .use_lib_llvm = options.use_lib_llvm, + .strip = options.strip, + .name = options.root_name, + .target = options.target, + .triple = llvm_target_triple, + }); + errdefer builder.deinit(); + + var target_machine: *llvm.TargetMachine = undefined; + var target_data: *llvm.TargetData = undefined; + if (builder.useLibLlvm()) { + if (!options.strip) { + switch (options.target.ofmt) { + .coff => builder.llvm.module.?.addModuleCodeViewFlag(), + else => builder.llvm.module.?.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"), + } + builder.llvm.di_builder = builder.llvm.module.?.createDIBuilder(true); + + // Don't use the version string here; LLVM misparses it when it + // includes the git revision. + const producer = try builder.fmt("zig {d}.{d}.{d}", .{ + build_options.semver.major, + build_options.semver.minor, + build_options.semver.patch, + }); - llvm_module.setTarget(llvm_target_triple.ptr); - var opt_di_builder: ?*llvm.DIBuilder = null; - errdefer if (opt_di_builder) |di_builder| di_builder.dispose(); + // We fully resolve all paths at this point to avoid lack of source line info in stack + // traces or lack of debugging information which, if relative paths were used, would + // be very location dependent. + // TODO: the only concern I have with this is WASI as either host or target, should + // we leave the paths as relative then? 
+ var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; + const compile_unit_dir = blk: { + const path = d: { + const mod = options.module orelse break :d "."; + break :d mod.root_pkg.root_src_directory.path orelse "."; + }; + if (std.fs.path.isAbsolute(path)) break :blk path; + break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was + }; + const compile_unit_dir_z = try builder.gpa.dupeZ(u8, compile_unit_dir); + defer builder.gpa.free(compile_unit_dir_z); + + builder.llvm.di_compile_unit = builder.llvm.di_builder.?.createCompileUnit( + DW.LANG.C99, + builder.llvm.di_builder.?.createFile(options.root_name, compile_unit_dir_z), + producer.toSlice(&builder).?, + options.optimize_mode != .Debug, + "", // flags + 0, // runtime version + "", // split name + 0, // dwo id + true, // emit debug info + ); + } - var di_compile_unit: ?*llvm.DICompileUnit = null; + const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug) + .None + else + .Aggressive; - if (!options.strip) { - switch (options.target.ofmt) { - .coff => llvm_module.addModuleCodeViewFlag(), - else => llvm_module.addModuleDebugInfoFlag(options.dwarf_format == std.dwarf.Format.@"64"), - } - const di_builder = llvm_module.createDIBuilder(true); - opt_di_builder = di_builder; - - // Don't use the version string here; LLVM misparses it when it - // includes the git revision. - const producer = try std.fmt.allocPrintZ(gpa, "zig {d}.{d}.{d}", .{ - build_options.semver.major, - build_options.semver.minor, - build_options.semver.patch, - }); - defer gpa.free(producer); - - // We fully resolve all paths at this point to avoid lack of source line info in stack - // traces or lack of debugging information which, if relative paths were used, would - // be very location dependent. - // TODO: the only concern I have with this is WASI as either host or target, should - // we leave the paths as relative then? 
- var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined; - const compile_unit_dir = blk: { - const path = d: { - const mod = options.module orelse break :d "."; - break :d mod.root_pkg.root_src_directory.path orelse "."; - }; - if (std.fs.path.isAbsolute(path)) break :blk path; - break :blk std.os.realpath(path, &buf) catch path; // If realpath fails, fallback to whatever path was + const reloc_mode: llvm.RelocMode = if (options.pic) + .PIC + else if (options.link_mode == .Dynamic) + llvm.RelocMode.DynamicNoPIC + else + .Static; + + const code_model: llvm.CodeModel = switch (options.machine_code_model) { + .default => .Default, + .tiny => .Tiny, + .small => .Small, + .kernel => .Kernel, + .medium => .Medium, + .large => .Large, }; - const compile_unit_dir_z = try gpa.dupeZ(u8, compile_unit_dir); - defer gpa.free(compile_unit_dir_z); - - di_compile_unit = di_builder.createCompileUnit( - DW.LANG.C99, - di_builder.createFile(options.root_name, compile_unit_dir_z), - producer, - options.optimize_mode != .Debug, - "", // flags - 0, // runtime version - "", // split name - 0, // dwo id - true, // emit debug info - ); - } - const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug) - .None - else - .Aggressive; + // TODO handle float ABI better- it should depend on the ABI portion of std.Target + const float_abi: llvm.ABIType = .Default; + + target_machine = llvm.TargetMachine.create( + builder.llvm.target.?, + builder.target_triple.toSlice(&builder).?, + if (options.target.cpu.model.llvm_name) |s| s.ptr else null, + options.llvm_cpu_features, + opt_level, + reloc_mode, + code_model, + options.function_sections, + float_abi, + if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null, + ); + errdefer target_machine.dispose(); - const reloc_mode: llvm.RelocMode = if (options.pic) - .PIC - else if (options.link_mode == .Dynamic) - llvm.RelocMode.DynamicNoPIC - else - .Static; - - const code_model: llvm.CodeModel = switch (options.machine_code_model) { - .default => .Default, - .tiny => .Tiny, - .small => .Small, - .kernel => .Kernel, - .medium => .Medium, - .large => .Large, - }; + target_data = target_machine.createTargetDataLayout(); + errdefer target_data.dispose(); - // TODO handle float ABI better- it should depend on the ABI portion of std.Target - const float_abi: llvm.ABIType = .Default; - - const target_machine = llvm.TargetMachine.create( - target, - llvm_target_triple.ptr, - if (options.target.cpu.model.llvm_name) |s| s.ptr else null, - options.llvm_cpu_features, - opt_level, - reloc_mode, - code_model, - options.function_sections, - float_abi, - if (target_util.llvmMachineAbi(options.target)) |s| s.ptr else null, - ); - errdefer target_machine.dispose(); + builder.llvm.module.?.setModuleDataLayout(target_data); - const target_data = target_machine.createTargetDataLayout(); - errdefer target_data.dispose(); + if (options.pic) builder.llvm.module.?.setModulePICLevel(); + if (options.pie) builder.llvm.module.?.setModulePIELevel(); + if (code_model != .Default) builder.llvm.module.?.setModuleCodeModel(code_model); - llvm_module.setModuleDataLayout(target_data); - - if (options.pic) llvm_module.setModulePICLevel(); - if (options.pie) llvm_module.setModulePIELevel(); - if (code_model != .Default) llvm_module.setModuleCodeModel(code_model); + if (options.opt_bisect_limit >= 0) { + builder.llvm.context.setOptBisectLimit(std.math.lossyCast(c_int, options.opt_bisect_limit)); + } - if (options.opt_bisect_limit >= 0) { - context.setOptBisectLimit(std.math.lossyCast(c_int, 
options.opt_bisect_limit)); + builder.data_layout = try builder.fmt("{}", .{DataLayoutBuilder{ .target = options.target }}); + if (std.debug.runtime_safety) { + const rep = target_data.stringRep(); + defer llvm.disposeMessage(rep); + std.testing.expectEqualStrings( + std.mem.span(rep), + builder.data_layout.toSlice(&builder).?, + ) catch unreachable; + } } - return Object{ + return .{ .gpa = gpa, + .builder = builder, .module = options.module.?, - .llvm_module = llvm_module, + .llvm_module = builder.llvm.module.?, .di_map = .{}, - .di_builder = opt_di_builder, - .di_compile_unit = di_compile_unit, - .context = context, + .di_builder = builder.llvm.di_builder, + .di_compile_unit = builder.llvm.di_compile_unit, .target_machine = target_machine, .target_data = target_data, .target = options.target, @@ -533,22 +881,17 @@ pub const Object = struct { .named_enum_map = .{}, .type_map = .{}, .di_type_map = .{}, - .error_name_table = null, + .error_name_table = .none, .extern_collisions = .{}, - .null_opt_addr = null, + .null_opt_usize = .no_init, }; } pub fn deinit(self: *Object, gpa: Allocator) void { - if (self.di_builder) |dib| { - dib.dispose(); - self.di_map.deinit(gpa); - self.di_type_map.deinit(gpa); - } + self.di_map.deinit(gpa); + self.di_type_map.deinit(gpa); self.target_data.dispose(); self.target_machine.dispose(); - self.llvm_module.dispose(); - self.context.dispose(); self.decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); @@ -572,85 +915,108 @@ pub const Object = struct { return slice.ptr; } - fn genErrorNameTable(o: *Object) !void { + fn genErrorNameTable(o: *Object) Allocator.Error!void { // If o.error_name_table is null, there was no instruction that actually referenced the error table. - const error_name_table_ptr_global = o.error_name_table orelse return; + const error_name_table_ptr_global = o.error_name_table; + if (error_name_table_ptr_global == .none) return; const mod = o.module; - const target = mod.getTarget(); - - const llvm_ptr_ty = o.context.pointerType(0); // TODO: Address space - const llvm_usize_ty = o.context.intType(target.ptrBitWidth()); - const type_fields = [_]*llvm.Type{ - llvm_ptr_ty, - llvm_usize_ty, - }; - const llvm_slice_ty = o.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.slice_const_u8_sentinel_0; - const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.global_error_set.keys(); - const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); + const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len); defer mod.gpa.free(llvm_errors); - llvm_errors[0] = llvm_slice_ty.getUndef(); + // TODO: Address space + const slice_ty = Type.slice_const_u8_sentinel_0; + const slice_alignment = slice_ty.abiAlignment(mod); + const llvm_usize_ty = try o.lowerType(Type.usize); + const llvm_slice_ty = try o.lowerType(slice_ty); + const llvm_table_ty = try o.builder.arrayType(error_name_list.len, llvm_slice_ty); + + llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty); for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { - const name = mod.intern_pool.stringToSlice(name_nts); - const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); - const str_global = o.llvm_module.addGlobal(str_init.typeOf(), ""); - str_global.setInitializer(str_init); - str_global.setLinkage(.Private); - str_global.setGlobalConstant(.True); - str_global.setUnnamedAddr(.True); - str_global.setAlignment(1); - - const slice_fields 
= [_]*llvm.Value{ - str_global, - llvm_usize_ty.constInt(name.len, .False), + const name = try o.builder.string(mod.intern_pool.stringToSlice(name_nts)); + const str_init = try o.builder.stringNullConst(name); + const str_ty = str_init.typeOf(&o.builder); + const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); + str_llvm_global.setInitializer(str_init.toLlvm(&o.builder)); + str_llvm_global.setLinkage(.Private); + str_llvm_global.setGlobalConstant(.True); + str_llvm_global.setUnnamedAddr(.True); + str_llvm_global.setAlignment(1); + + var str_global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = str_ty, + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; - llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len); - } + var str_variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = str_init, + .alignment = comptime Builder.Alignment.fromByteUnits(1), + }; + try o.builder.llvm.globals.append(o.gpa, str_llvm_global); + const global_index = try o.builder.addGlobal(.empty, str_global); + try o.builder.variables.append(o.gpa, str_variable); - const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len))); + llvm_error.* = try o.builder.structConst(llvm_slice_ty, &.{ + global_index.toConst(), + try o.builder.intConst(llvm_usize_ty, name.toSlice(&o.builder).?.len), + }); + } - const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), ""); - error_name_table_global.setInitializer(error_name_table_init); + const error_name_table_init = try o.builder.arrayConst(llvm_table_ty, llvm_errors); + const error_name_table_global = o.llvm_module.addGlobal(llvm_table_ty.toLlvm(&o.builder), ""); + error_name_table_global.setInitializer(error_name_table_init.toLlvm(&o.builder)); error_name_table_global.setLinkage(.Private); error_name_table_global.setGlobalConstant(.True); error_name_table_global.setUnnamedAddr(.True); error_name_table_global.setAlignment(slice_alignment); // TODO: Dont hardcode + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = llvm_table_ty, + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = error_name_table_init, + .alignment = Builder.Alignment.fromByteUnits(slice_alignment), + }; + try o.builder.llvm.globals.append(o.gpa, error_name_table_global); + _ = try o.builder.addGlobal(.empty, global); + try o.builder.variables.append(o.gpa, variable); + const error_name_table_ptr = error_name_table_global; - error_name_table_ptr_global.setInitializer(error_name_table_ptr); + error_name_table_ptr_global.ptr(&o.builder).init = variable.global.toConst(); + error_name_table_ptr_global.toLlvm(&o.builder).setInitializer(error_name_table_ptr); } - fn genCmpLtErrorsLenFunction(object: *Object) !void { + fn genCmpLtErrorsLenFunction(o: *Object) !void { // If there is no such function in the module, it means the source code does not need it. 
- const llvm_fn = object.llvm_module.getNamedFunction(lt_errors_fn_name) orelse return; - const mod = object.module; + const name = o.builder.stringIfExists(lt_errors_fn_name) orelse return; + const llvm_fn = o.builder.getGlobal(name) orelse return; + const mod = o.module; const errors_len = mod.global_error_set.count(); - // Delete previous implementation. We replace it with every flush() because the - // total number of errors may have changed. - while (llvm_fn.getFirstBasicBlock()) |bb| { - bb.deleteBasicBlock(); - } - - const builder = object.context.createBuilder(); - - const entry_block = object.context.appendBasicBlock(llvm_fn, "Entry"); - builder.positionBuilderAtEnd(entry_block); - builder.clearCurrentDebugLocation(); + var wip = try Builder.WipFunction.init(&o.builder, llvm_fn.ptrConst(&o.builder).kind.function); + defer wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; // Example source of the following LLVM IR: // fn __zig_lt_errors_len(index: u16) bool { // return index < total_errors_len; // } - const lhs = llvm_fn.getParam(0); - const rhs = lhs.typeOf().constInt(errors_len, .False); - const is_lt = builder.buildICmp(.ULT, lhs, rhs, ""); - _ = builder.buildRet(is_lt); + const lhs = wip.arg(0); + const rhs = try o.builder.intValue(Builder.Type.err_int, errors_len); + const is_lt = try wip.icmp(.ult, lhs, rhs, ""); + _ = try wip.ret(is_lt); + try wip.finish(); } fn genModuleLevelAssembly(object: *Object) !void { @@ -671,34 +1037,28 @@ pub const Object = struct { // This map has externs with incorrect symbol names. for (object.extern_collisions.keys()) |decl_index| { - const entry = object.decl_map.getEntry(decl_index) orelse continue; - const llvm_global = entry.value_ptr.*; + const global = object.decl_map.get(decl_index) orelse continue; // Same logic as below but for externs instead of exports. - const decl = mod.declPtr(decl_index); - const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue; - if (other_global == llvm_global) continue; + const decl_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(mod.declPtr(decl_index).name)) orelse continue; + const other_global = object.builder.getGlobal(decl_name) orelse continue; + if (other_global.eql(global, &object.builder)) continue; - llvm_global.replaceAllUsesWith(other_global); - deleteLlvmGlobal(llvm_global); - entry.value_ptr.* = other_global; + try global.replace(other_global, &object.builder); } object.extern_collisions.clearRetainingCapacity(); - const export_keys = mod.decl_exports.keys(); - for (mod.decl_exports.values(), 0..) |export_list, i| { - const decl_index = export_keys[i]; - const llvm_global = object.decl_map.get(decl_index) orelse continue; + for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| { + const global = object.decl_map.get(decl_index) orelse continue; for (export_list.items) |exp| { // Detect if the LLVM global has already been created as an extern. In such // case, we need to replace all uses of it with this exported global. 
- const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); + const exp_name = object.builder.stringIfExists(mod.intern_pool.stringToSlice(exp.opts.name)) orelse continue; - const other_global = object.getLlvmGlobal(exp_name.ptr) orelse continue; - if (other_global == llvm_global) continue; + const other_global = object.builder.getGlobal(exp_name) orelse continue; + if (other_global.eql(global, &object.builder)) continue; - other_global.replaceAllUsesWith(llvm_global); - llvm_global.takeName(other_global); - deleteLlvmGlobal(other_global); + try global.takeName(other_global, &object.builder); + try other_global.replace(global, &object.builder); // Problem: now we need to replace in the decl_map that // the extern decl index points to this new global. However we don't // know the decl index. @@ -744,20 +1104,9 @@ pub const Object = struct { if (comp.verbose_llvm_ir) |path| { if (std.mem.eql(u8, path, "-")) { - self.llvm_module.dump(); + self.builder.dump(); } else { - const path_z = try comp.gpa.dupeZ(u8, path); - defer comp.gpa.free(path_z); - - var error_message: [*:0]const u8 = undefined; - - if (self.llvm_module.printModuleToFile(path_z, &error_message).toBool()) { - defer llvm.disposeMessage(error_message); - - log.err("dump LLVM module failed ir={s}: {s}", .{ - path, error_message, - }); - } + _ = try self.builder.printToFile(path); } } @@ -884,7 +1233,9 @@ pub const Object = struct { .err_msg = null, }; - const llvm_func = try o.resolveLlvmFunction(decl_index); + const function = try o.resolveLlvmFunction(decl_index); + const global = function.ptrConst(&o.builder).global; + const llvm_func = global.toLlvm(&o.builder); if (func.analysis(ip).is_noinline) { o.addFnAttr(llvm_func, "noinline"); @@ -921,24 +1272,27 @@ pub const Object = struct { o.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); } - if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| + if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| { + function.ptr(&o.builder).section = try o.builder.string(section); llvm_func.setSection(section); - - // Remove all the basic blocks of a function in order to start over, generating - // LLVM IR from an empty function body. - while (llvm_func.getFirstBasicBlock()) |bb| { - bb.deleteBasicBlock(); } - const builder = o.context.createBuilder(); + var deinit_wip = true; + var wip = try Builder.WipFunction.init(&o.builder, function); + defer if (deinit_wip) wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; - const entry_block = o.context.appendBasicBlock(llvm_func, "Entry"); - builder.positionBuilderAtEnd(entry_block); + const builder = wip.llvm.builder; + var llvm_arg_i: u32 = 0; // This gets the LLVM values from the function and stores them in `dg.args`. 
const fn_info = mod.typeToFunc(decl.ty).?; const sret = firstParamSRet(fn_info, mod); - const ret_ptr = if (sret) llvm_func.getParam(0) else null; + const ret_ptr: Builder.Value = if (sret) param: { + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + break :param param; + } else .none; const gpa = o.gpa; if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) { @@ -949,207 +1303,183 @@ pub const Object = struct { const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; - const err_ret_trace = if (err_return_tracing) - llvm_func.getParam(@intFromBool(ret_ptr != null)) - else - null; + const err_ret_trace: Builder.Value = if (err_return_tracing) param: { + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + break :param param; + } else .none; // This is the list of args we will use that correspond directly to the AIR arg // instructions. Depending on the calling convention, this list is not necessarily // a bijection with the actual LLVM parameters of the function. - var args = std.ArrayList(*llvm.Value).init(gpa); - defer args.deinit(); + var args: std.ArrayListUnmanaged(Builder.Value) = .{}; + defer args.deinit(gpa); { - var llvm_arg_i = @as(c_uint, @intFromBool(ret_ptr != null)) + @intFromBool(err_return_tracing); var it = iterateParamTypes(o, fn_info); - while (it.next()) |lowering| switch (lowering) { - .no_bits => continue, - .byval => { - assert(!it.byval_attr); - const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types.get(ip)[param_index].toType(); - const param = llvm_func.getParam(llvm_arg_i); - try args.ensureUnusedCapacity(1); - - if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = param.typeOf(); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); - const store_inst = builder.buildStore(param, arg_ptr); - store_inst.setAlignment(alignment); - args.appendAssumeCapacity(arg_ptr); - } else { - args.appendAssumeCapacity(param); - - o.addByValParamAttrs(llvm_func, param_ty, param_index, fn_info, llvm_arg_i); - } - llvm_arg_i += 1; - }, - .byref => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); - const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(mod); - - o.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); - llvm_arg_i += 1; - - try args.ensureUnusedCapacity(1); - - if (isByRef(param_ty, mod)) { - args.appendAssumeCapacity(param); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, param, ""); - load_inst.setAlignment(alignment); - args.appendAssumeCapacity(load_inst); - } - }, - .byref_mut => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); - const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(mod); - - o.addArgAttr(llvm_func, llvm_arg_i, "noundef"); - llvm_arg_i += 1; - - try args.ensureUnusedCapacity(1); + while (try it.next()) |lowering| { + try args.ensureUnusedCapacity(gpa, 1); + + switch (lowering) { + .no_bits => continue, + .byval => { + assert(!it.byval_attr); + const param_index = it.zig_index - 1; + const param_ty = fn_info.param_types.get(ip)[param_index].toType(); + const param = wip.arg(llvm_arg_i); + + if (isByRef(param_ty, mod)) { + const alignment = 
Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const param_llvm_ty = param.typeOfWip(&wip); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); + args.appendAssumeCapacity(arg_ptr); + } else { + args.appendAssumeCapacity(param); - if (isByRef(param_ty, mod)) { - args.appendAssumeCapacity(param); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, param, ""); - load_inst.setAlignment(alignment); - args.appendAssumeCapacity(load_inst); - } - }, - .abi_sized_int => { - assert(!it.byval_attr); - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + o.addByValParamAttrs(llvm_func, param_ty, param_index, fn_info, @intCast(llvm_arg_i)); + } + llvm_arg_i += 1; + }, + .byref => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); - const param_llvm_ty = try o.lowerType(param_ty); - const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - const int_llvm_ty = o.context.intType(abi_size * 8); - const alignment = @max( - param_ty.abiAlignment(mod), - o.target_data.abiAlignmentOfType(int_llvm_ty), - ); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); - const store_inst = builder.buildStore(param, arg_ptr); - store_inst.setAlignment(alignment); + o.addByRefParamAttrs(llvm_func, @intCast(llvm_arg_i), @intCast(alignment.toByteUnits() orelse 0), it.byval_attr, param_llvm_ty); + llvm_arg_i += 1; - try args.ensureUnusedCapacity(1); + if (isByRef(param_ty, mod)) { + args.appendAssumeCapacity(param); + } else { + args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); + } + }, + .byref_mut => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); - if (isByRef(param_ty, mod)) { - args.appendAssumeCapacity(arg_ptr); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(alignment); - args.appendAssumeCapacity(load_inst); - } - }, - .slice => { - assert(!it.byval_attr); - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const ptr_info = param_ty.ptrInfo(mod); + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "noundef"); + llvm_arg_i += 1; - if (math.cast(u5, it.zig_index - 1)) |i| { - if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { - o.addArgAttr(llvm_func, llvm_arg_i, "noalias"); + if (isByRef(param_ty, mod)) { + args.appendAssumeCapacity(param); + } else { + args.appendAssumeCapacity(try wip.load(.normal, param_llvm_ty, param, alignment, "")); } - } - if (param_ty.zigTypeTag(mod) != .Optional) { - o.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); - } - if (ptr_info.flags.is_const) { - o.addArgAttr(llvm_func, llvm_arg_i, "readonly"); - } - const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse - @max(ptr_info.child.toType().abiAlignment(mod), 1); - o.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align); - const ptr_param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - const len_param = llvm_func.getParam(llvm_arg_i); - 
llvm_arg_i += 1; - - const slice_llvm_ty = try o.lowerType(param_ty); - const partial = builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr_param, 0, ""); - const aggregate = builder.buildInsertValue(partial, len_param, 1, ""); - try args.append(aggregate); - }, - .multiple_llvm_types => { - assert(!it.byval_attr); - const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(mod); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); - const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False); - for (field_types, 0..) |_, field_i_usize| { - const field_i = @as(c_uint, @intCast(field_i_usize)); - const param = llvm_func.getParam(llvm_arg_i); + }, + .abi_sized_int => { + assert(!it.byval_attr); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; - const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, ""); - const store_inst = builder.buildStore(param, field_ptr); - store_inst.setAlignment(target.ptrBitWidth() / 8); - } - const is_by_ref = isByRef(param_ty, mod); - const loaded = if (is_by_ref) arg_ptr else l: { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(param_alignment); - break :l load_inst; - }; - try args.append(loaded); - }, - .as_u16 => { - assert(!it.byval_attr); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - const casted = builder.buildBitCast(param, o.context.halfType(), ""); - try args.ensureUnusedCapacity(1); - args.appendAssumeCapacity(casted); - }, - .float_array => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + const param_llvm_ty = try o.lowerType(param_ty); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); + const alignment = Builder.Alignment.fromByteUnits(@max( + param_ty.abiAlignment(mod), + o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)), + )); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); + + args.appendAssumeCapacity(if (isByRef(param_ty, mod)) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); + }, + .slice => { + assert(!it.byval_attr); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const ptr_info = param_ty.ptrInfo(mod); + + if (math.cast(u5, it.zig_index - 1)) |i| { + if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "noalias"); + } + } + if (param_ty.zigTypeTag(mod) != .Optional) { + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "nonnull"); + } + if (ptr_info.flags.is_const) { + o.addArgAttr(llvm_func, @intCast(llvm_arg_i), "readonly"); + } + const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse + @max(ptr_info.child.toType().abiAlignment(mod), 1); + o.addArgAttrInt(llvm_func, @intCast(llvm_arg_i), "align", elem_align); + const ptr_param = wip.arg(llvm_arg_i + 0); + const len_param = wip.arg(llvm_arg_i + 1); + llvm_arg_i += 2; + + const slice_llvm_ty = try o.lowerType(param_ty); + 
args.appendAssumeCapacity( + try wip.buildAggregate(slice_llvm_ty, &.{ ptr_param, len_param }, ""), + ); + }, + .multiple_llvm_types => { + assert(!it.byval_attr); + const field_types = it.types_buffer[0..it.types_len]; + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param_alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target); + const llvm_ty = try o.builder.structType(.normal, field_types); + for (0..field_types.len) |field_i| { + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + const field_ptr = try wip.gepStruct(llvm_ty, arg_ptr, field_i, ""); + const alignment = + Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + _ = try wip.store(.normal, param, field_ptr, alignment); + } - const alignment = param_ty.abiAlignment(mod); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); - _ = builder.buildStore(param, arg_ptr); + const is_by_ref = isByRef(param_ty, mod); + args.appendAssumeCapacity(if (is_by_ref) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, param_alignment, "")); + }, + .as_u16 => { + assert(!it.byval_attr); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; + args.appendAssumeCapacity(try wip.cast(.bitcast, param, .half, "")); + }, + .float_array => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; - if (isByRef(param_ty, mod)) { - try args.append(arg_ptr); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(alignment); - try args.append(load_inst); - } - }, - .i32_array, .i64_array => { - const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const param_llvm_ty = try o.lowerType(param_ty); - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); - const alignment = param_ty.abiAlignment(mod); - const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, alignment, target); - _ = builder.buildStore(param, arg_ptr); + args.appendAssumeCapacity(if (isByRef(param_ty, mod)) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); + }, + .i32_array, .i64_array => { + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); + const param_llvm_ty = try o.lowerType(param_ty); + const param = wip.arg(llvm_arg_i); + llvm_arg_i += 1; - if (isByRef(param_ty, mod)) { - try args.append(arg_ptr); - } else { - const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); - load_inst.setAlignment(alignment); - try args.append(load_inst); - } - }, - }; + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target); + _ = try wip.store(.normal, param, arg_ptr, alignment); + + args.appendAssumeCapacity(if (isByRef(param_ty, mod)) + arg_ptr + else + try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, "")); + }, + } + } } var di_file: ?*llvm.DIFile = null; @@ -1191,16 +1521,15 @@ pub const Object 
= struct { .gpa = gpa, .air = air, .liveness = liveness, - .context = o.context, .dg = &dg, + .wip = wip, .builder = builder, .ret_ptr = ret_ptr, .args = args.items, .arg_index = 0, .func_inst_table = .{}, - .llvm_func = llvm_func, .blocks = .{}, - .single_threaded = mod.comp.bin_file.options.single_threaded, + .sync_scope = if (mod.comp.bin_file.options.single_threaded) .singlethread else .system, .di_scope = di_scope, .di_file = di_file, .base_line = dg.decl.src_line, @@ -1209,6 +1538,7 @@ pub const Object = struct { .err_ret_trace = err_ret_trace, }; defer fg.deinit(); + deinit_wip = false; fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { @@ -1220,6 +1550,8 @@ pub const Object = struct { else => |e| return e, }; + try fg.wip.finish(); + try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } @@ -1243,14 +1575,6 @@ pub const Object = struct { try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); } - /// TODO replace this with a call to `Module::getNamedValue`. This will require adding - /// a new wrapper in zig_llvm.h/zig_llvm.cpp. - fn getLlvmGlobal(o: Object, name: [*:0]const u8) ?*llvm.Value { - if (o.llvm_module.getNamedFunction(name)) |x| return x; - if (o.llvm_module.getNamedGlobal(name)) |x| return x; - return null; - } - pub fn updateDeclExports( self: *Object, mod: *Module, @@ -1260,93 +1584,133 @@ pub const Object = struct { const gpa = mod.gpa; // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. - const llvm_global = self.decl_map.get(decl_index) orelse return; + const global = self.decl_map.get(decl_index) orelse return; + const llvm_global = global.toLlvm(&self.builder); const decl = mod.declPtr(decl_index); if (decl.isExtern(mod)) { - var free_decl_name = false; const decl_name = decl_name: { const decl_name = mod.intern_pool.stringToSlice(decl.name); if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { - free_decl_name = true; - break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ - decl_name, lib_name, - }); + break :decl_name try self.builder.fmt("{s}|{s}", .{ decl_name, lib_name }); } } } - break :decl_name decl_name; + break :decl_name try self.builder.string(decl_name); }; - defer if (free_decl_name) gpa.free(decl_name); - llvm_global.setValueName(decl_name); - if (self.getLlvmGlobal(decl_name)) |other_global| { - if (other_global != llvm_global) { + if (self.builder.getGlobal(decl_name)) |other_global| { + if (other_global.toLlvm(&self.builder) != llvm_global) { try self.extern_collisions.put(gpa, decl_index, {}); } } + + try global.rename(decl_name, &self.builder); + global.ptr(&self.builder).unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); + global.ptr(&self.builder).linkage = .external; llvm_global.setLinkage(.External); - if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) { + global.ptr(&self.builder).dll_storage_class = .default; + llvm_global.setDLLStorageClass(.Default); + } if (self.di_map.get(decl)) |di_node| { + const decl_name_slice = decl_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { - const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); + const di_func: 
*llvm.DISubprogram = @ptrCast(di_node); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); + const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, decl_name_slice.ptr, decl_name_slice.len); di_global.replaceLinkageName(linkage_name); } } - if (decl.val.getVariable(mod)) |variable| { - if (variable.is_threadlocal) { + if (decl.val.getVariable(mod)) |decl_var| { + if (decl_var.is_threadlocal) { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.is_weak_linkage) { + if (decl_var.is_weak_linkage) { + global.ptr(&self.builder).linkage = .extern_weak; llvm_global.setLinkage(.ExternalWeak); } } + global.ptr(&self.builder).updateAttributes(); } else if (exports.len != 0) { - const exp_name = mod.intern_pool.stringToSlice(exports[0].opts.name); - llvm_global.setValueName2(exp_name.ptr, exp_name.len); + const exp_name = try self.builder.string(mod.intern_pool.stringToSlice(exports[0].opts.name)); + try global.rename(exp_name, &self.builder); + global.ptr(&self.builder).unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); - if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); + if (mod.wantDllExports()) { + global.ptr(&self.builder).dll_storage_class = .dllexport; + llvm_global.setDLLStorageClass(.DLLExport); + } if (self.di_map.get(decl)) |di_node| { + const exp_name_slice = exp_name.toSlice(&self.builder).?; if (try decl.isFunction(mod)) { - const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); + const di_func: *llvm.DISubprogram = @ptrCast(di_node); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len); di_func.replaceLinkageName(linkage_name); } else { - const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node)); - const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); + const di_global: *llvm.DIGlobalVariable = @ptrCast(di_node); + const linkage_name = llvm.MDString.get(self.builder.llvm.context, exp_name_slice.ptr, exp_name_slice.len); di_global.replaceLinkageName(linkage_name); } } switch (exports[0].opts.linkage) { .Internal => unreachable, - .Strong => llvm_global.setLinkage(.External), - .Weak => llvm_global.setLinkage(.WeakODR), - .LinkOnce => llvm_global.setLinkage(.LinkOnceODR), + .Strong => { + global.ptr(&self.builder).linkage = .external; + llvm_global.setLinkage(.External); + }, + .Weak => { + global.ptr(&self.builder).linkage = .weak_odr; + llvm_global.setLinkage(.WeakODR); + }, + .LinkOnce => { + global.ptr(&self.builder).linkage = .linkonce_odr; + llvm_global.setLinkage(.LinkOnceODR); + }, } switch (exports[0].opts.visibility) { - .default => llvm_global.setVisibility(.Default), - .hidden => llvm_global.setVisibility(.Hidden), - .protected => llvm_global.setVisibility(.Protected), + .default => { + global.ptr(&self.builder).visibility = .default; + 
llvm_global.setVisibility(.Default); + }, + .hidden => { + global.ptr(&self.builder).visibility = .hidden; + llvm_global.setVisibility(.Hidden); + }, + .protected => { + global.ptr(&self.builder).visibility = .protected; + llvm_global.setVisibility(.Protected); + }, } if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| { + switch (global.ptrConst(&self.builder).kind) { + inline .variable, .function => |impl_index| impl_index.ptr(&self.builder).section = + try self.builder.string(section), + else => unreachable, + } llvm_global.setSection(section); } - if (decl.val.getVariable(mod)) |variable| { - if (variable.is_threadlocal) { + if (decl.val.getVariable(mod)) |decl_var| { + if (decl_var.is_threadlocal) { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } } + global.ptr(&self.builder).updateAttributes(); // If a Decl is exported more than one time (which is rare), // we add aliases for all but the first export. @@ -1361,7 +1725,7 @@ pub const Object = struct { alias.setAliasee(llvm_global); } else { _ = self.llvm_module.addAlias( - llvm_global.globalGetValueType(), + global.ptrConst(&self.builder).type.toLlvm(&self.builder), 0, llvm_global, exp_name_z, @@ -1369,32 +1733,42 @@ pub const Object = struct { } } } else { - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); - llvm_global.setValueName2(fqn.ptr, fqn.len); + const fqn = try self.builder.string(mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod))); + try global.rename(fqn, &self.builder); + global.ptr(&self.builder).linkage = .internal; llvm_global.setLinkage(.Internal); - if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) { + global.ptr(&self.builder).dll_storage_class = .default; + llvm_global.setDLLStorageClass(.Default); + } + global.ptr(&self.builder).unnamed_addr = .unnamed_addr; llvm_global.setUnnamedAddr(.True); - if (decl.val.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |decl_var| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.is_threadlocal and !single_threaded) { + if (decl_var.is_threadlocal and !single_threaded) { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { + global.ptrConst(&self.builder).kind.variable.ptr(&self.builder).thread_local = + .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } } + global.ptr(&self.builder).updateAttributes(); } } pub fn freeDecl(self: *Object, decl_index: Module.Decl.Index) void { - const llvm_value = self.decl_map.get(decl_index) orelse return; - llvm_value.deleteGlobal(); + const global = self.decl_map.get(decl_index) orelse return; + global.toLlvm(&self.builder).deleteGlobal(); } fn getDIFile(o: *Object, gpa: Allocator, file: *const Module.File) !*llvm.DIFile { const gop = try o.di_map.getOrPut(gpa, file); errdefer assert(o.di_map.remove(file)); if (gop.found_existing) { - return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*)); + return @ptrCast(gop.value_ptr.*); } const dir_path_z = d: { var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined; @@ -1542,7 +1916,7 @@ pub const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, enumerators.ptr, - @as(c_int, @intCast(enumerators.len)), + @intCast(enumerators.len), try o.lowerDebugType(int_ty, .full), "", ); @@ -1717,7 +2091,7 @@ pub 
const Object = struct { ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, try o.lowerDebugType(ty.childType(mod), .full), - @as(i64, @intCast(ty.arrayLen(mod))), + @intCast(ty.arrayLen(mod)), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty)); @@ -2022,7 +2396,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @as(c_int, @intCast(di_fields.items.len)), + @intCast(di_fields.items.len), 0, // run time lang null, // vtable holder "", // unique id @@ -2109,7 +2483,7 @@ pub const Object = struct { 0, // flags null, // derived from di_fields.items.ptr, - @as(c_int, @intCast(di_fields.items.len)), + @intCast(di_fields.items.len), 0, // run time lang null, // vtable holder "", // unique id @@ -2221,7 +2595,7 @@ pub const Object = struct { ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, - @as(c_int, @intCast(di_fields.items.len)), + @intCast(di_fields.items.len), 0, // run time lang "", // unique id ); @@ -2334,7 +2708,7 @@ pub const Object = struct { const fn_di_ty = dib.createSubroutineType( param_di_types.items.ptr, - @as(c_int, @intCast(param_di_types.items.len)), + @intCast(param_di_types.items.len), 0, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -2420,52 +2794,16 @@ pub const Object = struct { return buffer.toOwnedSliceSentinel(0); } - fn getNullOptAddr(o: *Object) !*llvm.Value { - if (o.null_opt_addr) |global| return global; - - const mod = o.module; - const target = mod.getTarget(); - const ty = try mod.intern(.{ .opt_type = .usize_type }); - const null_opt_usize = try mod.intern(.{ .opt = .{ - .ty = ty, - .val = .none, - } }); - - const llvm_init = try o.lowerValue(.{ - .ty = ty.toType(), - .val = null_opt_usize.toValue(), - }); - const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); - const global = o.llvm_module.addGlobalInAddressSpace( - llvm_init.typeOf(), - "", - llvm_actual_addrspace, - ); - global.setLinkage(.Internal); - global.setUnnamedAddr(.True); - global.setAlignment(ty.toType().abiAlignment(mod)); - global.setInitializer(llvm_init); - - const addrspace_casted_global = if (llvm_wanted_addrspace != llvm_actual_addrspace) - global.constAddrSpaceCast(o.context.pointerType(llvm_wanted_addrspace)) - else - global; - - o.null_opt_addr = addrspace_casted_global; - return addrspace_casted_global; - } - /// If the llvm function does not exist, create it. /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
- fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) !*llvm.Value { + fn resolveLlvmFunction(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Function.Index { const mod = o.module; const gpa = o.gpa; const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; const gop = try o.decl_map.getOrPut(gpa, decl_index); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function; assert(decl.has_tv); const fn_info = mod.typeToFunc(zig_fn_type).?; @@ -2474,16 +2812,25 @@ pub const Object = struct { const fn_type = try o.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(mod); const ip = &mod.intern_pool; + const fqn = try o.builder.string(ip.stringToSlice(try decl.getFullyQualifiedName(mod))); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = o.llvm_module.addFunctionInAddressSpace(ip.stringToSlice(fqn), fn_type, llvm_addrspace); - gop.value_ptr.* = llvm_fn; + const llvm_fn = o.llvm_module.addFunctionInAddressSpace(fqn.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder), @intFromEnum(llvm_addrspace)); + + var global = Builder.Global{ + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; const is_extern = decl.isExtern(mod); if (!is_extern) { + global.linkage = .internal; llvm_fn.setLinkage(.Internal); + global.unnamed_addr = .unnamed_addr; llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { @@ -2500,7 +2847,7 @@ pub const Object = struct { o.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 o.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try o.lowerType(fn_info.return_type.toType()); + const raw_llvm_ret_ty = (try o.lowerType(fn_info.return_type.toType())).toLlvm(&o.builder); llvm_fn.addSretAttr(raw_llvm_ret_ty); } @@ -2528,7 +2875,8 @@ pub const Object = struct { } if (fn_info.alignment.toByteUnitsOptional()) |a| { - llvm_fn.setAlignment(@as(c_uint, @intCast(a))); + function.alignment = Builder.Alignment.fromByteUnits(a); + llvm_fn.setAlignment(@intCast(a)); } // Function attributes that are independent of analysis results of the function body. 
@@ -2544,7 +2892,7 @@ pub const Object = struct { var it = iterateParamTypes(o, fn_info); it.llvm_index += @intFromBool(sret); it.llvm_index += @intFromBool(err_return_tracing); - while (it.next()) |lowering| switch (lowering) { + while (try it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); @@ -2576,7 +2924,10 @@ pub const Object = struct { }; } - return llvm_fn; + try o.builder.llvm.globals.append(o.gpa, llvm_fn); + gop.value_ptr.* = try o.builder.addGlobal(fqn, global); + try o.builder.functions.append(o.gpa, function); + return global.kind.function; } fn addCommonFnAttributes(o: *Object, llvm_fn: *llvm.Value) void { @@ -2622,65 +2973,80 @@ pub const Object = struct { } } - fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Error!*llvm.Value { + fn resolveGlobalDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Variable.Index { const gop = try o.decl_map.getOrPut(o.gpa, decl_index); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; errdefer assert(o.decl_map.remove(decl_index)); const mod = o.module; const decl = mod.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(mod); + const fqn = try o.builder.string(mod.intern_pool.stringToSlice( + try decl.getFullyQualifiedName(mod), + )); const target = mod.getTarget(); - const llvm_type = try o.lowerType(decl.ty); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); + var global = Builder.Global{ + .addr_space = toLlvmGlobalAddressSpace(decl.@"addrspace", target), + .type = try o.lowerType(decl.ty), + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + }; + const is_extern = decl.isExtern(mod); + const name = if (is_extern) + try o.builder.string(mod.intern_pool.stringToSlice(decl.name)) + else + fqn; const llvm_global = o.llvm_module.addGlobalInAddressSpace( - llvm_type, - mod.intern_pool.stringToSlice(fqn), - llvm_actual_addrspace, + global.type.toLlvm(&o.builder), + fqn.toSlice(&o.builder).?, + @intFromEnum(global.addr_space), ); - gop.value_ptr.* = llvm_global; // This is needed for declarations created by `@extern`. 
- if (decl.isExtern(mod)) { - llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name)); + if (is_extern) { + global.unnamed_addr = .default; llvm_global.setUnnamedAddr(.False); + global.linkage = .external; llvm_global.setLinkage(.External); - if (decl.val.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |decl_var| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.is_threadlocal and !single_threaded) { + if (decl_var.is_threadlocal and !single_threaded) { + variable.thread_local = .generaldynamic; llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { + variable.thread_local = .default; llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); + if (decl_var.is_weak_linkage) { + global.linkage = .extern_weak; + llvm_global.setLinkage(.ExternalWeak); + } } } else { + global.linkage = .internal; llvm_global.setLinkage(.Internal); + global.unnamed_addr = .unnamed_addr; llvm_global.setUnnamedAddr(.True); } - return llvm_global; - } - - fn isUnnamedType(o: *Object, ty: Type, val: *llvm.Value) bool { - // Once `lowerType` succeeds, successive calls to it with the same Zig type - // are guaranteed to succeed. So if a call to `lowerType` fails here it means - // it is the first time lowering the type, which means the value can't possible - // have that type. - const llvm_ty = o.lowerType(ty) catch return true; - return val.typeOf() != llvm_ty; + try o.builder.llvm.globals.append(o.gpa, llvm_global); + gop.value_ptr.* = try o.builder.addGlobal(name, global); + try o.builder.variables.append(o.gpa, variable); + return global.kind.variable; } - fn lowerType(o: *Object, t: Type) Allocator.Error!*llvm.Type { - const llvm_ty = try lowerTypeInner(o, t); + fn lowerType(o: *Object, t: Type) Allocator.Error!Builder.Type { + const ty = try o.lowerTypeInner(t); const mod = o.module; if (std.debug.runtime_safety and false) check: { + const llvm_ty = ty.toLlvm(&o.builder); if (t.zigTypeTag(mod) == .Opaque) break :check; if (!t.hasRuntimeBits(mod)) break :check; - if (!llvm_ty.isSized().toBool()) break :check; + if (!try ty.isSized(&o.builder)) break :check; const zig_size = t.abiSize(mod); const llvm_size = o.target_data.abiSizeOfType(llvm_ty); @@ -2690,456 +3056,511 @@ pub const Object = struct { }); } } - return llvm_ty; + return ty; } - fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!*llvm.Type { - const gpa = o.gpa; + fn lowerTypeInner(o: *Object, t: Type) Allocator.Error!Builder.Type { const mod = o.module; const target = mod.getTarget(); - switch (t.zigTypeTag(mod)) { - .Void, .NoReturn => return o.context.voidType(), - .Int => { - const info = t.intInfo(mod); - assert(info.bits != 0); - return o.context.intType(info.bits); - }, - .Enum => { - const int_ty = t.intTagType(mod); - const bit_count = int_ty.intInfo(mod).bits; - assert(bit_count != 0); - return o.context.intType(bit_count); - }, - .Float => switch (t.floatBits(target)) { - 16 => return if (backendSupportsF16(target)) o.context.halfType() else o.context.intType(16), - 32 => return o.context.floatType(), - 64 => return o.context.doubleType(), - 80 => return if (backendSupportsF80(target)) o.context.x86FP80Type() else o.context.intType(80), - 128 => return o.context.fp128Type(), + return switch (t.toIntern()) { + .u0_type, .i0_type => unreachable, + inline .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + 
.i128_type, + => |tag| @field(Builder.Type, "i" ++ @tagName(tag)[1 .. @tagName(tag).len - "_type".len]), + .usize_type, .isize_type => try o.builder.intType(target.ptrBitWidth()), + inline .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => |tag| try o.builder.intType(target.c_type_bit_size( + @field(std.Target.CType, @tagName(tag)["c_".len .. @tagName(tag).len - "_type".len]), + )), + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + => switch (t.floatBits(target)) { + 16 => if (backendSupportsF16(target)) .half else .i16, + 32 => .float, + 64 => .double, + 80 => if (backendSupportsF80(target)) .x86_fp80 else .i80, + 128 => .fp128, else => unreachable, }, - .Bool => return o.context.intType(1), - .Pointer => { - if (t.isSlice(mod)) { - const ptr_type = t.slicePtrFieldType(mod); - - const fields: [2]*llvm.Type = .{ - try o.lowerType(ptr_type), - try o.lowerType(Type.usize), + .anyopaque_type => unreachable, + .bool_type => .i1, + .void_type => .void, + .type_type => unreachable, + .anyerror_type => Builder.Type.err_int, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + => unreachable, + .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), + .null_type, + .undefined_type, + .enum_literal_type, + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + => unreachable, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + => .ptr, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + => try o.builder.structType(.normal, &.{ .ptr, try o.lowerType(Type.usize) }), + .optional_noreturn_type => unreachable, + .anyerror_void_error_union_type, + .adhoc_inferred_error_set_type, + => Builder.Type.err_int, + .generic_poison_type, + .empty_struct_type, + => unreachable, + // values, not types + .undef, + .zero, + .zero_usize, + .zero_u8, + .one, + .one_usize, + .one_u8, + .four_u8, + .negative_one, + .calling_convention_c, + .calling_convention_inline, + .void_value, + .unreachable_value, + .null_value, + .bool_true, + .bool_false, + .empty_struct, + .generic_poison, + .var_args_param_type, + .none, + => unreachable, + else => switch (mod.intern_pool.indexToKey(t.toIntern())) { + .int_type => |int_type| try o.builder.intType(int_type.bits), + .ptr_type => |ptr_type| type: { + const ptr_ty = try o.builder.ptrType( + toLlvmAddressSpace(ptr_type.flags.address_space, target), + ); + break :type switch (ptr_type.flags.size) { + .One, .Many, .C => ptr_ty, + .Slice => try o.builder.structType(.normal, &.{ + ptr_ty, + try o.lowerType(Type.usize), + }), }; - return o.context.structType(&fields, fields.len, .False); - } - const ptr_info = t.ptrInfo(mod); - const llvm_addrspace = toLlvmAddressSpace(ptr_info.flags.address_space, target); - return o.context.pointerType(llvm_addrspace); - }, - .Opaque => { - if (t.toIntern() == .anyopaque_type) return o.context.intType(8); - - const gop = try o.type_map.getOrPut(gpa, t.toIntern()); - if (gop.found_existing) return gop.value_ptr.*; - - const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; - const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type)); - - const 
llvm_struct_ty = o.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; - }, - .Array => { - const elem_ty = t.childType(mod); - if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); - const elem_llvm_ty = try o.lowerType(elem_ty); - const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null); - return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len))); - }, - .Vector => { - const elem_type = try o.lowerType(t.childType(mod)); - return elem_type.vectorType(t.vectorLen(mod)); - }, - .Optional => { - const child_ty = t.optionalChild(mod); - if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return o.context.intType(8); - } - const payload_llvm_ty = try o.lowerType(child_ty); - if (t.optionalReprIsPayload(mod)) { - return payload_llvm_ty; - } - - comptime assert(optional_layout_version == 3); - var fields_buf: [3]*llvm.Type = .{ - payload_llvm_ty, o.context.intType(8), undefined, - }; - const offset = child_ty.abiSize(mod) + 1; - const abi_size = t.abiSize(mod); - const padding = @as(c_uint, @intCast(abi_size - offset)); - if (padding == 0) { - return o.context.structType(&fields_buf, 2, .False); - } - fields_buf[2] = o.context.intType(8).arrayType(padding); - return o.context.structType(&fields_buf, 3, .False); - }, - .ErrorUnion => { - const payload_ty = t.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return try o.lowerType(Type.anyerror); - } - const llvm_error_type = try o.lowerType(Type.anyerror); - const llvm_payload_type = try o.lowerType(payload_ty); - - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - - const payload_size = payload_ty.abiSize(mod); - const error_size = Type.anyerror.abiSize(mod); - - var fields_buf: [3]*llvm.Type = undefined; - if (error_align > payload_align) { - fields_buf[0] = llvm_error_type; - fields_buf[1] = llvm_payload_type; - const payload_end = - std.mem.alignForward(u64, error_size, payload_align) + - payload_size; - const abi_size = std.mem.alignForward(u64, payload_end, error_align); - const padding = @as(c_uint, @intCast(abi_size - payload_end)); - if (padding == 0) { - return o.context.structType(&fields_buf, 2, .False); + }, + .array_type => |array_type| o.builder.arrayType( + array_type.len + @intFromBool(array_type.sentinel != .none), + try o.lowerType(array_type.child.toType()), + ), + .vector_type => |vector_type| o.builder.vectorType( + .normal, + vector_type.len, + try o.lowerType(vector_type.child.toType()), + ), + .opt_type => |child_ty| { + if (!child_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) return .i8; + + const payload_ty = try o.lowerType(child_ty.toType()); + if (t.optionalReprIsPayload(mod)) return payload_ty; + + comptime assert(optional_layout_version == 3); + var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined }; + var fields_len: usize = 2; + const offset = child_ty.toType().abiSize(mod) + 1; + const abi_size = t.abiSize(mod); + const padding_len = abi_size - offset; + if (padding_len > 0) { + fields[2] = try o.builder.arrayType(padding_len, .i8); + fields_len = 3; } - fields_buf[2] = o.context.intType(8).arrayType(padding); - return o.context.structType(&fields_buf, 3, .False); - } else { - fields_buf[0] = llvm_payload_type; - fields_buf[1] = llvm_error_type; - const error_end = - std.mem.alignForward(u64, payload_size, error_align) + - error_size; - const abi_size = std.mem.alignForward(u64, 
error_end, payload_align); - const padding = @as(c_uint, @intCast(abi_size - error_end)); - if (padding == 0) { - return o.context.structType(&fields_buf, 2, .False); + return o.builder.structType(.normal, fields[0..fields_len]); + }, + .anyframe_type => @panic("TODO implement lowerType for AnyFrame types"), + .error_union_type => |error_union_type| { + const error_type = Builder.Type.err_int; + if (!error_union_type.payload_type.toType().hasRuntimeBitsIgnoreComptime(mod)) + return error_type; + const payload_type = try o.lowerType(error_union_type.payload_type.toType()); + + const payload_align = error_union_type.payload_type.toType().abiAlignment(mod); + const error_align = Type.err_int.abiAlignment(mod); + + const payload_size = error_union_type.payload_type.toType().abiSize(mod); + const error_size = Type.err_int.abiSize(mod); + + var fields: [3]Builder.Type = undefined; + var fields_len: usize = 2; + const padding_len = if (error_align > payload_align) pad: { + fields[0] = error_type; + fields[1] = payload_type; + const payload_end = + std.mem.alignForward(u64, error_size, payload_align) + + payload_size; + const abi_size = std.mem.alignForward(u64, payload_end, error_align); + break :pad abi_size - payload_end; + } else pad: { + fields[0] = payload_type; + fields[1] = error_type; + const error_end = + std.mem.alignForward(u64, payload_size, error_align) + + error_size; + const abi_size = std.mem.alignForward(u64, error_end, payload_align); + break :pad abi_size - error_end; + }; + if (padding_len > 0) { + fields[2] = try o.builder.arrayType(padding_len, .i8); + fields_len = 3; } - fields_buf[2] = o.context.intType(8).arrayType(padding); - return o.context.structType(&fields_buf, 3, .False); - } - }, - .ErrorSet => return o.context.intType(16), - .Struct => { - const gop = try o.type_map.getOrPut(gpa, t.toIntern()); - if (gop.found_existing) return gop.value_ptr.*; - - const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) { - .anon_struct_type => |tuple| { - const llvm_struct_ty = o.context.structCreateNamed(""); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + return o.builder.structType(.normal, fields[0..fields_len]); + }, + .simple_type => unreachable, + .struct_type => |struct_type| { + const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); + if (gop.found_existing) return gop.value_ptr.*; - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const int_ty = try o.lowerType(struct_obj.backing_int_ty); + gop.value_ptr.* = int_ty; + return int_ty; + } - try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); + const name = try o.builder.string(mod.intern_pool.stringToSlice( + try struct_obj.getFullyQualifiedName(mod), + )); + const ty = try o.builder.opaqueType(name); + gop.value_ptr.* = ty; // must be done before any recursive calls - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; + assert(struct_obj.haveFieldTypes()); - for (tuple.types, tuple.values) |field_ty, field_val| { - if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; + var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; + defer llvm_field_types.deinit(o.gpa); + try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_obj.fields.count()); - const field_align = field_ty.toType().abiAlignment(mod); - 
big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try o.lowerType(field_ty.toType()); - try llvm_field_types.append(gpa, field_llvm_ty); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 1; + var struct_kind: Builder.Type.Structure.Kind = .normal; - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); - } - } + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + const field_ty_align = field.ty.abiAlignment(mod); + if (field_align < field_ty_align) struct_kind = .@"packed"; + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, field_align); - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @as(c_uint, @intCast(llvm_field_types.items.len)), - .False, + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), ); + try llvm_field_types.append(o.gpa, try o.lowerType(field.ty)); - return llvm_struct_ty; - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; - - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const int_llvm_ty = try o.lowerType(struct_obj.backing_int_ty); - gop.value_ptr.* = int_llvm_ty; - return int_llvm_ty; - } - - const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); + } - const llvm_struct_ty = o.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + try o.builder.namedTypeSetBody( + ty, + try o.builder.structType(struct_kind, llvm_field_types.items), + ); + return ty; + }, + .anon_struct_type => |anon_struct_type| { + var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .{}; + defer llvm_field_types.deinit(o.gpa); + try llvm_field_types.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len); - assert(struct_obj.haveFieldTypes()); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + for (anon_struct_type.types, anon_struct_type.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count()); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = 
@max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, field_align); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 1; - var any_underaligned_fields = false; + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); + try llvm_field_types.append(o.gpa, try o.lowerType(field_ty.toType())); - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - const field_ty_align = field.ty.abiAlignment(mod); - any_underaligned_fields = any_underaligned_fields or - field_align < field_ty_align; - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); + offset += field_ty.toType().abiSize(mod); } - const field_llvm_ty = try o.lowerType(field.ty); - try llvm_field_types.append(gpa, field_llvm_ty); - - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForward(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - try llvm_field_types.append(gpa, llvm_array_ty); + { + const prev_offset = offset; + offset = std.mem.alignForward(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) try llvm_field_types.append( + o.gpa, + try o.builder.arrayType(padding_len, .i8), + ); } - } - - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @as(c_uint, @intCast(llvm_field_types.items.len)), - llvm.Bool.fromBool(any_underaligned_fields), - ); - - return llvm_struct_ty; - }, - .Union => { - const gop = try o.type_map.getOrPut(gpa, t.toIntern()); - if (gop.found_existing) return gop.value_ptr.*; - - const layout = t.unionGetLayout(mod); - const union_obj = mod.typeToUnion(t).?; + return o.builder.structType(.normal, llvm_field_types.items); + }, + .union_type => |union_type| { + const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); + if (gop.found_existing) return gop.value_ptr.*; - if (union_obj.layout == .Packed) { - const bitsize = @as(c_uint, @intCast(t.bitSize(mod))); - const int_llvm_ty = o.context.intType(bitsize); - gop.value_ptr.* = int_llvm_ty; - return int_llvm_ty; - } + const union_obj = mod.unionPtr(union_type.index); + const layout = union_obj.getLayout(mod, union_type.hasTag()); - if (layout.payload_size == 0) { - const enum_tag_llvm_ty = try o.lowerType(union_obj.tag_ty); - gop.value_ptr.* = enum_tag_llvm_ty; - return enum_tag_llvm_ty; - } + if (union_obj.layout == .Packed) { + const int_ty = try o.builder.intType(@intCast(t.bitSize(mod))); + gop.value_ptr.* = int_ty; + return int_ty; + } - const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod)); + if (layout.payload_size == 0) { + const enum_tag_ty = try o.lowerType(union_obj.tag_ty); + gop.value_ptr.* = enum_tag_ty; + return enum_tag_ty; + } - const llvm_union_ty = o.context.structCreateNamed(name); - gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls + const name = try 
o.builder.string(mod.intern_pool.stringToSlice( + try union_obj.getFullyQualifiedName(mod), + )); + const ty = try o.builder.opaqueType(name); + gop.value_ptr.* = ty; // must be done before any recursive calls - const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; - const llvm_aligned_field_ty = try o.lowerType(aligned_field.ty); + const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; + const aligned_field_ty = try o.lowerType(aligned_field.ty); - const llvm_payload_ty = t: { - if (layout.most_aligned_field_size == layout.payload_size) { - break :t llvm_aligned_field_ty; - } - const padding_len = if (layout.tag_size == 0) - @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size)) - else - @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size)); - const fields: [2]*llvm.Type = .{ - llvm_aligned_field_ty, - o.context.intType(8).arrayType(padding_len), + const payload_ty = ty: { + if (layout.most_aligned_field_size == layout.payload_size) { + break :ty aligned_field_ty; + } + const padding_len = if (layout.tag_size == 0) + layout.abi_size - layout.most_aligned_field_size + else + layout.payload_size - layout.most_aligned_field_size; + break :ty try o.builder.structType(.@"packed", &.{ + aligned_field_ty, + try o.builder.arrayType(padding_len, .i8), + }); }; - break :t o.context.structType(&fields, fields.len, .True); - }; - if (layout.tag_size == 0) { - var llvm_fields: [1]*llvm.Type = .{llvm_payload_ty}; - llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); - return llvm_union_ty; - } - const enum_tag_llvm_ty = try o.lowerType(union_obj.tag_ty); + if (layout.tag_size == 0) { + try o.builder.namedTypeSetBody( + ty, + try o.builder.structType(.normal, &.{payload_ty}), + ); + return ty; + } + const enum_tag_ty = try o.lowerType(union_obj.tag_ty); - // Put the tag before or after the payload depending on which one's - // alignment is greater. - var llvm_fields: [3]*llvm.Type = undefined; - var llvm_fields_len: c_uint = 2; + // Put the tag before or after the payload depending on which one's + // alignment is greater. + var llvm_fields: [3]Builder.Type = undefined; + var llvm_fields_len: usize = 2; - if (layout.tag_align >= layout.payload_align) { - llvm_fields = .{ enum_tag_llvm_ty, llvm_payload_ty, undefined }; - } else { - llvm_fields = .{ llvm_payload_ty, enum_tag_llvm_ty, undefined }; - } + if (layout.tag_align >= layout.payload_align) { + llvm_fields = .{ enum_tag_ty, payload_ty, .none }; + } else { + llvm_fields = .{ payload_ty, enum_tag_ty, .none }; + } - // Insert padding to make the LLVM struct ABI size match the Zig union ABI size. - if (layout.padding != 0) { - llvm_fields[2] = o.context.intType(8).arrayType(layout.padding); - llvm_fields_len = 3; - } + // Insert padding to make the LLVM struct ABI size match the Zig union ABI size. 
+ if (layout.padding != 0) { + llvm_fields[llvm_fields_len] = try o.builder.arrayType(layout.padding, .i8); + llvm_fields_len += 1; + } - llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); - return llvm_union_ty; + try o.builder.namedTypeSetBody( + ty, + try o.builder.structType(.normal, llvm_fields[0..llvm_fields_len]), + ); + return ty; + }, + .opaque_type => |opaque_type| { + const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); + if (!gop.found_existing) { + const name = try o.builder.string(mod.intern_pool.stringToSlice( + try mod.opaqueFullyQualifiedName(opaque_type), + )); + gop.value_ptr.* = try o.builder.opaqueType(name); + } + return gop.value_ptr.*; + }, + .enum_type => |enum_type| try o.lowerType(enum_type.tag_ty.toType()), + .func_type => |func_type| try o.lowerTypeFn(func_type), + .error_set_type, .inferred_error_set_type => Builder.Type.err_int, + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, - .Fn => return lowerTypeFn(o, t), - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .EnumLiteral => unreachable, + }; + } - .Frame => @panic("TODO implement llvmType for Frame types"), - .AnyFrame => @panic("TODO implement llvmType for AnyFrame types"), - } + /// Use this instead of lowerType when you want to handle correctly the case of elem_ty + /// being a zero bit type, but it should still be lowered as an i8 in such case. + /// There are other similar cases handled here as well. 
+ fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!Builder.Type { + const mod = o.module; + const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { + .Opaque => true, + .Fn => !mod.typeToFunc(elem_ty).?.is_generic, + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), + else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), + }; + return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8; } - fn lowerTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type { + fn lowerTypeFn(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type { const mod = o.module; const ip = &mod.intern_pool; - const fn_info = mod.typeToFunc(fn_ty).?; - const llvm_ret_ty = try lowerFnRetTy(o, fn_info); + const target = mod.getTarget(); + const ret_ty = try lowerFnRetTy(o, fn_info); - var llvm_params = std.ArrayList(*llvm.Type).init(o.gpa); - defer llvm_params.deinit(); + var llvm_params = std.ArrayListUnmanaged(Builder.Type){}; + defer llvm_params.deinit(o.gpa); if (firstParamSRet(fn_info, mod)) { - try llvm_params.append(o.context.pointerType(0)); + try llvm_params.append(o.gpa, .ptr); } if (fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing) { const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); - try llvm_params.append(try o.lowerType(ptr_ty)); + try llvm_params.append(o.gpa, try o.lowerType(ptr_ty)); } var it = iterateParamTypes(o, fn_info); - while (it.next()) |lowering| switch (lowering) { + while (try it.next()) |lowering| switch (lowering) { .no_bits => continue, .byval => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - try llvm_params.append(try o.lowerType(param_ty)); + try llvm_params.append(o.gpa, try o.lowerType(param_ty)); }, .byref, .byref_mut => { - try llvm_params.append(o.context.pointerType(0)); + try llvm_params.append(o.gpa, .ptr); }, .abi_sized_int => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - try llvm_params.append(o.context.intType(abi_size * 8)); + try llvm_params.append(o.gpa, try o.builder.intType( + @intCast(param_ty.abiSize(mod) * 8), + )); }, .slice => { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); - const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(mod).slicePtrFieldType(mod) - else - param_ty.slicePtrFieldType(mod); - const ptr_llvm_ty = try o.lowerType(ptr_ty); - const len_llvm_ty = try o.lowerType(Type.usize); - - try llvm_params.ensureUnusedCapacity(2); - llvm_params.appendAssumeCapacity(ptr_llvm_ty); - llvm_params.appendAssumeCapacity(len_llvm_ty); + try llvm_params.appendSlice(o.gpa, &.{ + try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(mod), target)), + try o.lowerType(Type.usize), + }); }, .multiple_llvm_types => { - try llvm_params.appendSlice(it.llvm_types_buffer[0..it.llvm_types_len]); + try llvm_params.appendSlice(o.gpa, it.types_buffer[0..it.types_len]); }, .as_u16 => { - try llvm_params.append(o.context.intType(16)); + try llvm_params.append(o.gpa, .i16); }, .float_array => |count| { const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); - const field_count = @as(c_uint, @intCast(count)); - const arr_ty = float_ty.arrayType(field_count); - try llvm_params.append(arr_ty); + try llvm_params.append(o.gpa, try o.builder.arrayType(count, float_ty)); }, .i32_array, 
.i64_array => |arr_len| { - const elem_size: u8 = if (lowering == .i32_array) 32 else 64; - const arr_ty = o.context.intType(elem_size).arrayType(arr_len); - try llvm_params.append(arr_ty); + try llvm_params.append(o.gpa, try o.builder.arrayType(arr_len, switch (lowering) { + .i32_array => .i32, + .i64_array => .i64, + else => unreachable, + })); }, }; - return llvm.functionType( - llvm_ret_ty, - llvm_params.items.ptr, - @as(c_uint, @intCast(llvm_params.items.len)), - llvm.Bool.fromBool(fn_info.is_var_args), + return o.builder.fnType( + ret_ty, + llvm_params.items, + if (fn_info.is_var_args) .vararg else .normal, ); } - /// Use this instead of lowerType when you want to handle correctly the case of elem_ty - /// being a zero bit type, but it should still be lowered as an i8 in such case. - /// There are other similar cases handled here as well. - fn lowerPtrElemTy(o: *Object, elem_ty: Type) Allocator.Error!*llvm.Type { + fn lowerValue(o: *Object, arg_val: InternPool.Index) Error!Builder.Constant { const mod = o.module; - const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { - .Opaque => true, - .Fn => !mod.typeToFunc(elem_ty).?.is_generic, - .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), - else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), - }; - const llvm_elem_ty = if (lower_elem_ty) - try o.lowerType(elem_ty) - else - o.context.intType(8); - - return llvm_elem_ty; - } - - fn lowerValue(o: *Object, arg_tv: TypedValue) Error!*llvm.Value { - const mod = o.module; - const gpa = o.gpa; const target = mod.getTarget(); - var tv = arg_tv; - switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { - .runtime_value => |rt| tv.val = rt.val.toValue(), + + var val = arg_val.toValue(); + const arg_val_key = mod.intern_pool.indexToKey(arg_val); + switch (arg_val_key) { + .runtime_value => |rt| val = rt.val.toValue(), else => {}, } - if (tv.val.isUndefDeep(mod)) { - const llvm_type = try o.lowerType(tv.ty); - return llvm_type.getUndef(); + if (val.isUndefDeep(mod)) { + return o.builder.undefConst(try o.lowerType(arg_val_key.typeOf().toType())); } - switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + const val_key = mod.intern_pool.indexToKey(val.toIntern()); + const ty = val_key.typeOf().toType(); + return switch (val_key) { .int_type, .ptr_type, .array_type, @@ -3167,10 +3588,8 @@ pub const Object = struct { .@"unreachable", .generic_poison, => unreachable, // non-runtime values - .false, .true => { - const llvm_type = try o.lowerType(tv.ty); - return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); - }, + .false => .false, + .true => .true, }, .variable, .enum_literal, @@ -3180,309 +3599,276 @@ pub const Object = struct { const fn_decl_index = extern_func.decl; const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); - return o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(fn_decl_index); + return function_index.ptrConst(&o.builder).global.toConst(); }, .func => |func| { const fn_decl_index = func.owner_decl; const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); - return o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(fn_decl_index); + return function_index.ptrConst(&o.builder).global.toConst(); }, .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, mod); - return lowerBigInt(o, tv.ty, bigint); + const bigint = val.toBigInt(&bigint_space, mod); + return lowerBigInt(o, 
ty, bigint); }, .err => |err| { - const llvm_ty = try o.lowerType(Type.anyerror); const int = try mod.getErrorValue(err.name); - return llvm_ty.constInt(int, .False); + const llvm_int = try o.builder.intConst(Builder.Type.err_int, int); + return llvm_int; }, .error_union => |error_union| { - const err_tv: TypedValue = switch (error_union.val) { - .err_name => |err_name| .{ - .ty = tv.ty.errorUnionSet(mod), - .val = (try mod.intern(.{ .err = .{ - .ty = tv.ty.errorUnionSet(mod).toIntern(), - .name = err_name, - } })).toValue(), - }, - .payload => .{ - .ty = Type.err_int, - .val = try mod.intValue(Type.err_int, 0), - }, + const err_val = switch (error_union.val) { + .err_name => |err_name| try mod.intern(.{ .err = .{ + .ty = ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } }), + .payload => (try mod.intValue(Type.err_int, 0)).toIntern(), }; - const payload_type = tv.ty.errorUnionPayload(mod); + const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - return o.lowerValue(err_tv); + return o.lowerValue(err_val); } const payload_align = payload_type.abiAlignment(mod); - const error_align = err_tv.ty.abiAlignment(mod); - const llvm_error_value = try o.lowerValue(err_tv); - const llvm_payload_value = try o.lowerValue(.{ - .ty = payload_type, - .val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), - .payload => |payload| payload, - }.toValue(), + const error_align = Type.err_int.abiAlignment(mod); + const llvm_error_value = try o.lowerValue(err_val); + const llvm_payload_value = try o.lowerValue(switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), + .payload => |payload| payload, }); - var fields_buf: [3]*llvm.Value = undefined; - - const llvm_ty = try o.lowerType(tv.ty); - const llvm_field_count = llvm_ty.countStructElementTypes(); - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); - } + var fields: [3]Builder.Type = undefined; + var vals: [3]Builder.Constant = undefined; if (error_align > payload_align) { - fields_buf[0] = llvm_error_value; - fields_buf[1] = llvm_payload_value; - return o.context.constStruct(&fields_buf, llvm_field_count, .False); + vals[0] = llvm_error_value; + vals[1] = llvm_payload_value; } else { - fields_buf[0] = llvm_payload_value; - fields_buf[1] = llvm_error_value; - return o.context.constStruct(&fields_buf, llvm_field_count, .False); + vals[0] = llvm_payload_value; + vals[1] = llvm_error_value; } - }, - .enum_tag => { - const int_val = try tv.intFromEnum(mod); - - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, mod); - - const int_info = tv.ty.intInfo(mod); - const llvm_type = o.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @as(c_uint, @intCast(bigint.limbs.len)), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); + fields[0] = vals[0].typeOf(&o.builder); + fields[1] = vals[1].typeOf(&o.builder); + + const llvm_ty = try o.lowerType(ty); + const llvm_ty_fields = llvm_ty.structFields(&o.builder); + if (llvm_ty_fields.len > 2) { + 
assert(llvm_ty_fields.len == 3); + fields[2] = llvm_ty_fields[2]; + vals[2] = try o.builder.undefConst(fields[2]); } - return unsigned_val; + return o.builder.structConst(try o.builder.structType( + llvm_ty.structKind(&o.builder), + fields[0..llvm_ty_fields.len], + ), vals[0..llvm_ty_fields.len]); }, - .float => { - const llvm_ty = try o.lowerType(tv.ty); - switch (tv.ty.floatBits(target)) { - 16 => { - const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod))); - const llvm_i16 = o.context.intType(16); - const int = llvm_i16.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 32 => { - const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod))); - const llvm_i32 = o.context.intType(32); - const int = llvm_i32.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 64 => { - const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod))); - const llvm_i64 = o.context.intType(64); - const int = llvm_i64.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 80 => { - const float = tv.val.toFloat(f80, mod); - const repr = std.math.break_f80(float); - const llvm_i80 = o.context.intType(80); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod))); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. - if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = o.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, - } + .enum_tag => |enum_tag| o.lowerValue(enum_tag.int), + .float => switch (ty.floatBits(target)) { + 16 => if (backendSupportsF16(target)) + try o.builder.halfConst(val.toFloat(f16, mod)) + else + try o.builder.intConst(.i16, @as(i16, @bitCast(val.toFloat(f16, mod)))), + 32 => try o.builder.floatConst(val.toFloat(f32, mod)), + 64 => try o.builder.doubleConst(val.toFloat(f64, mod)), + 80 => if (backendSupportsF80(target)) + try o.builder.x86_fp80Const(val.toFloat(f80, mod)) + else + try o.builder.intConst(.i80, @as(i80, @bitCast(val.toFloat(f80, mod)))), + 128 => try o.builder.fp128Const(val.toFloat(f128, mod)), + else => unreachable, }, .ptr => |ptr| { - const ptr_tv: TypedValue = switch (ptr.len) { - .none => tv, - else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + const ptr_ty = switch (ptr.len) { + .none => ty, + else => ty.slicePtrFieldType(mod), }; - const llvm_ptr_val = switch (ptr.addr) { - .decl => |decl| try o.lowerDeclRefValue(ptr_tv, decl), - .mut_decl => |mut_decl| try o.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| try o.lowerIntAsPtr(int.toValue()), + const ptr_val = switch (ptr.addr) { + .decl => |decl| try o.lowerDeclRefValue(ptr_ty, decl), + .mut_decl => |mut_decl| try o.lowerDeclRefValue(ptr_ty, mut_decl.decl), + .int => |int| try o.lowerIntAsPtr(int), .eu_payload, .opt_payload, .elem, .field, - => try o.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0), + => try o.lowerParentPtr(val, ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0), .comptime_field => unreachable, }; switch (ptr.len) { - .none => return llvm_ptr_val, - else => { - const fields: [2]*llvm.Value = .{ - llvm_ptr_val, - try o.lowerValue(.{ .ty = Type.usize, .val = 
ptr.len.toValue() }), - }; - return o.context.constStruct(&fields, fields.len, .False); - }, + .none => return ptr_val, + else => return o.builder.structConst(try o.lowerType(ty), &.{ + ptr_val, try o.lowerValue(ptr.len), + }), } }, .opt => |opt| { comptime assert(optional_layout_version == 3); - const payload_ty = tv.ty.optionalChild(mod); + const payload_ty = ty.optionalChild(mod); - const llvm_i8 = o.context.intType(8); - const non_null_bit = switch (opt.val) { - .none => llvm_i8.constNull(), - else => llvm_i8.constInt(1, .False), - }; + const non_null_bit = try o.builder.intConst(.i8, @intFromBool(opt.val != .none)); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } - const llvm_ty = try o.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { - .none => llvm_ty.constNull(), - else => |payload| o.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), + const llvm_ty = try o.lowerType(ty); + if (ty.optionalReprIsPayload(mod)) return switch (opt.val) { + .none => switch (llvm_ty.tag(&o.builder)) { + .integer => try o.builder.intConst(llvm_ty, 0), + .pointer => try o.builder.nullConst(llvm_ty), + .structure => try o.builder.zeroInitConst(llvm_ty), + else => unreachable, + }, + else => |payload| try o.lowerValue(payload), }; assert(payload_ty.zigTypeTag(mod) != .Fn); - const llvm_field_count = llvm_ty.countStructElementTypes(); - var fields_buf: [3]*llvm.Value = undefined; - fields_buf[0] = try o.lowerValue(.{ - .ty = payload_ty, - .val = switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), - else => |payload| payload, - }.toValue(), + var fields: [3]Builder.Type = undefined; + var vals: [3]Builder.Constant = undefined; + vals[0] = try o.lowerValue(switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => |payload| payload, }); - fields_buf[1] = non_null_bit; - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + vals[1] = non_null_bit; + fields[0] = vals[0].typeOf(&o.builder); + fields[1] = vals[1].typeOf(&o.builder); + + const llvm_ty_fields = llvm_ty.structFields(&o.builder); + if (llvm_ty_fields.len > 2) { + assert(llvm_ty_fields.len == 3); + fields[2] = llvm_ty_fields[2]; + vals[2] = try o.builder.undefConst(fields[2]); } - return o.context.constStruct(&fields_buf, llvm_field_count, .False); + return o.builder.structConst(try o.builder.structType( + llvm_ty.structKind(&o.builder), + fields[0..llvm_ty_fields.len], + ), vals[0..llvm_ty_fields.len]); }, - .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { - .array_type => switch (aggregate.storage) { - .bytes => |bytes| return o.context.constString( - bytes.ptr, - @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))), - .True, // Don't null terminate. Bytes has the sentinel, if any. 
- ), - .elems => |elem_vals| { - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); - defer gpa.free(llvm_elems); + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .array_type => |array_type| switch (aggregate.storage) { + .bytes => |bytes| try o.builder.stringConst(try o.builder.string(bytes)), + .elems => |elems| { + const array_ty = try o.lowerType(ty); + const elem_ty = array_ty.childType(&o.builder); + assert(elems.len == array_ty.aggregateLen(&o.builder)); + + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, elems.len); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, elems.len); + defer allocator.free(fields); + var need_unnamed = false; - for (elem_vals, 0..) |elem_val, i| { - llvm_elems[i] = try o.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); - need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return o.context.constStruct( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - .True, - ); - } else { - const llvm_elem_ty = try o.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - ); + for (vals, fields, elems) |*result_val, *result_field, elem| { + result_val.* = try o.lowerValue(elem); + result_field.* = result_val.typeOf(&o.builder); + if (result_field.* != elem_ty) need_unnamed = true; } + return if (need_unnamed) try o.builder.structConst( + try o.builder.structType(.normal, fields), + vals, + ) else try o.builder.arrayConst(array_ty, vals); }, - .repeated_elem => |val| { - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @as(usize, @intCast(tv.ty.arrayLen(mod))); - const len_including_sent = len + @intFromBool(sentinel != null); - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + .repeated_elem => |elem| { + const len: usize = @intCast(array_type.len); + const len_including_sentinel: usize = + @intCast(len + @intFromBool(array_type.sentinel != .none)); + const array_ty = try o.lowerType(ty); + const elem_ty = array_ty.childType(&o.builder); + + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, len_including_sentinel); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, len_including_sentinel); + defer allocator.free(fields); var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try o.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); - } - need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, llvm_elems[0]); - } - - if (sentinel) |sent| { - llvm_elems[len] = try o.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or o.isUnnamedType(elem_ty, 
llvm_elems[len]); + @memset(vals[0..len], try o.lowerValue(elem)); + @memset(fields[0..len], vals[0].typeOf(&o.builder)); + if (fields[0] != elem_ty) need_unnamed = true; + + if (array_type.sentinel != .none) { + vals[len] = try o.lowerValue(array_type.sentinel); + fields[len] = vals[len].typeOf(&o.builder); + if (fields[len] != elem_ty) need_unnamed = true; } - if (need_unnamed) { - return o.context.constStruct( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - .True, - ); - } else { - const llvm_elem_ty = try o.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - ); - } + return if (need_unnamed) try o.builder.structConst( + try o.builder.structType(.@"packed", fields), + vals, + ) else try o.builder.arrayConst(array_ty, vals); }, }, .vector_type => |vector_type| { - const elem_ty = vector_type.child.toType(); - const llvm_elems = try gpa.alloc(*llvm.Value, vector_type.len); - defer gpa.free(llvm_elems); - const llvm_i8 = o.context.intType(8); - for (llvm_elems, 0..) |*llvm_elem, i| { - llvm_elem.* = switch (aggregate.storage) { - .bytes => |bytes| llvm_i8.constInt(bytes[i], .False), - .elems => |elems| try o.lowerValue(.{ - .ty = elem_ty, - .val = elems[i].toValue(), - }), - .repeated_elem => |elem| try o.lowerValue(.{ - .ty = elem_ty, - .val = elem.toValue(), - }), - }; + const vector_ty = try o.lowerType(ty); + switch (aggregate.storage) { + .bytes, .elems => { + const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, vector_type.len); + defer allocator.free(vals); + + switch (aggregate.storage) { + .bytes => |bytes| for (vals, bytes) |*result_val, byte| { + result_val.* = try o.builder.intConst(.i8, byte); + }, + .elems => |elems| for (vals, elems) |*result_val, elem| { + result_val.* = try o.lowerValue(elem); + }, + .repeated_elem => unreachable, + } + return o.builder.vectorConst(vector_ty, vals); + }, + .repeated_elem => |elem| return o.builder.splatConst( + vector_ty, + try o.lowerValue(elem), + ), } - return llvm.constVector( - llvm_elems.ptr, - @as(c_uint, @intCast(llvm_elems.len)), - ); }, .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + const struct_ty = try o.lowerType(ty); + const llvm_len = struct_ty.aggregateLen(&o.builder); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, llvm_len); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, llvm_len); + defer allocator.free(fields); comptime assert(struct_layout_version == 2); + var llvm_index: usize = 0; var offset: u64 = 0; var big_align: u32 = 0; var need_unnamed = false; - - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, field_index| { if (field_val != .none) continue; if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; @@ -3493,20 +3879,20 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } - const field_llvm_val = try o.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(mod, i), - }); - - need_unnamed = need_unnamed or o.isUnnamedType(field_ty.toType(), field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); + vals[llvm_index] = + try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); + if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) + need_unnamed = true; + llvm_index += 1; offset += field_ty.toType().abiSize(mod); } @@ -3515,73 +3901,71 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } } + assert(llvm_index == llvm_len); - if (need_unnamed) { - return o.context.constStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - .False, - ); - } else { - const llvm_struct_ty = try o.lowerType(tv.ty); - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - ); - } + return o.builder.structConst(if (need_unnamed) + try o.builder.structType(struct_ty.structKind(&o.builder), fields) + else + struct_ty, vals); }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const llvm_struct_ty = try o.lowerType(tv.ty); - + assert(struct_obj.haveLayout()); + const struct_ty = try o.lowerType(ty); if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits))); - const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_int = try o.builder.intConst(struct_ty, 0); var running_bits: u16 = 0; - for (fields, 0..) |field, i| { + for (struct_obj.fields.values(), 0..) 
|field, field_index| { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const non_int_val = try o.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, i), - }); - const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = o.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); + const non_int_val = + try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern()); + const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod)); + const small_int_ty = try o.builder.intType(ty_bit_size); + const small_int_val = try o.builder.castConst( + if (field.ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast, + non_int_val, + small_int_ty, + ); + const shift_rhs = try o.builder.intConst(struct_ty, running_bits); + const extended_int_val = + try o.builder.convConst(.unsigned, small_int_val, struct_ty); + const shifted = try o.builder.binConst(.shl, extended_int_val, shift_rhs); + running_int = try o.builder.binConst(.@"or", running_int, shifted); running_bits += ty_bit_size; } return running_int; } + const llvm_len = struct_ty.aggregateLen(&o.builder); - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); + const ExpectedContents = extern struct { + vals: [Builder.expected_fields_len]Builder.Constant, + fields: [Builder.expected_fields_len]Builder.Type, + }; + var stack align(@max( + @alignOf(std.heap.StackFallbackAllocator(0)), + @alignOf(ExpectedContents), + )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); + const allocator = stack.get(); + const vals = try allocator.alloc(Builder.Constant, llvm_len); + defer allocator.free(vals); + const fields = try allocator.alloc(Builder.Type, llvm_len); + defer allocator.free(fields); comptime assert(struct_layout_version == 2); + var llvm_index: usize = 0; var offset: u64 = 0; var big_align: u32 = 0; var need_unnamed = false; - - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { + var field_it = struct_obj.runtimeFieldIterator(mod); + while (field_it.next()) |field_and_index| { const field = field_and_index.field; const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); @@ -3590,20 +3974,22 @@ pub const Object = struct { const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); // TODO make this and all other padding elsewhere in debug // builds be 0xaa not undef. 
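The non-packed aggregate lowering in this hunk materializes padding explicitly: each runtime field is placed at its offset rounded up to the field's alignment, and any gap since the previous field becomes an undef [N x i8] element (the trailing gap up to the aggregate's ABI size is handled the same way). A minimal standalone sketch of that offset/padding arithmetic, using made-up field sizes and alignments rather than the compiler's types:

const std = @import("std");

/// Number of padding bytes needed before a field of the given alignment
/// when the previous field ended at `offset`.
fn paddingBefore(offset: u64, field_align: u64) u64 {
    return std.mem.alignForward(u64, offset, field_align) - offset;
}

test "explicit padding between fields" {
    // A hypothetical struct { a: u8, b: u32 }: after `a` the offset is 1,
    // which must be rounded up to 4 for `b`, so a [3 x i8] pad is emitted.
    var offset: u64 = 0;
    offset += 1; // a: u8
    const pad = paddingBefore(offset, 4);
    try std.testing.expectEqual(@as(u64, 3), pad);
    offset += pad + 4; // b: u32
    try std.testing.expectEqual(@as(u64, 8), offset);
}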
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == + struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } - const field_llvm_val = try o.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, field_and_index.index), - }); - - need_unnamed = need_unnamed or o.isUnnamedType(field.ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); + vals[llvm_index] = try o.lowerValue( + (try val.fieldValue(mod, field_and_index.index)).toIntern(), + ); + fields[llvm_index] = vals[llvm_index].typeOf(&o.builder); + if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index]) + need_unnamed = true; + llvm_index += 1; offset += field.ty.abiSize(mod); } @@ -3612,202 +3998,158 @@ pub const Object = struct { offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { - const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len))); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + fields[llvm_index] = try o.builder.arrayType(padding_len, .i8); + vals[llvm_index] = try o.builder.undefConst(fields[llvm_index]); + assert(fields[llvm_index] == struct_ty.structFields(&o.builder)[llvm_index]); + llvm_index += 1; } } + assert(llvm_index == llvm_len); - if (need_unnamed) { - return o.context.constStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @as(c_uint, @intCast(llvm_fields.items.len)), - ); - } + return o.builder.structConst(if (need_unnamed) + try o.builder.structType(struct_ty.structKind(&o.builder), fields) + else + struct_ty, vals); }, else => unreachable, }, - .un => { - const llvm_union_ty = try o.lowerType(tv.ty); - const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { - .none => tv.val.castTag(.@"union").?.data, - else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { - .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, - else => unreachable, - }, - }; - - const layout = tv.ty.unionGetLayout(mod); + .un => |un| { + const union_ty = try o.lowerType(ty); + const layout = ty.unionGetLayout(mod); + if (layout.payload_size == 0) return o.lowerValue(un.tag); - if (layout.payload_size == 0) { - return lowerValue(o, .{ - .ty = tv.ty.unionTagTypeSafety(mod).?, - .val = tag_and_val.tag, - }); - } - const union_obj = mod.typeToUnion(tv.ty).?; - const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, o.module).?; + const union_obj = mod.typeToUnion(ty).?; + const field_index = ty.unionTagFieldIndex(un.tag.toValue(), o.module).?; assert(union_obj.haveFieldTypes()); const field_ty = union_obj.fields.values()[field_index].ty; if (union_obj.layout == .Packed) { - if (!field_ty.hasRuntimeBits(mod)) - return llvm_union_ty.constNull(); - const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod))); - const small_int_ty = o.context.intType(ty_bit_size); - const small_int_val = if (field_ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - return small_int_val.constZExtOrBitCast(llvm_union_ty); + if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0); + 
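Both the packed-struct path above and the packed-union path here reduce fields to plain integers: each field is converted to an integer of its exact bit width (ptrtoint for pointers, bitcast otherwise), zero-extended to the backing integer, shifted left by the bits already consumed, and OR-ed into a running value. A standalone sketch of that packing arithmetic on ordinary integers; the field widths are purely illustrative:

const std = @import("std");

/// OR `field` into `backing` at bit offset `running_bits`, mirroring the
/// shift-and-or accumulation used when lowering packed aggregates.
fn packField(backing: u32, field: u32, running_bits: u5) u32 {
    return backing | (field << running_bits);
}

test "packing fields into a backing integer" {
    var running: u32 = 0;
    var bits: u5 = 0;
    running = packField(running, 0x2, bits); // a: u3 = 2
    bits += 3;
    running = packField(running, 0x1, bits); // b: u1 = 1
    bits += 1;
    running = packField(running, 0xab, bits); // c: u8 = 0xab
    try std.testing.expectEqual(@as(u32, (0xab << 4) | (1 << 3) | 2), running);
}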
const small_int_val = try o.builder.castConst( + if (field_ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast, + try o.lowerValue(un.val), + try o.builder.intType(@intCast(field_ty.bitSize(mod))), + ); + return o.builder.convConst(.unsigned, small_int_val, union_ty); } // Sometimes we must make an unnamed struct because LLVM does // not support bitcasting our payload struct to the true union payload type. // Instead we use an unnamed struct and every reference to the global // must pointer cast to the expected type before accessing the union. - var need_unnamed: bool = layout.most_aligned_field != field_index; + var need_unnamed = layout.most_aligned_field != field_index; const payload = p: { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @as(c_uint, @intCast(layout.payload_size)); - break :p o.context.intType(8).arrayType(padding_len).getUndef(); + const padding_len = layout.payload_size; + break :p try o.builder.undefConst(try o.builder.arrayType(padding_len, .i8)); } - const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val }); - need_unnamed = need_unnamed or o.isUnnamedType(field_ty, field); + const payload = try o.lowerValue(un.val); + const payload_ty = payload.typeOf(&o.builder); + if (payload_ty != union_ty.structFields(&o.builder)[ + @intFromBool(layout.tag_align >= layout.payload_align) + ]) need_unnamed = true; const field_size = field_ty.abiSize(mod); - if (field_size == layout.payload_size) { - break :p field; - } - const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); - const fields: [2]*llvm.Value = .{ - field, o.context.intType(8).arrayType(padding_len).getUndef(), - }; - break :p o.context.constStruct(&fields, fields.len, .True); + if (field_size == layout.payload_size) break :p payload; + const padding_len = layout.payload_size - field_size; + const padding_ty = try o.builder.arrayType(padding_len, .i8); + break :p try o.builder.structConst( + try o.builder.structType(.@"packed", &.{ payload_ty, padding_ty }), + &.{ payload, try o.builder.undefConst(padding_ty) }, + ); }; + const payload_ty = payload.typeOf(&o.builder); - if (layout.tag_size == 0) { - const fields: [1]*llvm.Value = .{payload}; - if (need_unnamed) { - return o.context.constStruct(&fields, fields.len, .False); - } else { - return llvm_union_ty.constNamedStruct(&fields, fields.len); - } - } - const llvm_tag_value = try lowerValue(o, .{ - .ty = tv.ty.unionTagTypeSafety(mod).?, - .val = tag_and_val.tag, - }); - var fields: [3]*llvm.Value = undefined; - var fields_len: c_uint = 2; + if (layout.tag_size == 0) return o.builder.structConst(if (need_unnamed) + try o.builder.structType(union_ty.structKind(&o.builder), &.{payload_ty}) + else + union_ty, &.{payload}); + const tag = try o.lowerValue(un.tag); + const tag_ty = tag.typeOf(&o.builder); + var fields: [3]Builder.Type = undefined; + var vals: [3]Builder.Constant = undefined; + var len: usize = 2; if (layout.tag_align >= layout.payload_align) { - fields = .{ llvm_tag_value, payload, undefined }; + fields = .{ tag_ty, payload_ty, undefined }; + vals = .{ tag, payload, undefined }; } else { - fields = .{ payload, llvm_tag_value, undefined }; + fields = .{ payload_ty, tag_ty, undefined }; + vals = .{ payload, tag, undefined }; } if (layout.padding != 0) { - fields[2] = o.context.intType(8).arrayType(layout.padding).getUndef(); - fields_len = 3; - } - if (need_unnamed) { - return o.context.constStruct(&fields, fields_len, .False); - } else { - return llvm_union_ty.constNamedStruct(&fields, 
fields_len); + fields[2] = try o.builder.arrayType(layout.padding, .i8); + vals[2] = try o.builder.undefConst(fields[2]); + len = 3; } + return o.builder.structConst(if (need_unnamed) + try o.builder.structType(union_ty.structKind(&o.builder), fields[0..len]) + else + union_ty, vals[0..len]); }, .memoized_call => unreachable, - } + }; } - fn lowerIntAsPtr(o: *Object, val: Value) Error!*llvm.Value { + fn lowerIntAsPtr(o: *Object, val: InternPool.Index) Allocator.Error!Builder.Constant { const mod = o.module; - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => return o.context.pointerType(0).getUndef(), + switch (mod.intern_pool.indexToKey(val)) { + .undef => return o.builder.undefConst(.ptr), .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_space, mod); - const llvm_int = lowerBigInt(o, Type.usize, bigint); - return llvm_int.constIntToPtr(o.context.pointerType(0)); + const bigint = val.toValue().toBigInt(&bigint_space, mod); + const llvm_int = try lowerBigInt(o, Type.usize, bigint); + return o.builder.castConst(.inttoptr, llvm_int, .ptr); }, else => unreachable, } } - fn lowerBigInt(o: *Object, ty: Type, bigint: std.math.big.int.Const) *llvm.Value { + fn lowerBigInt( + o: *Object, + ty: Type, + bigint: std.math.big.int.Const, + ) Allocator.Error!Builder.Constant { const mod = o.module; - const int_info = ty.intInfo(mod); - assert(int_info.bits != 0); - const llvm_type = o.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @as(c_uint, @intCast(bigint.limbs.len)), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; + return o.builder.bigIntConst(try o.builder.intType(ty.intInfo(mod).bits), bigint); } const ParentPtr = struct { ty: Type, - llvm_ptr: *llvm.Value, + llvm_ptr: Builder.Value, }; - fn lowerParentPtrDecl( - o: *Object, - ptr_val: Value, - decl_index: Module.Decl.Index, - ) Error!*llvm.Value { + fn lowerParentPtrDecl(o: *Object, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant { const mod = o.module; const decl = mod.declPtr(decl_index); try mod.markDeclAlive(decl); const ptr_ty = try mod.singleMutPtrType(decl.ty); - return try o.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); + return o.lowerDeclRefValue(ptr_ty, decl_index); } - fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { + fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Allocator.Error!Builder.Constant { const mod = o.module; - const target = mod.getTarget(); return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { - .decl => |decl| o.lowerParentPtrDecl(ptr_val, decl), - .mut_decl => |mut_decl| o.lowerParentPtrDecl(ptr_val, mut_decl.decl), - .int => |int| o.lowerIntAsPtr(int.toValue()), + .decl => |decl| o.lowerParentPtrDecl(decl), + .mut_decl => |mut_decl| o.lowerParentPtrDecl(mut_decl.decl), + .int => |int| try o.lowerIntAsPtr(int), .eu_payload => |eu_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true); + const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true); const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); const payload_ty = eu_ty.errorUnionPayload(mod); if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // In this case, we represent pointer to error union the same as pointer // to the payload. - return parent_llvm_ptr; + return parent_ptr; } - const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = o.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), - }; - const eu_llvm_ty = try o.lowerType(eu_ty); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + const index: u32 = + if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1; + return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, index), + }); }, .opt_payload => |opt_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true); + const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true); const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); const payload_ty = opt_ty.optionalChild(mod); @@ -3816,99 +4158,89 @@ pub const Object = struct { { // In this case, we represent pointer to optional the same as pointer // to the payload. - return parent_llvm_ptr; + return parent_ptr; } - const llvm_u32 = o.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - const opt_llvm_ty = try o.lowerType(opt_ty); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return o.builder.gepConst(.inbounds, try o.lowerType(opt_ty), parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, 0), + }); }, .comptime_field => unreachable, .elem => |elem_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); - - const llvm_usize = try o.lowerType(Type.usize); - const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; + const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true); const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); - const elem_llvm_ty = try o.lowerType(elem_ty); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + + return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{ + try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index), + }); }, .field => |field_ptr| { - const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); + const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - const field_index = @as(u32, @intCast(field_ptr.index)); - const llvm_u32 = o.context.intType(32); + const field_index: u32 = @intCast(field_ptr.index); switch (parent_ty.zigTypeTag(mod)) { .Union => { if (parent_ty.containerLayout(mod) == .Packed) { - return parent_llvm_ptr; + return parent_ptr; } const layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) { // In this case a pointer to the union and a pointer to any // (void) payload is the same. 
- return parent_llvm_ptr; + return parent_ptr; } - const llvm_pl_index = if (layout.tag_size == 0) - 0 - else - @intFromBool(layout.tag_align >= layout.payload_align); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_pl_index, .False), - }; + const parent_llvm_ty = try o.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, @intFromBool( + layout.tag_size > 0 and layout.tag_align >= layout.payload_align, + )), + }); }, .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { - if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = o.context.intType(target.ptrBitWidth()); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); + if (!byte_aligned) return parent_ptr; + const llvm_usize = try o.lowerType(Type.usize); + const base_addr = + try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize); // count bits of fields before this one const prev_bits = b: { var b: usize = 0; for (parent_ty.structFields(mod).values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @as(usize, @intCast(field.ty.bitSize(mod))); + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + b += @intCast(field.ty.bitSize(mod)); } break :b b; }; - const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); - const field_addr = base_addr.constAdd(byte_offset); - const final_llvm_ty = o.context.pointerType(0); - return field_addr.constIntToPtr(final_llvm_ty); + const byte_offset = try o.builder.intConst(llvm_usize, prev_bits / 8); + const field_addr = try o.builder.binConst(.add, base_addr, byte_offset); + return o.builder.castConst(.inttoptr, field_addr, .ptr); } - const parent_llvm_ty = try o.lowerType(parent_ty); - if (llvmField(parent_ty, field_index, mod)) |llvm_field| { - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field.index, .False), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } else { - const llvm_index = llvm_u32.constInt(@intFromBool(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } + return o.builder.gepConst( + .inbounds, + try o.lowerType(parent_ty), + parent_ptr, + null, + if (llvmField(parent_ty, field_index, mod)) |llvm_field| &.{ + try o.builder.intConst(.i32, 0), + try o.builder.intConst(.i32, llvm_field.index), + } else &.{ + try o.builder.intConst(.i32, @intFromBool( + parent_ty.hasRuntimeBitsIgnoreComptime(mod), + )), + }, + ); }, .Pointer => { assert(parent_ty.isSlice(mod)); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(field_index, .False), - }; const parent_llvm_ty = try o.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{ + try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, field_index), + }); }, else => unreachable, } @@ -3916,11 +4248,7 @@ pub const Object = struct { }; } - fn lowerDeclRefValue( - o: *Object, - tv: TypedValue, - decl_index: Module.Decl.Index, - ) Error!*llvm.Value { + fn lowerDeclRefValue(o: *Object, ty: 
Type, decl_index: Module.Decl.Index) Allocator.Error!Builder.Constant { const mod = o.module; // In the case of something like: @@ -3931,69 +4259,59 @@ pub const Object = struct { const decl = mod.declPtr(decl_index); if (decl.val.getFunction(mod)) |func| { if (func.owner_decl != decl_index) { - return o.lowerDeclRefValue(tv, func.owner_decl); + return o.lowerDeclRefValue(ty, func.owner_decl); } } else if (decl.val.getExternFunc(mod)) |func| { if (func.decl != decl_index) { - return o.lowerDeclRefValue(tv, func.decl); + return o.lowerDeclRefValue(ty, func.decl); } } const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or - (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) - { - return o.lowerPtrToVoid(tv.ty); - } + (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) return o.lowerPtrToVoid(ty); try mod.markDeclAlive(decl); - const llvm_decl_val = if (is_fn_body) - try o.resolveLlvmFunction(decl_index) + const llvm_global = if (is_fn_body) + (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global else - try o.resolveGlobalDecl(decl_index); + (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global; - const target = mod.getTarget(); - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: { - const llvm_decl_wanted_ptr_ty = o.context.pointerType(llvm_wanted_addrspace); - break :blk llvm_decl_val.constAddrSpaceCast(llvm_decl_wanted_ptr_ty); - } else llvm_decl_val; - - const llvm_type = try o.lowerType(tv.ty); - if (tv.ty.zigTypeTag(mod) == .Int) { - return llvm_val.constPtrToInt(llvm_type); - } else { - return llvm_val.constBitCast(llvm_type); - } + const llvm_val = try o.builder.convConst( + .unneeded, + llvm_global.toConst(), + try o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())), + ); + + return o.builder.convConst(if (ty.isAbiInt(mod)) switch (ty.intInfo(mod).signedness) { + .signed => .signed, + .unsigned => .unsigned, + } else .unneeded, llvm_val, try o.lowerType(ty)); } - fn lowerPtrToVoid(o: *Object, ptr_ty: Type) !*llvm.Value { + fn lowerPtrToVoid(o: *Object, ptr_ty: Type) Allocator.Error!Builder.Constant { const mod = o.module; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const llvm_usize = try o.lowerType(Type.usize); - const llvm_ptr_ty = try o.lowerType(ptr_ty); - if (ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional()) |alignment| { - return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty); - } - // Note that these 0xaa values are appropriate even in release-optimized builds - // because we need a well-defined value that is not null, and LLVM does not - // have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR - // instruction is followed by a `wrap_optional`, it will return this value - // verbatim, and the result should test as non-null. 
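As the comment above explains, a pointer to a zero-bit value still needs a concrete, non-null, suitably aligned address. The rewritten lowering uses the declared alignment when one exists and otherwise an 0xaa pattern sized to the target pointer width. A standalone sketch of that address selection (just the arithmetic, not the compiler's API):

const std = @import("std");

/// Pick a well-defined, non-null placeholder address for a pointer to a
/// zero-bit type: the declared alignment if any, else an 0xaa bit pattern.
fn placeholderAddr(ptr_bit_width: u16, alignment: ?u64) u64 {
    if (alignment) |a| return a;
    return switch (ptr_bit_width) {
        16 => 0xaaaa,
        32 => 0xaaaaaaaa,
        64 => 0xaaaaaaaa_aaaaaaaa,
        else => unreachable,
    };
}

test "placeholder addresses for zero-bit pointers" {
    try std.testing.expectEqual(@as(u64, 8), placeholderAddr(64, 8));
    try std.testing.expectEqual(@as(u64, 0xaaaaaaaa), placeholderAddr(32, null));
}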
- const target = mod.getTarget(); - const int = switch (target.ptrBitWidth()) { - 16 => llvm_usize.constInt(0xaaaa, .False), - 32 => llvm_usize.constInt(0xaaaaaaaa, .False), - 64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False), + const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional() orelse + // Note that these 0xaa values are appropriate even in release-optimized builds + // because we need a well-defined value that is not null, and LLVM does not + // have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR + // instruction is followed by a `wrap_optional`, it will return this value + // verbatim, and the result should test as non-null. + switch (mod.getTarget().ptrBitWidth()) { + 16 => 0xaaaa, + 32 => 0xaaaaaaaa, + 64 => 0xaaaaaaaa_aaaaaaaa, else => unreachable, }; - return int.constIntToPtr(llvm_ptr_ty); + const llvm_usize = try o.lowerType(Type.usize); + const llvm_ptr_ty = try o.lowerType(ptr_ty); + return o.builder.castConst(.inttoptr, try o.builder.intConst(llvm_usize, int), llvm_ptr_ty); } fn addAttr(o: *Object, val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void { @@ -4023,7 +4341,7 @@ pub const Object = struct { ) void { const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len); assert(kind_id != 0); - const llvm_attr = o.context.createEnumAttribute(kind_id, int); + const llvm_attr = o.builder.llvm.context.createEnumAttribute(kind_id, int); val.addAttributeAtIndex(index, llvm_attr); } @@ -4034,11 +4352,11 @@ pub const Object = struct { name: []const u8, value: []const u8, ) void { - const llvm_attr = o.context.createStringAttribute( + const llvm_attr = o.builder.llvm.context.createStringAttribute( name.ptr, - @as(c_uint, @intCast(name.len)), + @intCast(name.len), value.ptr, - @as(c_uint, @intCast(value.len)), + @intCast(value.len), ); val.addAttributeAtIndex(index, llvm_attr); } @@ -4063,23 +4381,23 @@ pub const Object = struct { /// widen it before using it and then truncate the result. /// RMW exchange of floating-point values is bitcasted to same-sized integer /// types to work around a LLVM deficiency when targeting ARM/AArch64. 
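The doc comment above states the widening rule that the reworked getAtomicAbiType encodes: bools become i8, floats are only widened (to an integer of their ABI size) for RMW exchange, and integers whose bit count is not a byte-sized power of two are widened to their ABI size in bits; everything else is used directly. A standalone sketch of that rule on plain bit counts, where `abi_size_bits` stands in for `abiSize(mod) * 8`:

const std = @import("std");

/// Returns the widened bit width an atomic operand should use, or null if
/// the original type can be used as-is.
fn atomicAbiBits(
    kind: enum { int, bool, float },
    bits: u16,
    abi_size_bits: u16,
    is_rmw_xchg: bool,
) ?u16 {
    return switch (kind) {
        .bool => 8,
        .float => if (is_rmw_xchg) abi_size_bits else null,
        .int => if (!std.math.isPowerOfTwo(bits) or bits % 8 != 0) abi_size_bits else null,
    };
}

test "atomic operand widening" {
    // u7 is neither a power-of-two number of bits nor byte sized: widen to i8.
    try std.testing.expectEqual(@as(?u16, 8), atomicAbiBits(.int, 7, 8, false));
    // u32 can be used directly.
    try std.testing.expectEqual(@as(?u16, null), atomicAbiBits(.int, 32, 32, false));
}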
- fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) ?*llvm.Type { + fn getAtomicAbiType(o: *Object, ty: Type, is_rmw_xchg: bool) Allocator.Error!Builder.Type { const mod = o.module; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, .Enum => ty.intTagType(mod), .Float => { - if (!is_rmw_xchg) return null; - return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8))); + if (!is_rmw_xchg) return .none; + return o.builder.intType(@intCast(ty.abiSize(mod) * 8)); }, - .Bool => return o.context.intType(8), - else => return null, + .Bool => return .i8, + else => return .none, }; const bit_count = int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8))); + return o.builder.intType(@intCast(int_ty.abiSize(mod) * 8)); } else { - return null; + return .none; } } @@ -4120,13 +4438,13 @@ pub const Object = struct { llvm_arg_i: u32, alignment: u32, byval_attr: bool, - param_llvm_ty: *llvm.Type, + param_llvm_ty: Builder.Type, ) void { o.addArgAttr(llvm_fn, llvm_arg_i, "nonnull"); o.addArgAttr(llvm_fn, llvm_arg_i, "readonly"); o.addArgAttrInt(llvm_fn, llvm_arg_i, "align", alignment); if (byval_attr) { - llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty); + llvm_fn.addByValAttr(llvm_arg_i, param_llvm_ty.toLlvm(&o.builder)); } } }; @@ -4159,20 +4477,26 @@ pub const DeclGen = struct { _ = try o.resolveLlvmFunction(extern_func.decl); } else { const target = mod.getTarget(); - var global = try o.resolveGlobalDecl(decl_index); - global.setAlignment(decl.getAlignment(mod)); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s); + const variable = try o.resolveGlobalDecl(decl_index); + const global = variable.ptrConst(&o.builder).global; + var llvm_global = global.toLlvm(&o.builder); + variable.ptr(&o.builder).alignment = Builder.Alignment.fromByteUnits(decl.getAlignment(mod)); + llvm_global.setAlignment(decl.getAlignment(mod)); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| { + variable.ptr(&o.builder).section = try o.builder.string(section); + llvm_global.setSection(section); + } assert(decl.has_tv); - const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { - break :init_val variable.init; - } else init_val: { - global.setGlobalConstant(.True); + const init_val = if (decl.val.getVariable(mod)) |decl_var| decl_var.init else init_val: { + variable.ptr(&o.builder).mutability = .constant; + llvm_global.setGlobalConstant(.True); break :init_val decl.val.toIntern(); }; if (init_val != .none) { - const llvm_init = try o.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() }); - if (global.globalGetValueType() == llvm_init.typeOf()) { - global.setInitializer(llvm_init); + const llvm_init = try o.lowerValue(init_val); + const llvm_init_ty = llvm_init.typeOf(&o.builder); + if (global.ptrConst(&o.builder).type == llvm_init_ty) { + llvm_global.setInitializer(llvm_init.toLlvm(&o.builder)); } else { // LLVM does not allow us to change the type of globals. 
So we must // create a new global with the correct type, copy all its attributes, @@ -4189,23 +4513,27 @@ pub const DeclGen = struct { // Related: https://github.com/ziglang/zig/issues/13265 const llvm_global_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const new_global = o.llvm_module.addGlobalInAddressSpace( - llvm_init.typeOf(), + llvm_init_ty.toLlvm(&o.builder), "", - llvm_global_addrspace, + @intFromEnum(llvm_global_addrspace), ); - new_global.setLinkage(global.getLinkage()); - new_global.setUnnamedAddr(global.getUnnamedAddress()); - new_global.setAlignment(global.getAlignment()); - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| - new_global.setSection(s); - new_global.setInitializer(llvm_init); + new_global.setLinkage(llvm_global.getLinkage()); + new_global.setUnnamedAddr(llvm_global.getUnnamedAddress()); + new_global.setAlignment(llvm_global.getAlignment()); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| + new_global.setSection(section); + new_global.setInitializer(llvm_init.toLlvm(&o.builder)); // TODO: How should this work then the address space of a global changed? - global.replaceAllUsesWith(new_global); - o.decl_map.putAssumeCapacity(decl_index, new_global); - new_global.takeName(global); - global.deleteGlobal(); - global = new_global; + llvm_global.replaceAllUsesWith(new_global); + new_global.takeName(llvm_global); + o.builder.llvm.globals.items[@intFromEnum(variable.ptrConst(&o.builder).global)] = + new_global; + llvm_global.deleteGlobal(); + llvm_global = new_global; + variable.ptr(&o.builder).mutability = .global; + global.ptr(&o.builder).type = llvm_init_ty; } + variable.ptr(&o.builder).init = llvm_init; } if (o.di_builder) |dib| { @@ -4216,7 +4544,7 @@ pub const DeclGen = struct { const di_global = dib.createGlobalVariableExpression( di_file.toScope(), mod.intern_pool.stringToSlice(decl.name), - global.getValueName(), + llvm_global.getValueName(), di_file, line_number, try o.lowerDebugType(decl.ty, .full), @@ -4224,7 +4552,7 @@ pub const DeclGen = struct { ); try o.di_map.put(o.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) llvm_global.attachMetaData(di_global); } } } @@ -4235,7 +4563,7 @@ pub const FuncGen = struct { dg: *DeclGen, air: Air, liveness: Liveness, - context: *llvm.Context, + wip: Builder.WipFunction, builder: *llvm.Builder, di_scope: ?*llvm.DIScope, di_file: ?*llvm.DIFile, @@ -4252,43 +4580,44 @@ pub const FuncGen = struct { /// This stores the LLVM values used in a function, such that they can be referred to /// in other instructions. This table is cleared before every function is generated. - func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, *llvm.Value), + func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, Builder.Value), /// If the return type is sret, this is the result pointer. Otherwise null. /// Note that this can disagree with isByRef for the return type in the case /// of C ABI functions. - ret_ptr: ?*llvm.Value, + ret_ptr: Builder.Value, /// Any function that needs to perform Valgrind client requests needs an array alloca /// instruction, however a maximum of one per function is needed. - valgrind_client_request_array: ?*llvm.Value = null, + valgrind_client_request_array: Builder.Value = .none, /// These fields are used to refer to the LLVM value of the function parameters /// in an Arg instruction. 
/// This list may be shorter than the list according to the zig type system; /// it omits 0-bit types. If the function uses sret as the first parameter, /// this slice does not include it. - args: []const *llvm.Value, - arg_index: c_uint, + args: []const Builder.Value, + arg_index: usize, - llvm_func: *llvm.Value, - - err_ret_trace: ?*llvm.Value = null, + err_ret_trace: Builder.Value = .none, /// This data structure is used to implement breaking to blocks. blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { - parent_bb: *llvm.BasicBlock, + parent_bb: Builder.Function.Block.Index, breaks: *BreakList, }), - single_threaded: bool, + sync_scope: Builder.SyncScope, const DbgState = struct { loc: *llvm.DILocation, scope: *llvm.DIScope, base_line: u32 }; - const BreakList = std.MultiArrayList(struct { - bb: *llvm.BasicBlock, - val: *llvm.Value, - }); + const BreakList = union { + list: std.MultiArrayList(struct { + bb: Builder.Function.Block.Index, + val: Builder.Value, + }), + len: usize, + }; fn deinit(self: *FuncGen) void { - self.builder.dispose(); + self.wip.deinit(); self.dbg_inlined.deinit(self.gpa); self.dbg_block_stack.deinit(self.gpa); self.func_inst_table.deinit(self.gpa); @@ -4300,7 +4629,7 @@ pub const FuncGen = struct { return self.dg.todo(format, args); } - fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*llvm.Value { + fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !Builder.Value { const gpa = self.gpa; const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; @@ -4311,14 +4640,14 @@ pub const FuncGen = struct { .ty = self.typeOf(inst), .val = (try self.air.value(inst, mod)).?, }); - gop.value_ptr.* = llvm_val; - return llvm_val; + gop.value_ptr.* = llvm_val.toValue(); + return llvm_val.toValue(); } - fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value { + fn resolveValue(self: *FuncGen, tv: TypedValue) Error!Builder.Constant { const o = self.dg.object; const mod = o.module; - const llvm_val = try o.lowerValue(tv); + const llvm_val = try o.lowerValue(tv.val.toIntern()); if (!isByRef(tv.ty, mod)) return llvm_val; // We have an LLVM value but we need to create a global constant and @@ -4326,17 +4655,50 @@ pub const FuncGen = struct { const target = mod.getTarget(); const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); - const global = o.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace); - global.setInitializer(llvm_val); - global.setLinkage(.Private); - global.setGlobalConstant(.True); - global.setUnnamedAddr(.True); - global.setAlignment(tv.ty.abiAlignment(mod)); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace)) - else - global; - return addrspace_casted_ptr; + const llvm_ty = llvm_val.typeOf(&o.builder); + const llvm_alignment = tv.ty.abiAlignment(mod); + const llvm_global = o.llvm_module.addGlobalInAddressSpace(llvm_ty.toLlvm(&o.builder), "", @intFromEnum(llvm_actual_addrspace)); + llvm_global.setInitializer(llvm_val.toLlvm(&o.builder)); + llvm_global.setLinkage(.Private); + llvm_global.setGlobalConstant(.True); + llvm_global.setUnnamedAddr(.True); + llvm_global.setAlignment(llvm_alignment); + + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .addr_space = llvm_actual_addrspace, + .type = llvm_ty, + .kind = .{ .variable = 
@enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = llvm_val, + .alignment = Builder.Alignment.fromByteUnits(llvm_alignment), + }; + try o.builder.llvm.globals.append(o.gpa, llvm_global); + const global_index = try o.builder.addGlobal(.empty, global); + try o.builder.variables.append(o.gpa, variable); + + return o.builder.convConst( + .unneeded, + global_index.toConst(), + try o.builder.ptrType(llvm_wanted_addrspace), + ); + } + + fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { + const o = self.dg.object; + const mod = o.module; + if (o.null_opt_usize == .no_init) { + const ty = try mod.intern(.{ .opt_type = .usize_type }); + o.null_opt_usize = try self.resolveValue(.{ + .ty = ty.toType(), + .val = (try mod.intern(.{ .opt = .{ .ty = ty, .val = .none } })).toValue(), + }); + } + return o.null_opt_usize; } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { @@ -4345,10 +4707,9 @@ pub const FuncGen = struct { const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) - continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - const opt_value: ?*llvm.Value = switch (air_tags[inst]) { + const val: Builder.Value = switch (air_tags[inst]) { // zig fmt: off .add => try self.airAdd(inst, false), .add_optimized => try self.airAdd(inst, true), @@ -4439,15 +4800,15 @@ pub const FuncGen = struct { .cmp_vector_optimized => try self.airCmpVector(inst, true), .cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst), - .is_non_null => try self.airIsNonNull(inst, false, .NE), - .is_non_null_ptr => try self.airIsNonNull(inst, true , .NE), - .is_null => try self.airIsNonNull(inst, false, .EQ), - .is_null_ptr => try self.airIsNonNull(inst, true , .EQ), + .is_non_null => try self.airIsNonNull(inst, false, .ne), + .is_non_null_ptr => try self.airIsNonNull(inst, true , .ne), + .is_null => try self.airIsNonNull(inst, false, .eq), + .is_null_ptr => try self.airIsNonNull(inst, true , .eq), - .is_non_err => try self.airIsErr(inst, .EQ, false), - .is_non_err_ptr => try self.airIsErr(inst, .EQ, true), - .is_err => try self.airIsErr(inst, .NE, false), - .is_err_ptr => try self.airIsErr(inst, .NE, true), + .is_non_err => try self.airIsErr(inst, .eq, false), + .is_non_err_ptr => try self.airIsErr(inst, .eq, true), + .is_err => try self.airIsErr(inst, .ne, false), + .is_err_ptr => try self.airIsErr(inst, .ne, true), .alloc => try self.airAlloc(inst), .ret_ptr => try self.airRetPtr(inst), @@ -4524,10 +4885,10 @@ pub const FuncGen = struct { .reduce => try self.airReduce(inst, false), .reduce_optimized => try self.airReduce(inst, true), - .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), - .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), - .atomic_store_release => try self.airAtomicStore(inst, .Release), - .atomic_store_seq_cst => try self.airAtomicStore(inst, .SequentiallyConsistent), + .atomic_store_unordered => try self.airAtomicStore(inst, .unordered), + .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic), + .atomic_store_release => try self.airAtomicStore(inst, .release), + .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst), .struct_field_ptr => try self.airStructFieldPtr(inst), .struct_field_val => try self.airStructFieldVal(body[i..]), @@ 
-4569,8 +4930,8 @@ pub const FuncGen = struct { .inferred_alloc, .inferred_alloc_comptime => unreachable, - .unreach => self.airUnreach(inst), - .dbg_stmt => self.airDbgStmt(inst), + .unreach => try self.airUnreach(inst), + .dbg_stmt => try self.airDbgStmt(inst), .dbg_inline_begin => try self.airDbgInlineBegin(inst), .dbg_inline_end => try self.airDbgInlineEnd(inst), .dbg_block_begin => try self.airDbgBlockBegin(), @@ -4588,17 +4949,14 @@ pub const FuncGen = struct { .work_group_id => try self.airWorkGroupId(inst), // zig fmt: on }; - if (opt_value) |val| { - const ref = Air.indexToRef(inst); - try self.func_inst_table.putNoClobber(self.gpa, ref, val); - } + if (val != .none) try self.func_inst_table.putNoClobber(self.gpa, Air.indexToRef(inst), val); } } - fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value { + fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); + const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const o = self.dg.object; const mod = o.module; const ip = &mod.intern_pool; @@ -4619,19 +4977,21 @@ pub const FuncGen = struct { const ret_ptr = if (!sret) null else blk: { const llvm_ret_ty = try o.lowerType(return_type); - const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod)); - try llvm_args.append(ret_ptr); + const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); + const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment); + try llvm_args.append(ret_ptr.toLlvm(&self.wip)); break :blk ret_ptr; }; const err_return_tracing = return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { - try llvm_args.append(self.err_ret_trace.?); + assert(self.err_ret_trace != .none); + try llvm_args.append(self.err_ret_trace.toLlvm(&self.wip)); } var it = iterateParamTypes(o, fn_info); - while (it.nextCall(self, args)) |lowering| switch (lowering) { + while (try it.nextCall(self, args)) |lowering| switch (lowering) { .no_bits => continue, .byval => { const arg = args[it.zig_index - 1]; @@ -4639,12 +4999,11 @@ pub const FuncGen = struct { const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try o.lowerType(param_ty); if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); } else { - try llvm_args.append(llvm_arg); + try llvm_args.append(llvm_arg.toLlvm(&self.wip)); } }, .byref => { @@ -4652,14 +5011,13 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); if (isByRef(param_ty, mod)) { - try llvm_args.append(llvm_arg); + try llvm_args.append(llvm_arg.toLlvm(&self.wip)); } else { - const alignment = param_ty.abiAlignment(mod); - const param_llvm_ty = llvm_arg.typeOf(); - const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); - const store_inst = self.builder.buildStore(llvm_arg, arg_ptr); - 
store_inst.setAlignment(alignment); - try llvm_args.append(arg_ptr); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const param_llvm_ty = llvm_arg.typeOfWip(&self.wip); + const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); + _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); + try llvm_args.append(arg_ptr.toLlvm(&self.wip)); } }, .byref_mut => { @@ -4667,134 +5025,124 @@ pub const FuncGen = struct { const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const alignment = param_ty.abiAlignment(mod); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); const param_llvm_ty = try o.lowerType(param_ty); - const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); + const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment); if (isByRef(param_ty, mod)) { - const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - - const store_inst = self.builder.buildStore(load_inst, arg_ptr); - store_inst.setAlignment(alignment); - try llvm_args.append(arg_ptr); + const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, ""); + _ = try self.wip.store(.normal, loaded, arg_ptr, alignment); } else { - const store_inst = self.builder.buildStore(llvm_arg, arg_ptr); - store_inst.setAlignment(alignment); - try llvm_args.append(arg_ptr); + _ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment); } + try llvm_args.append(arg_ptr.toLlvm(&self.wip)); }, .abi_sized_int => { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); - const int_llvm_ty = self.context.intType(abi_size * 8); + const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8)); if (isByRef(param_ty, mod)) { - const alignment = param_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); } else { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. 
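The comment above names the pattern used for abi_sized_int parameters: LLVM aggregate values cannot simply be bitcast to an integer, so the value is spilled to a stack slot as its struct type and reloaded as an integer of the type's ABI size. The same reinterpretation can be sketched in ordinary Zig with a copy through memory; the struct here is only an illustration:

const std = @import("std");

const Pair = extern struct { a: u16, b: u16 };

/// Reinterpret a 4-byte struct as its ABI-sized integer by storing it to
/// memory as one type and loading it back as another.
fn asAbiInt(p: Pair) u32 {
    var buf: [4]u8 align(@alignOf(u32)) = undefined;
    @memcpy(&buf, std.mem.asBytes(&p));
    return std.mem.bytesToValue(u32, &buf);
}

test "store as one type, load as another" {
    const p: Pair = .{ .a = 0x1122, .b = 0x3344 };
    // The exact integer depends on endianness; round-tripping recovers the struct.
    const int = asAbiInt(p);
    const back = std.mem.bytesToValue(Pair, std.mem.asBytes(&int));
    try std.testing.expectEqual(p, back);
}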
- const alignment = @max( + const alignment = Builder.Alignment.fromByteUnits(@max( param_ty.abiAlignment(mod), - o.target_data.abiAlignmentOfType(int_llvm_ty), - ); - const int_ptr = self.buildAlloca(int_llvm_ty, alignment); - const store_inst = self.builder.buildStore(llvm_arg, int_ptr); - store_inst.setAlignment(alignment); - const load_inst = self.builder.buildLoad(int_llvm_ty, int_ptr, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + o.target_data.abiAlignmentOfType(int_llvm_ty.toLlvm(&o.builder)), + )); + const int_ptr = try self.buildAlloca(int_llvm_ty, alignment); + _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment); + const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); } }, .slice => { const arg = args[it.zig_index - 1]; const llvm_arg = try self.resolveInst(arg); - const ptr = self.builder.buildExtractValue(llvm_arg, 0, ""); - const len = self.builder.buildExtractValue(llvm_arg, 1, ""); - try llvm_args.ensureUnusedCapacity(2); - llvm_args.appendAssumeCapacity(ptr); - llvm_args.appendAssumeCapacity(len); + const ptr = try self.wip.extractValue(llvm_arg, &.{0}, ""); + const len = try self.wip.extractValue(llvm_arg, &.{1}, ""); + try llvm_args.appendSlice(&.{ ptr.toLlvm(&self.wip), len.toLlvm(&self.wip) }); }, .multiple_llvm_types => { const arg = args[it.zig_index - 1]; const param_ty = self.typeOf(arg); - const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len]; + const llvm_types = it.types_buffer[0..it.types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty, mod); - const arg_ptr = if (is_by_ref) llvm_arg else p: { - const p = self.buildAlloca(llvm_arg.typeOf(), null); - const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(param_ty.abiAlignment(mod)); - break :p p; + const arg_ptr = if (is_by_ref) llvm_arg else ptr: { + const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod)); + const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); + break :ptr ptr; }; - const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False); - try llvm_args.ensureUnusedCapacity(it.llvm_types_len); - for (llvm_types, 0..) |field_ty, i_usize| { - const i = @as(c_uint, @intCast(i_usize)); - const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); - const load_inst = self.builder.buildLoad(field_ty, field_ptr, ""); - load_inst.setAlignment(target.ptrBitWidth() / 8); - llvm_args.appendAssumeCapacity(load_inst); + const llvm_ty = try o.builder.structType(.normal, llvm_types); + try llvm_args.ensureUnusedCapacity(it.types_len); + for (llvm_types, 0..) 
|field_ty, i| { + const alignment = + Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + const field_ptr = try self.wip.gepStruct(llvm_ty, arg_ptr, i, ""); + const loaded = try self.wip.load(.normal, field_ty, field_ptr, alignment, ""); + llvm_args.appendAssumeCapacity(loaded.toLlvm(&self.wip)); } }, .as_u16 => { const arg = args[it.zig_index - 1]; const llvm_arg = try self.resolveInst(arg); - const casted = self.builder.buildBitCast(llvm_arg, self.context.intType(16), ""); - try llvm_args.append(casted); + const casted = try self.wip.cast(.bitcast, llvm_arg, .i16, ""); + try llvm_args.append(casted.toLlvm(&self.wip)); }, .float_array => |count| { const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); if (!isByRef(arg_ty, mod)) { - const p = self.buildAlloca(llvm_arg.typeOf(), null); - const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(arg_ty.abiAlignment(mod)); - llvm_arg = store_inst; + const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); + llvm_arg = ptr; } const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); - const array_llvm_ty = float_ty.arrayType(count); + const array_ty = try o.builder.arrayType(count, float_ty); - const alignment = arg_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); }, .i32_array, .i64_array => |arr_len| { const elem_size: u8 = if (lowering == .i32_array) 32 else 64; const arg = args[it.zig_index - 1]; const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); if (!isByRef(arg_ty, mod)) { - const p = self.buildAlloca(llvm_arg.typeOf(), null); - const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(arg_ty.abiAlignment(mod)); - llvm_arg = store_inst; + const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, llvm_arg, ptr, alignment); + llvm_arg = ptr; } - const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len); - const alignment = arg_ty.abiAlignment(mod); - const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); - load_inst.setAlignment(alignment); - try llvm_args.append(load_inst); + const array_ty = + try o.builder.arrayType(arr_len, try o.builder.intType(@intCast(elem_size))); + const loaded = try self.wip.load(.normal, array_ty, llvm_arg, alignment, ""); + try llvm_args.append(loaded.toLlvm(&self.wip)); }, }; - const call = self.builder.buildCall( - try o.lowerType(zig_fn_ty), - llvm_fn, - llvm_args.items.ptr, - @as(c_uint, @intCast(llvm_args.items.len)), - toLlvmCallConv(fn_info.cc, target), - attr, - "", + const llvm_fn_ty = try o.lowerType(zig_fn_ty); + const call = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn.toLlvm(&self.wip), + llvm_args.items.ptr, + @intCast(llvm_args.items.len), + toLlvmCallConv(fn_info.cc, target), + attr, + "", + ), + &self.wip, ); if (callee_ty.zigTypeTag(mod) == .Pointer) { @@ -4802,12 
+5150,12 @@ pub const FuncGen = struct { it = iterateParamTypes(o, fn_info); it.llvm_index += @intFromBool(sret); it.llvm_index += @intFromBool(err_return_tracing); - while (it.next()) |lowering| switch (lowering) { + while (try it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types.get(ip)[param_index].toType(); if (!isByRef(param_ty, mod)) { - o.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1); + o.addByValParamAttrs(call.toLlvm(&self.wip), param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { @@ -4815,10 +5163,10 @@ pub const FuncGen = struct { const param_ty = fn_info.param_types.get(ip)[param_index].toType(); const param_llvm_ty = try o.lowerType(param_ty); const alignment = param_ty.abiAlignment(mod); - o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); + o.addByRefParamAttrs(call.toLlvm(&self.wip), it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { - o.addArgAttr(call, it.llvm_index - 1, "noundef"); + o.addArgAttr(call.toLlvm(&self.wip), it.llvm_index - 1, "noundef"); }, // No attributes needed for these. .no_bits, @@ -4838,41 +5186,40 @@ pub const FuncGen = struct { if (math.cast(u5, it.zig_index - 1)) |i| { if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) { - o.addArgAttr(call, llvm_arg_i, "noalias"); + o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "noalias"); } } if (param_ty.zigTypeTag(mod) != .Optional) { - o.addArgAttr(call, llvm_arg_i, "nonnull"); + o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "nonnull"); } if (ptr_info.flags.is_const) { - o.addArgAttr(call, llvm_arg_i, "readonly"); + o.addArgAttr(call.toLlvm(&self.wip), llvm_arg_i, "readonly"); } const elem_align = ptr_info.flags.alignment.toByteUnitsOptional() orelse @max(ptr_info.child.toType().abiAlignment(mod), 1); - o.addArgAttrInt(call, llvm_arg_i, "align", elem_align); + o.addArgAttrInt(call.toLlvm(&self.wip), llvm_arg_i, "align", elem_align); }, }; } if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) { - return null; + return .none; } if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { - return null; + return .none; } const llvm_ret_ty = try o.lowerType(return_type); if (ret_ptr) |rp| { - call.setCallSret(llvm_ret_ty); + call.toLlvm(&self.wip).setCallSret(llvm_ret_ty.toLlvm(&o.builder)); if (isByRef(return_type, mod)) { return rp; } else { // our by-ref status disagrees with sret so we must load. - const loaded = self.builder.buildLoad(llvm_ret_ty, rp, ""); - loaded.setAlignment(return_type.abiAlignment(mod)); - return loaded; + const return_alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); + return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, ""); } } @@ -4882,26 +5229,23 @@ pub const FuncGen = struct { // In this case the function return type is honoring the calling convention by having // a different LLVM type than the usual one. We solve this here at the callsite // by using our canonical type, then loading it if necessary. 
- const alignment = o.target_data.abiAlignmentOfType(abi_ret_ty); - const rp = self.buildAlloca(llvm_ret_ty, alignment); - const store_inst = self.builder.buildStore(call, rp); - store_inst.setAlignment(alignment); - if (isByRef(return_type, mod)) { - return rp; - } else { - const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, ""); - load_inst.setAlignment(alignment); - return load_inst; - } + const alignment = Builder.Alignment.fromByteUnits( + o.target_data.abiAlignmentOfType(abi_ret_ty.toLlvm(&o.builder)), + ); + const rp = try self.buildAlloca(llvm_ret_ty, alignment); + _ = try self.wip.store(.normal, call, rp, alignment); + return if (isByRef(return_type, mod)) + rp + else + try self.wip.load(.normal, llvm_ret_ty, rp, alignment, ""); } if (isByRef(return_type, mod)) { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. - const alignment = return_type.abiAlignment(mod); - const rp = self.buildAlloca(llvm_ret_ty, alignment); - const store_inst = self.builder.buildStore(call, rp); - store_inst.setAlignment(alignment); + const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod)); + const rp = try self.buildAlloca(llvm_ret_ty, alignment); + _ = try self.wip.store(.normal, call, rp, alignment); return rp; } else { return call; @@ -4914,13 +5258,10 @@ pub const FuncGen = struct { const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; const msg_decl = mod.declPtr(msg_decl_index); const msg_len = msg_decl.ty.childType(mod).arrayLen(mod); - const msg_ptr = try o.lowerValue(.{ - .ty = msg_decl.ty, - .val = msg_decl.val, - }); - const null_opt_addr_global = try o.getNullOptAddr(); + const msg_ptr = try o.lowerValue(try msg_decl.internValue(mod)); + const null_opt_addr_global = try fg.resolveNullOptUsize(); const target = mod.getTarget(); - const llvm_usize = fg.context.intType(target.ptrBitWidth()); + const llvm_usize = try o.lowerType(Type.usize); // example: // call fastcc void @test2.panic( // ptr @builtin.panic_messages.integer_overflow__anon_987, ; msg.ptr @@ -4929,38 +5270,38 @@ pub const FuncGen = struct { // ptr @2, ; addr (null ?usize) // ) const args = [4]*llvm.Value{ - msg_ptr, - llvm_usize.constInt(msg_len, .False), - fg.context.pointerType(0).constNull(), - null_opt_addr_global, + msg_ptr.toLlvm(&o.builder), + (try o.builder.intConst(llvm_usize, msg_len)).toLlvm(&o.builder), + (try o.builder.nullConst(.ptr)).toLlvm(&o.builder), + null_opt_addr_global.toLlvm(&o.builder), }; const panic_func = mod.funcInfo(mod.panic_func_index); const panic_decl = mod.declPtr(panic_func.owner_decl); const fn_info = mod.typeToFunc(panic_decl.ty).?; const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); - _ = fg.builder.buildCall( - try o.lowerType(panic_decl.ty), - panic_global, + _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildCall( + (try o.lowerType(panic_decl.ty)).toLlvm(&o.builder), + panic_global.toLlvm(&o.builder), &args, args.len, toLlvmCallConv(fn_info.cc, target), .Auto, "", - ); - _ = fg.builder.buildUnreachable(); + ), &fg.wip); + _ = try fg.wip.@"unreachable"(); } - fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ret_ty = self.typeOf(un_op); - if (self.ret_ptr) |ret_ptr| { + if (self.ret_ptr != .none) { const operand = try self.resolveInst(un_op); const ptr_ty 
= try mod.singleMutPtrType(ret_ty); - try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); - _ = self.builder.buildRetVoid(); - return null; + try self.store(self.ret_ptr, ptr_ty, operand, .none); + _ = try self.wip.retVoid(); + return .none; } const fn_info = mod.typeToFunc(self.dg.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4968,43 +5309,37 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = try o.lowerType(Type.anyerror); - _ = self.builder.buildRet(err_int.constInt(0, .False)); + _ = try self.wip.ret(try o.builder.intValue(Builder.Type.err_int, 0)); } else { - _ = self.builder.buildRetVoid(); + _ = try self.wip.retVoid(); } - return null; + return .none; } const abi_ret_ty = try lowerFnRetTy(o, fn_info); const operand = try self.resolveInst(un_op); - const alignment = ret_ty.abiAlignment(mod); + const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod)); if (isByRef(ret_ty, mod)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. - const load_inst = self.builder.buildLoad(abi_ret_ty, operand, ""); - load_inst.setAlignment(alignment); - _ = self.builder.buildRet(load_inst); - return null; + _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, "")); + return .none; } - const llvm_ret_ty = operand.typeOf(); + const llvm_ret_ty = operand.typeOfWip(&self.wip); if (abi_ret_ty == llvm_ret_ty) { - _ = self.builder.buildRet(operand); - return null; + _ = try self.wip.ret(operand); + return .none; } - const rp = self.buildAlloca(llvm_ret_ty, alignment); - const store_inst = self.builder.buildStore(operand, rp); - store_inst.setAlignment(alignment); - const load_inst = self.builder.buildLoad(abi_ret_ty, rp, ""); - load_inst.setAlignment(alignment); - _ = self.builder.buildRet(load_inst); - return null; + const rp = try self.buildAlloca(llvm_ret_ty, alignment); + _ = try self.wip.store(.normal, operand, rp, alignment); + _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, rp, alignment, "")); + return .none; } - fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -5016,36 +5351,34 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. 
- const err_int = try o.lowerType(Type.anyerror); - _ = self.builder.buildRet(err_int.constInt(0, .False)); + _ = try self.wip.ret(try o.builder.intValue(Builder.Type.err_int, 0)); } else { - _ = self.builder.buildRetVoid(); + _ = try self.wip.retVoid(); } - return null; + return .none; } - if (self.ret_ptr != null) { - _ = self.builder.buildRetVoid(); - return null; + if (self.ret_ptr != .none) { + _ = try self.wip.retVoid(); + return .none; } const ptr = try self.resolveInst(un_op); const abi_ret_ty = try lowerFnRetTy(o, fn_info); - const loaded = self.builder.buildLoad(abi_ret_ty, ptr, ""); - loaded.setAlignment(ret_ty.abiAlignment(mod)); - _ = self.builder.buildRet(loaded); - return null; + const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod)); + _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, "")); + return .none; } - fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const list = try self.resolveInst(ty_op.operand); const arg_ty = self.air.getRefType(ty_op.ty); const llvm_arg_ty = try o.lowerType(arg_ty); - return self.builder.buildVAArg(list, llvm_arg_ty, ""); + return self.wip.vaArg(list, llvm_arg_ty, ""); } - fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_list = try self.resolveInst(ty_op.operand); @@ -5053,75 +5386,86 @@ pub const FuncGen = struct { const llvm_va_list_ty = try o.lowerType(va_list_ty); const mod = o.module; - const result_alignment = va_list_ty.abiAlignment(mod); - const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment); + const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); + const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_copy"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{ - self.context.pointerType(0), - self.context.pointerType(0), - }; - const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); - }; + const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .ptr }, .normal); + const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - const args: [2]*llvm.Value = .{ dest_list, src_list }; - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const args: [2]*llvm.Value = .{ dest_list.toLlvm(&self.wip), src_list.toLlvm(&self.wip) }; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); - if (isByRef(va_list_ty, mod)) { - return dest_list; - } else { - const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, ""); - loaded.setAlignment(result_alignment); - return loaded; - } + return if (isByRef(va_list_ty, mod)) + dest_list + else + try self.wip.load(.normal, llvm_va_list_ty, dest_list, result_alignment, ""); } - fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaEnd(self: *FuncGen, inst: 
Air.Inst.Index) !Builder.Value { const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const list = try self.resolveInst(un_op); const llvm_fn_name = "llvm.va_end"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{self.context.pointerType(0)}; - const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); - }; - const args: [1]*llvm.Value = .{list}; - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); - return null; + const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal); + const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); + + const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)}; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); + return .none; } - fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try o.lowerType(va_list_ty); - const result_alignment = va_list_ty.abiAlignment(mod); - const list = self.buildAlloca(llvm_va_list_ty, result_alignment); + const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod)); + const list = try self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_start"; - const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const param_types = [_]*llvm.Type{self.context.pointerType(0)}; - const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); - }; - const args: [1]*llvm.Value = .{list}; - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const llvm_fn_ty = try o.builder.fnType(.void, &.{.ptr}, .normal); + const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); - if (isByRef(va_list_ty, mod)) { - return list; - } else { - const loaded = self.builder.buildLoad(llvm_va_list_ty, list, ""); - loaded.setAlignment(result_alignment); - return loaded; - } + const args: [1]*llvm.Value = .{list.toLlvm(&self.wip)}; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); + + return if (isByRef(va_list_ty, mod)) + list + else + try self.wip.load(.normal, llvm_va_list_ty, list, result_alignment, ""); } - fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value { + fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -5132,7 +5476,7 @@ pub const FuncGen = struct { return self.cmp(lhs, rhs, operand_ty, op); } - fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airCmpVector(self: *FuncGen, inst: 
Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5146,21 +5490,30 @@ pub const FuncGen = struct { return self.cmp(lhs, rhs, vec_ty, cmp_op); } - fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const llvm_fn = try self.getCmpLtErrorsLenFunction(); - const args: [1]*llvm.Value = .{operand}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const args: [1]*llvm.Value = .{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(.i1, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } fn cmp( self: *FuncGen, - lhs: *llvm.Value, - rhs: *llvm.Value, + lhs: Builder.Value, + rhs: Builder.Value, operand_ty: Type, op: math.CompareOperator, - ) Allocator.Error!*llvm.Value { + ) Allocator.Error!Builder.Value { const o = self.dg.object; const mod = o.module; const scalar_ty = operand_ty.scalarType(mod); @@ -5178,46 +5531,47 @@ pub const FuncGen = struct { // of optionals that are not pointers. const is_by_ref = isByRef(scalar_ty, mod); const opt_llvm_ty = try o.lowerType(scalar_ty); - const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); - const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); - const llvm_i2 = self.context.intType(2); - const lhs_non_null_i2 = self.builder.buildZExt(lhs_non_null, llvm_i2, ""); - const rhs_non_null_i2 = self.builder.buildZExt(rhs_non_null, llvm_i2, ""); - const lhs_shifted = self.builder.buildShl(lhs_non_null_i2, llvm_i2.constInt(1, .False), ""); - const lhs_rhs_ored = self.builder.buildOr(lhs_shifted, rhs_non_null_i2, ""); - const both_null_block = self.context.appendBasicBlock(self.llvm_func, "BothNull"); - const mixed_block = self.context.appendBasicBlock(self.llvm_func, "Mixed"); - const both_pl_block = self.context.appendBasicBlock(self.llvm_func, "BothNonNull"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); - const llvm_switch = self.builder.buildSwitch(lhs_rhs_ored, mixed_block, 2); - const llvm_i2_00 = llvm_i2.constInt(0b00, .False); - const llvm_i2_11 = llvm_i2.constInt(0b11, .False); - llvm_switch.addCase(llvm_i2_00, both_null_block); - llvm_switch.addCase(llvm_i2_11, both_pl_block); - - self.builder.positionBuilderAtEnd(both_null_block); - _ = self.builder.buildBr(end_block); - - self.builder.positionBuilderAtEnd(mixed_block); - _ = self.builder.buildBr(end_block); - - self.builder.positionBuilderAtEnd(both_pl_block); + const lhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, lhs, is_by_ref); + const rhs_non_null = try self.optCmpNull(.ne, opt_llvm_ty, rhs, is_by_ref); + const llvm_i2 = try o.builder.intType(2); + const lhs_non_null_i2 = try self.wip.cast(.zext, lhs_non_null, llvm_i2, ""); + const rhs_non_null_i2 = try self.wip.cast(.zext, rhs_non_null, llvm_i2, ""); + const lhs_shifted = try self.wip.bin(.shl, lhs_non_null_i2, try o.builder.intValue(llvm_i2, 1), ""); + const lhs_rhs_ored = try self.wip.bin(.@"or", lhs_shifted, rhs_non_null_i2, ""); + const both_null_block = try self.wip.block(1, "BothNull"); + const mixed_block = try self.wip.block(1, "Mixed"); + 
const both_pl_block = try self.wip.block(1, "BothNonNull"); + const end_block = try self.wip.block(3, "End"); + var wip_switch = try self.wip.@"switch"(lhs_rhs_ored, mixed_block, 2); + defer wip_switch.finish(&self.wip); + try wip_switch.addCase( + try o.builder.intConst(llvm_i2, 0b00), + both_null_block, + &self.wip, + ); + try wip_switch.addCase( + try o.builder.intConst(llvm_i2, 0b11), + both_pl_block, + &self.wip, + ); + + self.wip.cursor = .{ .block = both_null_block }; + _ = try self.wip.br(end_block); + + self.wip.cursor = .{ .block = mixed_block }; + _ = try self.wip.br(end_block); + + self.wip.cursor = .{ .block = both_pl_block }; const lhs_payload = try self.optPayloadHandle(opt_llvm_ty, lhs, scalar_ty, true); const rhs_payload = try self.optPayloadHandle(opt_llvm_ty, rhs, scalar_ty, true); const payload_cmp = try self.cmp(lhs_payload, rhs_payload, payload_ty, op); - _ = self.builder.buildBr(end_block); - const both_pl_block_end = self.builder.getInsertBlock(); + _ = try self.wip.br(end_block); + const both_pl_block_end = self.wip.cursor.block; - self.builder.positionBuilderAtEnd(end_block); - const incoming_blocks: [3]*llvm.BasicBlock = .{ - both_null_block, - mixed_block, - both_pl_block_end, - }; - const llvm_i1 = self.context.intType(1); - const llvm_i1_0 = llvm_i1.constInt(0, .False); - const llvm_i1_1 = llvm_i1.constInt(1, .False); - const incoming_values: [3]*llvm.Value = .{ + self.wip.cursor = .{ .block = end_block }; + const llvm_i1_0 = try o.builder.intValue(.i1, 0); + const llvm_i1_1 = try o.builder.intValue(.i1, 1); + const incoming_values: [3]Builder.Value = .{ switch (op) { .eq => llvm_i1_1, .neq => llvm_i1_0, @@ -5231,47 +5585,48 @@ pub const FuncGen = struct { payload_cmp, }; - const phi_node = self.builder.buildPhi(llvm_i1, ""); - comptime assert(incoming_values.len == incoming_blocks.len); - phi_node.addIncoming( + const phi = try self.wip.phi(.i1, ""); + try phi.finish( &incoming_values, - &incoming_blocks, - incoming_values.len, + &.{ both_null_block, mixed_block, both_pl_block_end }, + &self.wip, ); - return phi_node; + return phi.toValue(); }, .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }), else => unreachable, }; const is_signed = int_ty.isSignedInt(mod); - const operation: llvm.IntPredicate = switch (op) { - .eq => .EQ, - .neq => .NE, - .lt => if (is_signed) llvm.IntPredicate.SLT else .ULT, - .lte => if (is_signed) llvm.IntPredicate.SLE else .ULE, - .gt => if (is_signed) llvm.IntPredicate.SGT else .UGT, - .gte => if (is_signed) llvm.IntPredicate.SGE else .UGE, + const cond: Builder.IntegerCondition = switch (op) { + .eq => .eq, + .neq => .ne, + .lt => if (is_signed) .slt else .ult, + .lte => if (is_signed) .sle else .ule, + .gt => if (is_signed) .sgt else .ugt, + .gte => if (is_signed) .sge else .uge, }; - return self.builder.buildICmp(operation, lhs, rhs, ""); + return self.wip.icmp(cond, lhs, rhs, ""); } - fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const inst_ty = self.typeOfIndex(inst); - const parent_bb = self.context.createBasicBlock("Block"); if (inst_ty.isNoReturn(mod)) { try self.genBody(body); - return null; + return .none; } - var breaks: BreakList = .{}; - defer breaks.deinit(self.gpa); + const 
have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod); + + var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 }; + defer if (have_block_result) breaks.list.deinit(self.gpa); + const parent_bb = try self.wip.block(0, "Block"); try self.blocks.putNoClobber(self.gpa, inst, .{ .parent_bb = parent_bb, .breaks = &breaks, @@ -5280,36 +5635,33 @@ pub const FuncGen = struct { try self.genBody(body); - self.llvm_func.appendExistingBasicBlock(parent_bb); - self.builder.positionBuilderAtEnd(parent_bb); + self.wip.cursor = .{ .block = parent_bb }; // Create a phi node only if the block returns a value. - const is_body = inst_ty.zigTypeTag(mod) == .Fn; - if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - - const raw_llvm_ty = try o.lowerType(inst_ty); - - const llvm_ty = ty: { - // If the zig tag type is a function, this represents an actual function body; not - // a pointer to it. LLVM IR allows the call instruction to use function bodies instead - // of function pointers, however the phi makes it a runtime value and therefore - // the LLVM type has to be wrapped in a pointer. - if (is_body or isByRef(inst_ty, mod)) { - break :ty self.context.pointerType(0); - } - break :ty raw_llvm_ty; - }; + if (have_block_result) { + const raw_llvm_ty = try o.lowerType(inst_ty); + const llvm_ty: Builder.Type = ty: { + // If the zig tag type is a function, this represents an actual function body; not + // a pointer to it. LLVM IR allows the call instruction to use function bodies instead + // of function pointers, however the phi makes it a runtime value and therefore + // the LLVM type has to be wrapped in a pointer. + if (inst_ty.zigTypeTag(mod) == .Fn or isByRef(inst_ty, mod)) { + break :ty .ptr; + } + break :ty raw_llvm_ty; + }; - const phi_node = self.builder.buildPhi(llvm_ty, ""); - phi_node.addIncoming( - breaks.items(.val).ptr, - breaks.items(.bb).ptr, - @as(c_uint, @intCast(breaks.len)), - ); - return phi_node; + parent_bb.ptr(&self.wip).incoming = @intCast(breaks.list.len); + const phi = try self.wip.phi(llvm_ty, ""); + try phi.finish(breaks.list.items(.val), breaks.list.items(.bb), &self.wip); + return phi.toValue(); + } else { + parent_bb.ptr(&self.wip).incoming = @intCast(breaks.len); + return .none; + } } - fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const branch = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(branch.block_inst).?; @@ -5317,42 +5669,39 @@ pub const FuncGen = struct { // Add the values to the lists only if the break provides a value. const operand_ty = self.typeOf(branch.operand); const mod = o.module; - if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) { + if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the // break instructions. 
- try block.breaks.append(self.gpa, .{ - .bb = self.builder.getInsertBlock(), - .val = val, - }); - } - _ = self.builder.buildBr(block.parent_bb); - return null; + try block.breaks.list.append(self.gpa, .{ .bb = self.wip.cursor.block, .val = val }); + } else block.breaks.len += 1; + _ = try self.wip.br(block.parent_bb); + return .none; } - fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const then_block = self.context.appendBasicBlock(self.llvm_func, "Then"); - const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); - _ = self.builder.buildCondBr(cond, then_block, else_block); + const then_block = try self.wip.block(1, "Then"); + const else_block = try self.wip.block(1, "Else"); + _ = try self.wip.brCond(cond, then_block, else_block); - self.builder.positionBuilderAtEnd(then_block); + self.wip.cursor = .{ .block = then_block }; try self.genBody(then_body); - self.builder.positionBuilderAtEnd(else_block); + self.wip.cursor = .{ .block = else_block }; try self.genBody(else_body); // No need to reset the insert cursor since this instruction is noreturn. - return null; + return .none; } - fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -5367,7 +5716,7 @@ pub const FuncGen = struct { return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } - fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5381,13 +5730,13 @@ pub const FuncGen = struct { fn lowerTry( fg: *FuncGen, - err_union: *llvm.Value, + err_union: Builder.Value, body: []const Air.Inst.Index, err_union_ty: Type, operand_is_ptr: bool, can_elide_load: bool, is_unused: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; const payload_ty = err_union_ty.errorUnionPayload(mod); @@ -5395,122 +5744,135 @@ pub const FuncGen = struct { const err_union_llvm_ty = try o.lowerType(err_union_ty); if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const is_err = err: { - const err_set_ty = try o.lowerType(Type.anyerror); - const zero = err_set_ty.constNull(); + const loaded = loaded: { if (!payload_has_bits) { // TODO add alignment to this load - const loaded = if (operand_is_ptr) - fg.builder.buildLoad(err_set_ty, err_union, "") + break :loaded if (operand_is_ptr) + try fg.wip.load(.normal, Builder.Type.err_int, err_union, .default, "") else err_union; - break :err fg.builder.buildICmp(.NE, loaded, zero, ""); } const err_field_index = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { - const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, ""); + const err_field_ptr = + try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, ""); // TODO add alignment to 
this load - const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, ""); - break :err fg.builder.buildICmp(.NE, loaded, zero, ""); + break :loaded try fg.wip.load( + .normal, + Builder.Type.err_int, + err_field_ptr, + .default, + "", + ); } - const loaded = fg.builder.buildExtractValue(err_union, err_field_index, ""); - break :err fg.builder.buildICmp(.NE, loaded, zero, ""); + break :loaded try fg.wip.extractValue(err_union, &.{err_field_index}, ""); }; + const zero = try o.builder.intValue(Builder.Type.err_int, 0); + const is_err = try fg.wip.icmp(.ne, loaded, zero, ""); - const return_block = fg.context.appendBasicBlock(fg.llvm_func, "TryRet"); - const continue_block = fg.context.appendBasicBlock(fg.llvm_func, "TryCont"); - _ = fg.builder.buildCondBr(is_err, return_block, continue_block); + const return_block = try fg.wip.block(1, "TryRet"); + const continue_block = try fg.wip.block(1, "TryCont"); + _ = try fg.wip.brCond(is_err, return_block, continue_block); - fg.builder.positionBuilderAtEnd(return_block); + fg.wip.cursor = .{ .block = return_block }; try fg.genBody(body); - fg.builder.positionBuilderAtEnd(continue_block); - } - if (is_unused) { - return null; - } - if (!payload_has_bits) { - return if (operand_is_ptr) err_union else null; + fg.wip.cursor = .{ .block = continue_block }; } + if (is_unused) return .none; + if (!payload_has_bits) return if (operand_is_ptr) err_union else .none; const offset = errUnionPayloadOffset(payload_ty, mod); if (operand_is_ptr) { - return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, ""); + return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); } else if (isByRef(err_union_ty, mod)) { - const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, ""); + const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, ""); + const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod)); if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; - return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false); + return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, ""); - load_inst.setAlignment(payload_ty.abiAlignment(mod)); - return load_inst; + const load_ty = err_union_llvm_ty.structFields(&o.builder)[offset]; + return fg.wip.load(.normal, load_ty, payload_ptr, payload_alignment, ""); } - return fg.builder.buildExtractValue(err_union, offset, ""); + return fg.wip.extractValue(err_union, &.{offset}, ""); } - fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); - const target = mod.getTarget(); - const llvm_usize = self.context.intType(target.ptrBitWidth()); - const cond_int = if (cond.typeOf().getTypeKind() == .Pointer) - self.builder.buildPtrToInt(cond, llvm_usize, "") + const else_block = try self.wip.block(1, "Default"); + const llvm_usize = try o.lowerType(Type.usize); + const cond_int = if (cond.typeOfWip(&self.wip).isPointer(&o.builder)) + try self.wip.cast(.ptrtoint, cond, llvm_usize, "") else cond; - const 
llvm_switch = self.builder.buildSwitch(cond_int, else_block, switch_br.data.cases_len); var extra_index: usize = switch_br.end; var case_i: u32 = 0; + var llvm_cases_len: u32 = 0; + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = self.air.extraData(Air.SwitchBr.Case, extra_index); + const items: []const Air.Inst.Ref = + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); + const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; + extra_index = case.end + case.data.items_len + case_body.len; + + llvm_cases_len += @intCast(items.len); + } + + var wip_switch = try self.wip.@"switch"(cond_int, else_block, llvm_cases_len); + defer wip_switch.finish(&self.wip); + extra_index = switch_br.end; + case_i = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len])); + const items: []const Air.Inst.Ref = + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; - const case_block = self.context.appendBasicBlock(self.llvm_func, "Case"); + const case_block = try self.wip.block(@intCast(items.len), "Case"); for (items) |item| { - const llvm_item = try self.resolveInst(item); - const llvm_int_item = if (llvm_item.typeOf().getTypeKind() == .Pointer) - llvm_item.constPtrToInt(llvm_usize) + const llvm_item = (try self.resolveInst(item)).toConst().?; + const llvm_int_item = if (llvm_item.typeOf(&o.builder).isPointer(&o.builder)) + try o.builder.castConst(.ptrtoint, llvm_item, llvm_usize) else llvm_item; - llvm_switch.addCase(llvm_int_item, case_block); + try wip_switch.addCase(llvm_int_item, case_block, &self.wip); } - self.builder.positionBuilderAtEnd(case_block); + self.wip.cursor = .{ .block = case_block }; try self.genBody(case_body); } - self.builder.positionBuilderAtEnd(else_block); + self.wip.cursor = .{ .block = else_block }; const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len]; if (else_body.len != 0) { try self.genBody(else_body); } else { - _ = self.builder.buildUnreachable(); + _ = try self.wip.@"unreachable"(); } // No need to reset the insert cursor since this instruction is noreturn. - return null; + return .none; } - fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const loop_block = self.context.appendBasicBlock(self.llvm_func, "Loop"); - _ = self.builder.buildBr(loop_block); + const loop_block = try self.wip.block(2, "Loop"); + _ = try self.wip.br(loop_block); - self.builder.positionBuilderAtEnd(loop_block); + self.wip.cursor = .{ .block = loop_block }; try self.genBody(body); // TODO instead of this logic, change AIR to have the property that @@ -5520,35 +5882,30 @@ pub const FuncGen = struct { // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. 
if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) { - _ = self.builder.buildBr(loop_block); + _ = try self.wip.br(loop_block); } - return null; + return .none; } - fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(mod); const llvm_usize = try o.lowerType(Type.usize); - const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); + const len = try o.builder.intValue(llvm_usize, array_ty.arrayLen(mod)); const slice_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); - if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); - return self.builder.buildInsertValue(partial, len, 1, ""); - } - const indices: [2]*llvm.Value = .{ - llvm_usize.constNull(), llvm_usize.constNull(), - }; - const array_llvm_ty = try o.lowerType(array_ty); - const ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indices, indices.len, ""); - const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), ptr, 0, ""); - return self.builder.buildInsertValue(partial, len, 1, ""); + if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) + return self.wip.buildAggregate(slice_llvm_ty, &.{ operand, len }, ""); + const ptr = try self.wip.gep(.inbounds, try o.lowerType(array_ty), operand, &.{ + try o.builder.intValue(llvm_usize, 0), try o.builder.intValue(llvm_usize, 0), + }, ""); + return self.wip.buildAggregate(slice_llvm_ty, &.{ ptr, len }, ""); } - fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -5562,51 +5919,53 @@ pub const FuncGen = struct { const dest_llvm_ty = try o.lowerType(dest_ty); const target = mod.getTarget(); - if (intrinsicsAllowed(dest_scalar_ty, target)) { - if (operand_scalar_ty.isSignedInt(mod)) { - return self.builder.buildSIToFP(operand, dest_llvm_ty, ""); - } else { - return self.builder.buildUIToFP(operand, dest_llvm_ty, ""); - } - } + if (intrinsicsAllowed(dest_scalar_ty, target)) return self.wip.conv( + if (operand_scalar_ty.isSignedInt(mod)) .signed else .unsigned, + operand, + dest_llvm_ty, + "", + ); - const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod))); - const rt_int_bits = compilerRtIntBits(operand_bits); - const rt_int_ty = self.context.intType(rt_int_bits); - var extended = e: { - if (operand_scalar_ty.isSignedInt(mod)) { - break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, ""); - } else { - break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, ""); - } - }; + const rt_int_bits = compilerRtIntBits(@intCast(operand_scalar_ty.bitSize(mod))); + const rt_int_ty = try o.builder.intType(rt_int_bits); + var extended = try self.wip.conv( + if (operand_scalar_ty.isSignedInt(mod)) .signed else .unsigned, + operand, + rt_int_ty, + "", + ); const dest_bits = dest_scalar_ty.floatBits(target); const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits); const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits); const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else 
"un"; - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{ + const fn_name = try o.builder.fmt("__float{s}{s}i{s}f", .{ sign_prefix, compiler_rt_operand_abbrev, compiler_rt_dest_abbrev, - }) catch unreachable; + }); - var param_types = [1]*llvm.Type{rt_int_ty}; + var param_type = rt_int_ty; if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have. - const v2i64 = self.context.intType(64).vectorType(2); - extended = self.builder.buildBitCast(extended, v2i64, ""); - param_types = [1]*llvm.Type{v2i64}; + param_type = try o.builder.vectorType(.normal, 2, .i64); + extended = try self.wip.cast(.bitcast, extended, param_type, ""); } - const libc_fn = self.getLibcFunction(fn_name, ¶m_types, dest_llvm_ty); - const params = [1]*llvm.Value{extended}; - - return self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); + const libc_fn = try self.getLibcFunction(fn_name, &.{param_type}, dest_llvm_ty); + const params = [1]*llvm.Value{extended.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall( + libc_fn.typeOf(&o.builder).toLlvm(&o.builder), + libc_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); } - fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airIntFromFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -5624,19 +5983,20 @@ pub const FuncGen = struct { if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag - if (dest_scalar_ty.isSignedInt(mod)) { - return self.builder.buildFPToSI(operand, dest_llvm_ty, ""); - } else { - return self.builder.buildFPToUI(operand, dest_llvm_ty, ""); - } + return self.wip.conv( + if (dest_scalar_ty.isSignedInt(mod)) .signed else .unsigned, + operand, + dest_llvm_ty, + "", + ); } - const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod)))); - const ret_ty = self.context.intType(rt_int_bits); + const rt_int_bits = compilerRtIntBits(@intCast(dest_scalar_ty.bitSize(mod))); + const ret_ty = try o.builder.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard // i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have. 
- break :b self.context.intType(64).vectorType(2); + break :b try o.builder.vectorType(.normal, 2, .i64); } else ret_ty; const operand_bits = operand_scalar_ty.floatBits(target); @@ -5645,66 +6005,66 @@ pub const FuncGen = struct { const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits); const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns"; - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{ + const fn_name = try o.builder.fmt("__fix{s}{s}f{s}i", .{ sign_prefix, compiler_rt_operand_abbrev, compiler_rt_dest_abbrev, - }) catch unreachable; + }); const operand_llvm_ty = try o.lowerType(operand_ty); - const param_types = [1]*llvm.Type{operand_llvm_ty}; - const libc_fn = self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); - const params = [1]*llvm.Value{operand}; - - var result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, ¶ms, params.len, .C, .Auto, ""); + const libc_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, libc_ret_ty); + const params = [1]*llvm.Value{operand.toLlvm(&self.wip)}; + var result = (try self.wip.unimplemented(libc_ret_ty, "")).finish(self.builder.buildCall( + libc_fn.typeOf(&o.builder).toLlvm(&o.builder), + libc_fn.toLlvm(&o.builder), + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); - if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty, ""); - if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty, ""); + if (libc_ret_ty != ret_ty) result = try self.wip.cast(.bitcast, result, ret_ty, ""); + if (ret_ty != dest_llvm_ty) result = try self.wip.cast(.trunc, result, dest_llvm_ty, ""); return result; } - fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { + fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; const mod = o.module; - if (ty.isSlice(mod)) { - return fg.builder.buildExtractValue(ptr, 0, ""); - } else { - return ptr; - } + return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } - fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { + fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { const o = fg.dg.object; const mod = o.module; - const target = mod.getTarget(); - const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); + const llvm_usize = try o.lowerType(Type.usize); switch (ty.ptrSize(mod)) { .Slice => { - const len = fg.builder.buildExtractValue(ptr, 1, ""); + const len = try fg.wip.extractValue(ptr, &.{1}, ""); const elem_ty = ty.childType(mod); const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; - const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False); - return fg.builder.buildMul(len, abi_size_llvm_val, ""); + const abi_size_llvm_val = try o.builder.intValue(llvm_usize, abi_size); + return fg.wip.bin(.@"mul nuw", len, abi_size_llvm_val, ""); }, .One => { const array_ty = ty.childType(mod); const elem_ty = array_ty.childType(mod); const abi_size = elem_ty.abiSize(mod); - return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False); + return o.builder.intValue(llvm_usize, array_ty.arrayLen(mod) * abi_size); }, .Many, .C => unreachable, } } - fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: u32) !Builder.Value { const ty_op = 
self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - return self.builder.buildExtractValue(operand, index, ""); + return self.wip.extractValue(operand, &.{index}, ""); } - fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -5712,10 +6072,10 @@ pub const FuncGen = struct { const slice_ptr_ty = self.typeOf(ty_op.operand); const slice_llvm_ty = try o.lowerPtrElemTy(slice_ptr_ty.childType(mod)); - return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); + return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, ""); } - fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -5725,20 +6085,20 @@ pub const FuncGen = struct { const index = try self.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(mod); const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); - const base_ptr = self.builder.buildExtractValue(slice, 0, ""); - const indices: [1]*llvm.Value = .{index}; - const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); + const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); + const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); if (isByRef(elem_ty, mod)) { if (self.canElideLoad(body_tail)) return ptr; - return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false); + const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.loadByRef(ptr, elem_ty, elem_alignment, false); } return self.load(ptr, slice_ty); } - fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5748,12 +6108,11 @@ pub const FuncGen = struct { const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); const llvm_elem_ty = try o.lowerPtrElemTy(slice_ty.childType(mod)); - const base_ptr = self.builder.buildExtractValue(slice, 0, ""); - const indices: [1]*llvm.Value = .{index}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); + const base_ptr = try self.wip.extractValue(slice, &.{0}, ""); + return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, ""); } - fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -5765,13 +6124,15 @@ pub const FuncGen = struct { const array_llvm_ty = try o.lowerType(array_ty); const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { - const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; + const indices: [2]Builder.Value = .{ + try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs, + }; if (isByRef(elem_ty, mod)) { - const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); - if 
(canElideLoad(self, body_tail)) - return elem_ptr; - - return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false); + const elem_ptr = + try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); + if (canElideLoad(self, body_tail)) return elem_ptr; + const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.loadByRef(elem_ptr, elem_ty, elem_alignment, false); } else { const elem_llvm_ty = try o.lowerType(elem_ty); if (Air.refToIndex(bin_op.lhs)) |lhs_index| { @@ -5781,26 +6142,38 @@ pub const FuncGen = struct { if (Air.refToIndex(load_ptr)) |load_ptr_index| { const load_ptr_tag = self.air.instructions.items(.tag)[load_ptr_index]; switch (load_ptr_tag) { - .struct_field_ptr, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3 => { + .struct_field_ptr, + .struct_field_ptr_index_0, + .struct_field_ptr_index_1, + .struct_field_ptr_index_2, + .struct_field_ptr_index_3, + => { const load_ptr_inst = try self.resolveInst(load_ptr); - const gep = self.builder.buildInBoundsGEP(array_llvm_ty, load_ptr_inst, &indices, indices.len, ""); - return self.builder.buildLoad(elem_llvm_ty, gep, ""); + const gep = try self.wip.gep( + .inbounds, + array_llvm_ty, + load_ptr_inst, + &indices, + "", + ); + return self.wip.load(.normal, elem_llvm_ty, gep, .default, ""); }, else => {}, } } } } - const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); - return self.builder.buildLoad(elem_llvm_ty, elem_ptr, ""); + const elem_ptr = + try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, ""); + return self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, ""); } } // This branch can be reached for vectors, which are always by-value. - return self.builder.buildExtractElement(array_llvm_val, rhs, ""); + return self.wip.extractElement(array_llvm_val, rhs, ""); } - fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -5811,32 +6184,28 @@ pub const FuncGen = struct { const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch - const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: { + const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod)) // If this is a single-item pointer to an array, we need another index in the GEP. 
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; - break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - } else ptr: { - const indices: [1]*llvm.Value = .{rhs}; - break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - }; + &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } + else + &.{rhs}, ""); if (isByRef(elem_ty, mod)) { - if (self.canElideLoad(body_tail)) - return ptr; - - return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false); + if (self.canElideLoad(body_tail)) return ptr; + const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.loadByRef(ptr, elem_ty, elem_alignment, false); } return self.load(ptr, ptr_ty); } - fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(mod); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return (try o.lowerPtrToVoid(ptr_ty)).toValue(); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -5845,17 +6214,14 @@ pub const FuncGen = struct { if (elem_ptr.ptrInfo(mod).flags.vector_index != .none) return base_ptr; const llvm_elem_ty = try o.lowerPtrElemTy(elem_ty); - if (ptr_ty.isSinglePointer(mod)) { + return try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(mod)) // If this is a single-item pointer to an array, we need another index in the GEP. 
- const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - } else { - const indices: [1]*llvm.Value = .{rhs}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - } + &.{ try o.builder.intValue(try o.lowerType(Type.usize), 0), rhs } + else + &.{rhs}, ""); } - fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const struct_ptr = try self.resolveInst(struct_field.struct_operand); @@ -5867,14 +6233,14 @@ pub const FuncGen = struct { self: *FuncGen, inst: Air.Inst.Index, field_index: u32, - ) !?*llvm.Value { + ) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolveInst(ty_op.operand); const struct_ptr_ty = self.typeOf(ty_op.operand); return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index); } - fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -5884,9 +6250,7 @@ pub const FuncGen = struct { const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return null; - } + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (!isByRef(struct_ty, mod)) { assert(!isByRef(field_ty, mod)); @@ -5896,25 +6260,26 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(struct_ty).?; const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index); const containing_int = struct_llvm_val; - const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); - const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); + const shift_amt = + try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset); + const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return 
self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); } - return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); + return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, ""); }, else => { const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index; - return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, ""); + return self.wip.extractValue(struct_llvm_val, &.{llvm_field_index}, ""); }, }, .Union => { @@ -5922,17 +6287,17 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); - return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, containing_int, same_size_int, ""); + return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } else if (field_ty.isPtrAtRuntime(mod)) { - const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod))); - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); - return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(mod))); + const truncated_int = + try self.wip.cast(.trunc, containing_int, same_size_int, ""); + return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); } - return self.builder.buildTrunc(containing_int, elem_llvm_ty, ""); + return self.wip.cast(.trunc, containing_int, elem_llvm_ty, ""); }, else => unreachable, } @@ -5943,7 +6308,8 @@ pub const FuncGen = struct { assert(struct_ty.containerLayout(mod) != .Packed); const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try o.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); + const field_ptr = + try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), .flags = .{ @@ -5955,7 +6321,8 @@ pub const FuncGen = struct { return field_ptr; assert(llvm_field.alignment != 0); - return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false); + const field_alignment = Builder.Alignment.fromByteUnits(llvm_field.alignment); + return self.loadByRef(field_ptr, field_ty, field_alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } @@ -5964,22 +6331,22 @@ pub const FuncGen = struct { const union_llvm_ty = try o.lowerType(struct_ty); const layout = struct_ty.unionGetLayout(mod); const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); - const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); + const field_ptr = + try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, ""); const llvm_field_ty = try o.lowerType(field_ty); + const payload_alignment = Builder.Alignment.fromByteUnits(layout.payload_align); if (isByRef(field_ty, mod)) { - if (canElideLoad(self, body_tail)) - return field_ptr; - - return self.loadByRef(field_ptr, field_ty, layout.payload_align, false); + if (canElideLoad(self, body_tail)) 
return field_ptr; + return self.loadByRef(field_ptr, field_ty, payload_alignment, false); } else { - return self.builder.buildLoad(llvm_field_ty, field_ptr, ""); + return self.wip.load(.normal, llvm_field_ty, field_ptr, payload_alignment, ""); } }, else => unreachable, } } - fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -5987,50 +6354,52 @@ pub const FuncGen = struct { const field_ptr = try self.resolveInst(extra.field_ptr); - const target = o.module.getTarget(); const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); + if (field_offset == 0) return field_ptr; const res_ty = try o.lowerType(self.air.getRefType(ty_pl.ty)); - if (field_offset == 0) { - return field_ptr; - } - const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); + const llvm_usize = try o.lowerType(Type.usize); - const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, ""); - const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), ""); - return self.builder.buildIntToPtr(base_ptr_int, res_ty, ""); + const field_ptr_int = try self.wip.cast(.ptrtoint, field_ptr, llvm_usize, ""); + const base_ptr_int = try self.wip.bin( + .@"sub nuw", + field_ptr_int, + try o.builder.intValue(llvm_usize, field_offset), + "", + ); + return self.wip.cast(.inttoptr, base_ptr_int, res_ty, ""); } - fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airNot(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - return self.builder.buildNot(operand, ""); + return self.wip.not(operand, ""); } - fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { + fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - _ = self.builder.buildUnreachable(); - return null; + _ = try self.wip.@"unreachable"(); + return .none; } - fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value { - const di_scope = self.di_scope orelse return null; + fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const di_scope = self.di_scope orelse return .none; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1)); - self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1)); + self.prev_dbg_line = @intCast(self.base_line + dbg_stmt.line + 1); + self.prev_dbg_column = @intCast(dbg_stmt.column + 1); const inlined_at = if (self.dbg_inlined.items.len > 0) self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc else null; self.builder.setCurrentDebugLocation(self.prev_dbg_line, self.prev_dbg_column, di_scope, inlined_at); - return null; + return .none; } - fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = o.module; @@ -6083,12 +6452,12 @@ pub const FuncGen = struct { const lexical_block = 
dib.createLexicalBlock(subprogram.toScope(), di_file, line_number, 1); self.di_scope = lexical_block.toScope(); self.base_line = decl.src_line; - return null; + return .none; } - fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - if (o.di_builder == null) return null; + if (o.di_builder == null) return .none; const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = o.module; @@ -6098,30 +6467,30 @@ pub const FuncGen = struct { const old = self.dbg_inlined.pop(); self.di_scope = old.scope; self.base_line = old.base_line; - return null; + return .none; } - fn airDbgBlockBegin(self: *FuncGen) !?*llvm.Value { + fn airDbgBlockBegin(self: *FuncGen) !Builder.Value { const o = self.dg.object; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const old_scope = self.di_scope.?; try self.dbg_block_stack.append(self.gpa, old_scope); const lexical_block = dib.createLexicalBlock(old_scope, self.di_file.?, self.prev_dbg_line, self.prev_dbg_column); self.di_scope = lexical_block.toScope(); - return null; + return .none; } - fn airDbgBlockEnd(self: *FuncGen) !?*llvm.Value { + fn airDbgBlockEnd(self: *FuncGen) !Builder.Value { const o = self.dg.object; - if (o.di_builder == null) return null; + if (o.di_builder == null) return .none; self.di_scope = self.dbg_block_stack.pop(); - return null; + return .none; } - fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -6141,22 +6510,20 @@ pub const FuncGen = struct { else null; const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at); - const insert_block = self.builder.getInsertBlock(); - _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block); - return null; + const insert_block = self.wip.cursor.block.toLlvm(&self.wip); + _ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); + return .none; } - fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const dib = o.di_builder orelse return null; + const dib = o.di_builder orelse return .none; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const operand_ty = self.typeOf(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); - if (needDbgVarWorkaround(o)) { - return null; - } + if (needDbgVarWorkaround(o)) return .none; const di_local_var = dib.createAutoVariable( self.di_scope.?, @@ -6172,23 +6539,22 @@ pub const FuncGen = struct { else null; const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at); - const insert_block = self.builder.getInsertBlock(); + const insert_block = self.wip.cursor.block.toLlvm(&self.wip); const mod = o.module; if (isByRef(operand_ty, mod)) { - _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block); + _ = 
dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = operand_ty.abiAlignment(mod); - const alloca = self.buildAlloca(operand.typeOf(), alignment); - const store_inst = self.builder.buildStore(operand, alloca); - store_inst.setAlignment(alignment); - _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block); + const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod)); + const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, operand, alloca, alignment); + _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else { - _ = dib.insertDbgValueIntrinsicAtEnd(operand, di_local_var, debug_loc, insert_block); + _ = dib.insertDbgValueIntrinsicAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } - return null; + return .none; } - fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { // Eventually, the Zig compiler needs to be reworked to have inline // assembly go through the same parsing code regardless of backend, and // have LLVM-flavored inline assembly be *output* from that assembler. @@ -6199,12 +6565,12 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; - const clobbers_len = @as(u31, @truncate(extra.data.flags)); + const clobbers_len: u31 = @truncate(extra.data.flags); var extra_i: usize = extra.end; - const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); + const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; - const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); + const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; var llvm_constraints: std.ArrayListUnmanaged(u8) = .{}; @@ -6217,15 +6583,15 @@ pub const FuncGen = struct { // The exact number of return / parameter values depends on which output values // are passed by reference as indirect outputs (determined below). const max_return_count = outputs.len; - const llvm_ret_types = try arena.alloc(*llvm.Type, max_return_count); + const llvm_ret_types = try arena.alloc(Builder.Type, max_return_count); const llvm_ret_indirect = try arena.alloc(bool, max_return_count); const max_param_count = inputs.len + outputs.len; - const llvm_param_types = try arena.alloc(*llvm.Type, max_param_count); + const llvm_param_types = try arena.alloc(Builder.Type, max_param_count); const llvm_param_values = try arena.alloc(*llvm.Value, max_param_count); // This stores whether we need to add an elementtype attribute and // if so, the element type itself. - const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count); + const llvm_param_attrs = try arena.alloc(Builder.Type, max_param_count); const mod = o.module; const target = mod.getTarget(); @@ -6262,8 +6628,8 @@ pub const FuncGen = struct { // Pass the result by reference as an indirect output (e.g. 
"=*m") llvm_constraints.appendAssumeCapacity('*'); - llvm_param_values[llvm_param_i] = output_inst; - llvm_param_types[llvm_param_i] = output_inst.typeOf(); + llvm_param_values[llvm_param_i] = output_inst.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = output_inst.typeOfWip(&self.wip); llvm_param_attrs[llvm_param_i] = elem_llvm_ty; llvm_param_i += 1; } else { @@ -6308,31 +6674,30 @@ pub const FuncGen = struct { const arg_llvm_value = try self.resolveInst(input); const arg_ty = self.typeOf(input); - var llvm_elem_ty: ?*llvm.Type = null; + var llvm_elem_ty: Builder.Type = .none; if (isByRef(arg_ty, mod)) { llvm_elem_ty = try o.lowerPtrElemTy(arg_ty); if (constraintAllowsMemory(constraint)) { - llvm_param_values[llvm_param_i] = arg_llvm_value; - llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); + llvm_param_values[llvm_param_i] = arg_llvm_value.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); const arg_llvm_ty = try o.lowerType(arg_ty); - const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, ""); - load_inst.setAlignment(alignment); - llvm_param_values[llvm_param_i] = load_inst; + const load_inst = + try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, ""); + llvm_param_values[llvm_param_i] = load_inst.toLlvm(&self.wip); llvm_param_types[llvm_param_i] = arg_llvm_ty; } } else { if (constraintAllowsRegister(constraint)) { - llvm_param_values[llvm_param_i] = arg_llvm_value; - llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); + llvm_param_values[llvm_param_i] = arg_llvm_value.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip); } else { - const alignment = arg_ty.abiAlignment(mod); - const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment); - const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr); - store_inst.setAlignment(alignment); - llvm_param_values[llvm_param_i] = arg_ptr; - llvm_param_types[llvm_param_i] = arg_ptr.typeOf(); + const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod)); + const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment); + llvm_param_values[llvm_param_i] = arg_ptr.toLlvm(&self.wip); + llvm_param_types[llvm_param_i] = arg_ptr.typeOfWip(&self.wip); } } @@ -6356,10 +6721,12 @@ pub const FuncGen = struct { // In the case of indirect inputs, LLVM requires the callsite to have // an elementtype(<ty>) attribute. 
if (constraint[0] == '*') { - llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse + llvm_param_attrs[llvm_param_i] = if (llvm_elem_ty != .none) + llvm_elem_ty + else try o.lowerPtrElemTy(arg_ty.childType(mod)); } else { - llvm_param_attrs[llvm_param_i] = null; + llvm_param_attrs[llvm_param_i] = .none; } llvm_param_i += 1; @@ -6477,23 +6844,14 @@ pub const FuncGen = struct { } const ret_llvm_ty = switch (return_count) { - 0 => self.context.voidType(), + 0 => .void, 1 => llvm_ret_types[0], - else => self.context.structType( - llvm_ret_types.ptr, - @as(c_uint, @intCast(return_count)), - .False, - ), + else => try o.builder.structType(.normal, llvm_ret_types), }; - const llvm_fn_ty = llvm.functionType( - ret_llvm_ty, - llvm_param_types.ptr, - @as(c_uint, @intCast(param_count)), - .False, - ); + const llvm_fn_ty = try o.builder.fnType(ret_llvm_ty, llvm_param_types[0..param_count], .normal); const asm_fn = llvm.getInlineAsm( - llvm_fn_ty, + llvm_fn_ty.toLlvm(&o.builder), rendered_template.items.ptr, rendered_template.items.len, llvm_constraints.items.ptr, @@ -6503,18 +6861,18 @@ pub const FuncGen = struct { .ATT, .False, ); - const call = self.builder.buildCall( - llvm_fn_ty, + const call = (try self.wip.unimplemented(ret_llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), asm_fn, llvm_param_values.ptr, - @as(c_uint, @intCast(param_count)), + @intCast(param_count), .C, .Auto, "", - ); + ), &self.wip); for (llvm_param_attrs[0..param_count], 0..) |llvm_elem_ty, i| { - if (llvm_elem_ty) |llvm_ty| { - llvm.setCallElemTypeAttr(call, i, llvm_ty); + if (llvm_elem_ty != .none) { + llvm.setCallElemTypeAttr(call.toLlvm(&self.wip), i, llvm_elem_ty.toLlvm(&o.builder)); } } @@ -6523,16 +6881,17 @@ pub const FuncGen = struct { for (outputs, 0..) 
|output, i| { if (llvm_ret_indirect[i]) continue; - const output_value = if (return_count > 1) b: { - break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), ""); - } else call; + const output_value = if (return_count > 1) + try self.wip.extractValue(call, &[_]u32{@intCast(llvm_ret_i)}, "") + else + call; if (output != .none) { const output_ptr = try self.resolveInst(output); const output_ptr_ty = self.typeOf(output); - const store_inst = self.builder.buildStore(output_value, output_ptr); - store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(output_ptr_ty.ptrAlignment(mod)); + _ = try self.wip.store(.normal, output_value, output_ptr, alignment); } else { ret_val = output_value; } @@ -6546,8 +6905,8 @@ pub const FuncGen = struct { self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool, - pred: llvm.IntPredicate, - ) !?*llvm.Value { + cond: Builder.IntegerCondition, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -6558,43 +6917,40 @@ pub const FuncGen = struct { const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(optional_llvm_ty, operand, "") + try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else operand; if (payload_ty.isSlice(mod)) { - const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - const ptr_ty = try o.lowerType(payload_ty.slicePtrFieldType(mod)); - return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); + const slice_ptr = try self.wip.extractValue(loaded, &.{0}, ""); + const ptr_ty = try o.builder.ptrType(toLlvmAddressSpace( + payload_ty.ptrAddressSpace(mod), + mod.getTarget(), + )); + return self.wip.icmp(cond, slice_ptr, try o.builder.nullValue(ptr_ty), ""); } - return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); + return self.wip.icmp(cond, loaded, try o.builder.zeroInitValue(optional_llvm_ty), ""); } comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(optional_llvm_ty, operand, "") + try self.wip.load(.normal, optional_llvm_ty, operand, .default, "") else operand; - const llvm_i8 = self.context.intType(8); - return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), ""); + return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), ""); } const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); - const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref); - if (pred == .EQ) { - return self.builder.buildNot(non_null_bit, ""); - } else { - return non_null_bit; - } + return self.optCmpNull(cond, optional_llvm_ty, operand, is_by_ref); } fn airIsErr( self: *FuncGen, inst: Air.Inst.Index, - op: llvm.IntPredicate, + cond: Builder.IntegerCondition, operand_is_ptr: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -6602,40 +6958,37 @@ pub const FuncGen = struct { const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(mod); - const err_set_ty = try o.lowerType(Type.anyerror); - const zero = err_set_ty.constNull(); + const zero = try o.builder.intValue(Builder.Type.err_int, 
0); if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const llvm_i1 = self.context.intType(1); - switch (op) { - .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 - .NE => return llvm_i1.constInt(0, .False), // 0 != 0 + const val: Builder.Constant = switch (cond) { + .eq => .true, // 0 == 0 + .ne => .false, // 0 != 0 else => unreachable, - } + }; + return val.toValue(); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) - self.builder.buildLoad(try o.lowerType(err_union_ty), operand, "") + try self.wip.load(.normal, try o.lowerType(err_union_ty), operand, .default, "") else operand; - return self.builder.buildICmp(op, loaded, zero, ""); + return self.wip.icmp(cond, loaded, zero, ""); } const err_field_index = errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty, mod)) { + const loaded = if (operand_is_ptr or isByRef(err_union_ty, mod)) loaded: { const err_union_llvm_ty = try o.lowerType(err_union_ty); - const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, ""); - const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, ""); - return self.builder.buildICmp(op, loaded, zero, ""); - } - - const loaded = self.builder.buildExtractValue(operand, err_field_index, ""); - return self.builder.buildICmp(op, loaded, zero, ""); + const err_field_ptr = + try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, ""); + break :loaded try self.wip.load(.normal, Builder.Type.err_int, err_field_ptr, .default, ""); + } else try self.wip.extractValue(operand, &.{err_field_index}, ""); + return self.wip.icmp(cond, loaded, zero, ""); } - fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -6651,11 +7004,10 @@ pub const FuncGen = struct { // The payload and the optional are the same value. return operand; } - const optional_llvm_ty = try o.lowerType(optional_ty); - return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, ""); + return self.wip.gepStruct(try o.lowerType(optional_ty), operand, 0, ""); } - fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { comptime assert(optional_layout_version == 3); const o = self.dg.object; @@ -6664,10 +7016,10 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = optional_ty.optionalChild(mod); - const non_null_bit = self.context.intType(8).constInt(1, .False); + const non_null_bit = try o.builder.intValue(.i8, 1); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. - _ = self.builder.buildStore(non_null_bit, operand); + _ = try self.wip.store(.normal, non_null_bit, operand, .default); return operand; } if (optional_ty.optionalReprIsPayload(mod)) { @@ -6678,18 +7030,17 @@ pub const FuncGen = struct { // First set the non-null bit. 
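         // Layout sketch, assuming an optional whose payload has runtime bits: the
         // lowered type is a two-field struct with the payload at index 0 and an i8
         // non-null flag at index 1 (matching the gepStruct indices below), e.g.
         //   ?u32  ->  { i32, i8 }
         // store i8 1 into field 1, then hand back a pointer to field 0.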
const optional_llvm_ty = try o.lowerType(optional_ty); - const non_null_ptr = self.builder.buildStructGEP(optional_llvm_ty, operand, 1, ""); + const non_null_ptr = try self.wip.gepStruct(optional_llvm_ty, operand, 1, ""); // TODO set alignment on this store - _ = self.builder.buildStore(non_null_bit, non_null_ptr); + _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); // Then return the payload pointer (only if it's used). - if (self.liveness.isUnused(inst)) - return null; + if (self.liveness.isUnused(inst)) return .none; - return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, ""); + return self.wip.gepStruct(optional_llvm_ty, operand, 0, ""); } - fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -6697,7 +7048,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOf(ty_op.operand); const payload_ty = self.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (optional_ty.optionalReprIsPayload(mod)) { // Payload value is the same as the optional value. @@ -6713,7 +7064,7 @@ pub const FuncGen = struct { self: *FuncGen, body_tail: []const Air.Inst.Index, operand_is_ptr: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -6725,32 +7076,30 @@ pub const FuncGen = struct { const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return if (operand_is_ptr) operand else null; + return if (operand_is_ptr) operand else .none; } const offset = errUnionPayloadOffset(payload_ty, mod); const err_union_llvm_ty = try o.lowerType(err_union_ty); if (operand_is_ptr) { - return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); + return self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); } else if (isByRef(err_union_ty, mod)) { - const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); + const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod)); + const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); if (isByRef(payload_ty, mod)) { - if (self.canElideLoad(body_tail)) - return payload_ptr; - - return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false); + if (self.canElideLoad(body_tail)) return payload_ptr; + return self.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } - const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, ""); - load_inst.setAlignment(payload_ty.abiAlignment(mod)); - return load_inst; + const payload_llvm_ty = err_union_llvm_ty.structFields(&o.builder)[offset]; + return self.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, ""); } - return self.builder.buildExtractValue(operand, offset, ""); + return self.wip.extractValue(operand, &.{offset}, ""); } fn airErrUnionErr( self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -6758,34 +7107,31 @@ pub const FuncGen = struct { 
const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { - const err_llvm_ty = try o.lowerType(Type.anyerror); if (operand_is_ptr) { return operand; } else { - return err_llvm_ty.constInt(0, .False); + return o.builder.intValue(Builder.Type.err_int, 0); } } - const err_set_llvm_ty = try o.lowerType(Type.anyerror); - const payload_ty = err_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (!operand_is_ptr) return operand; - return self.builder.buildLoad(err_set_llvm_ty, operand, ""); + return self.wip.load(.normal, Builder.Type.err_int, operand, .default, ""); } const offset = errUnionErrorOffset(payload_ty, mod); if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_union_llvm_ty = try o.lowerType(err_union_ty); - const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); - return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); + const err_field_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, ""); + return self.wip.load(.normal, Builder.Type.err_int, err_field_ptr, .default, ""); } - return self.builder.buildExtractValue(operand, offset, ""); + return self.wip.extractValue(operand, &.{offset}, ""); } - fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -6793,49 +7139,49 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); - const non_error_val = try o.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) }); + const non_error_val = try o.builder.intValue(Builder.Type.err_int, 0); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - _ = self.builder.buildStore(non_error_val, operand); + _ = try self.wip.store(.normal, non_error_val, operand, .default); return operand; } const err_union_llvm_ty = try o.lowerType(err_union_ty); { + const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod)); const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. - const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, ""); - const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); + const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, ""); + _ = try self.wip.store(.normal, non_error_val, non_null_ptr, error_alignment); } // Then return the payload pointer (only if it is used). 
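         // Layout sketch (hedged; the field order is alignment-dependent, which is
         // why errUnionErrorOffset/errUnionPayloadOffset are consulted instead of
         // fixed indices): an error union with a runtime payload lowers to a
         // two-field struct holding the error integer and the payload. The store
         // above writes error value 0 ("no error"); the GEP below returns a pointer
         // to the payload field so the caller can initialize it.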
- if (self.liveness.isUnused(inst)) - return null; + if (self.liveness.isUnused(inst)) return .none; const payload_offset = errUnionPayloadOffset(payload_ty, mod); - return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, ""); + return self.wip.gepStruct(err_union_llvm_ty, operand, payload_offset, ""); } - fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !?*llvm.Value { - return self.err_ret_trace.?; + fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !Builder.Value { + assert(self.err_ret_trace != .none); + return self.err_ret_trace; } - fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - self.err_ret_trace = operand; - return null; + self.err_ret_trace = try self.resolveInst(un_op); + return .none; } - fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - //const struct_ty = try self.resolveInst(ty_pl.ty); const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; const mod = o.module; const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try o.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); + assert(self.err_ret_trace != .none); + const field_ptr = + try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .child = llvm_field.ty.toIntern(), .flags = .{ @@ -6845,34 +7191,32 @@ pub const FuncGen = struct { return self.load(field_ptr, field_ptr_ty); } - fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.typeOf(ty_op.operand); - const non_null_bit = self.context.intType(8).constInt(1, .False); + const non_null_bit = try o.builder.intValue(.i8, 1); comptime assert(optional_layout_version == 3); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload(mod)) { - return operand; - } + if (optional_ty.optionalReprIsPayload(mod)) return operand; const llvm_optional_ty = try o.lowerType(optional_ty); if (isByRef(optional_ty, mod)) { - const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); - const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); + const alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod)); + const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment); + const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); - try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); - const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, ""); - _ = self.builder.buildStore(non_null_bit, non_null_ptr); + try self.store(payload_ptr, payload_ptr_ty, 
operand, .none); + const non_null_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 1, ""); + _ = try self.wip.store(.normal, non_null_bit, non_null_ptr, .default); return optional_ptr; } - const partial = self.builder.buildInsertValue(llvm_optional_ty.getUndef(), operand, 0, ""); - return self.builder.buildInsertValue(partial, non_null_bit, 1, ""); + return self.wip.buildAggregate(llvm_optional_ty, &.{ operand, non_null_bit }, ""); } - fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -6882,46 +7226,47 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } - const ok_err_code = (try o.lowerType(Type.anyerror)).constNull(); + const ok_err_code = try o.builder.intValue(Builder.Type.err_int, 0); const err_un_llvm_ty = try o.lowerType(err_un_ty); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); if (isByRef(err_un_ty, mod)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); - const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); - const store_inst = self.builder.buildStore(ok_err_code, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); - const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); + const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment); + const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); + const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod)); + _ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment); + const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); - try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); + try self.store(payload_ptr, payload_ptr_ty, operand, .none); return result_ptr; } - - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, error_offset, ""); - return self.builder.buildInsertValue(partial, operand, payload_offset, ""); + var fields: [2]Builder.Value = undefined; + fields[payload_offset] = operand; + fields[error_offset] = ok_err_code; + return self.wip.buildAggregate(err_un_llvm_ty, &fields, ""); } - fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return operand; - } + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return operand; const err_un_llvm_ty = try o.lowerType(err_un_ty); const payload_offset = errUnionPayloadOffset(payload_ty, mod); const error_offset = errUnionErrorOffset(payload_ty, mod); if (isByRef(err_un_ty, mod)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); - const err_ptr = 
self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); - const store_inst = self.builder.buildStore(operand, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); - const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); + const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment); + const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, ""); + const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod)); + _ = try self.wip.store(.normal, operand, err_ptr, error_alignment); + const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, ""); const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = payload_ptr; @@ -6929,34 +7274,52 @@ pub const FuncGen = struct { return result_ptr; } - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, error_offset, ""); // TODO set payload bytes to undef - return partial; + const undef = try o.builder.undefValue(err_un_llvm_ty); + return self.wip.insertValue(undef, operand, &.{error_offset}, ""); } - fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; - const llvm_u32 = self.context.intType(32); - const llvm_fn = self.getIntrinsic("llvm.wasm.memory.size", &.{llvm_u32}); - const args: [1]*llvm.Value = .{llvm_u32.constInt(index, .False)}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.size", &.{.i32}); + const args: [1]*llvm.Value = .{ + (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), + }; + return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.i32, &.{.i32}, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const index = pl_op.payload; const operand = try self.resolveInst(pl_op.operand); - const llvm_u32 = self.context.intType(32); - const llvm_fn = self.getIntrinsic("llvm.wasm.memory.grow", &.{llvm_u32}); + const llvm_fn = try self.getIntrinsic("llvm.wasm.memory.grow", &.{.i32}); const args: [2]*llvm.Value = .{ - llvm_u32.constInt(index, .False), - operand, + (try o.builder.intConst(.i32, index)).toLlvm(&o.builder), + operand.toLlvm(&self.wip), }; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.i32, &.{ .i32, .i32 }, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const data = self.air.instructions.items(.data)[inst].vector_store_elem; @@ 
-6967,19 +7330,20 @@ pub const FuncGen = struct { const index = try self.resolveInst(extra.lhs); const operand = try self.resolveInst(extra.rhs); - const loaded_vector = blk: { - const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); - const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); - load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); - load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); - break :blk load_inst; + const kind: Builder.MemoryAccessKind = switch (vector_ptr_ty.isVolatilePtr(mod)) { + false => .normal, + true => .@"volatile", }; - const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, ""); - try self.store(vector_ptr, vector_ptr_ty, modified_vector, .NotAtomic); - return null; + const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod)); + const alignment = Builder.Alignment.fromByteUnits(vector_ptr_ty.ptrAlignment(mod)); + const loaded = try self.wip.load(kind, elem_llvm_ty, vector_ptr, alignment, ""); + + const new_vector = try self.wip.insertElement(loaded, operand, index, ""); + _ = try self.store(vector_ptr, vector_ptr_ty, new_vector, .none); + return .none; } - fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -6988,11 +7352,13 @@ pub const FuncGen = struct { const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); - return self.builder.buildUMin(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.smin." + else + .@"llvm.umin.", lhs, rhs, ""); } - fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7001,26 +7367,23 @@ pub const FuncGen = struct { const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); - return self.builder.buildUMax(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.smax." + else + .@"llvm.umax.", lhs, rhs, ""); } - fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); const inst_ty = self.typeOfIndex(inst); - const llvm_slice_ty = try o.lowerType(inst_ty); - - // In case of slicing a global, the result type looks something like `{ i8*, i64 }` - // but `ptr` is pointing to the global directly. 
- const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, ""); - return self.builder.buildInsertValue(partial, len, 1, ""); + return self.wip.buildAggregate(try o.lowerType(inst_ty), &.{ ptr, len }, ""); } - fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7032,8 +7395,7 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, ""); - return self.builder.buildNUWAdd(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"add nsw" else .@"add nuw", lhs, rhs, ""); } fn airSafeArithmetic( @@ -7041,7 +7403,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, signed_intrinsic: []const u8, unsigned_intrinsic: []const u8, - ) !?*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; @@ -7057,42 +7419,50 @@ pub const FuncGen = struct { false => unsigned_intrinsic, }; const llvm_inst_ty = try o.lowerType(inst_ty); - const llvm_fn = fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); - const result_struct = fg.builder.buildCall( - llvm_fn.globalGetValueType(), + const llvm_ret_ty = try o.builder.structType(.normal, &.{ + llvm_inst_ty, + try llvm_inst_ty.changeScalar(.i1, &o.builder), + }); + const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_inst_ty, llvm_inst_ty }, .normal); + const llvm_fn = try fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty}); + const result_struct = (try fg.wip.unimplemented(llvm_ret_ty, "")).finish(fg.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), llvm_fn, - &[_]*llvm.Value{ lhs, rhs }, + &[_]*llvm.Value{ lhs.toLlvm(&fg.wip), rhs.toLlvm(&fg.wip) }, 2, .Fast, .Auto, "", - ); - const overflow_bit = fg.builder.buildExtractValue(result_struct, 1, ""); + ), &fg.wip); + const overflow_bit = try fg.wip.extractValue(result_struct, &.{1}, ""); const scalar_overflow_bit = switch (is_scalar) { true => overflow_bit, - false => fg.builder.buildOrReduce(overflow_bit), + false => (try fg.wip.unimplemented(.i1, "")).finish( + fg.builder.buildOrReduce(overflow_bit.toLlvm(&fg.wip)), + &fg.wip, + ), }; - const fail_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowFail"); - const ok_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowOk"); - _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block, ok_block); + const fail_block = try fg.wip.block(1, "OverflowFail"); + const ok_block = try fg.wip.block(1, "OverflowOk"); + _ = try fg.wip.brCond(scalar_overflow_bit, fail_block, ok_block); - fg.builder.positionBuilderAtEnd(fail_block); + fg.wip.cursor = .{ .block = fail_block }; try fg.buildSimplePanic(.integer_overflow); - fg.builder.positionBuilderAtEnd(ok_block); - return fg.builder.buildExtractValue(result_struct, 0, ""); + fg.wip.cursor = .{ .block = ok_block }; + return fg.wip.extractValue(result_struct, &.{0}, ""); } - fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildAdd(lhs, rhs, ""); + return self.wip.bin(.add, lhs, rhs, ""); } - fn airAddSat(self: 
*FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7102,12 +7472,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, ""); - - return self.builder.buildUAddSat(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.sadd.sat." + else + .@"llvm.uadd.sat.", lhs, rhs, ""); } - fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7119,19 +7490,18 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, ""); - return self.builder.buildNUWSub(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"sub nsw" else .@"sub nuw", lhs, rhs, ""); } - fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildSub(lhs, rhs, ""); + return self.wip.bin(.sub, lhs, rhs, ""); } - fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7141,11 +7511,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, ""); - return self.builder.buildUSubSat(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.ssub.sat." 
+ else + .@"llvm.usub.sat.", lhs, rhs, ""); } - fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7157,19 +7529,18 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, ""); - return self.builder.buildNUWMul(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .@"mul nsw" else .@"mul nuw", lhs, rhs, ""); } - fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildMul(lhs, rhs, ""); + return self.wip.bin(.mul, lhs, rhs, ""); } - fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7179,11 +7550,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, ""); - return self.builder.buildUMulFixSat(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"llvm.smul.fix.sat." + else + .@"llvm.umul.fix.sat.", lhs, rhs, ""); } - fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7194,7 +7567,7 @@ pub const FuncGen = struct { return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); } - fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7209,11 +7582,10 @@ pub const FuncGen = struct { const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); return self.buildFloatOp(.trunc, inst_ty, 1, .{result}); } - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, ""); - return self.builder.buildUDiv(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) .sdiv else .udiv, lhs, rhs, ""); } - fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7230,31 +7602,24 @@ pub const FuncGen = struct { } if (scalar_ty.isSignedInt(mod)) { const inst_llvm_ty = try o.lowerType(inst_ty); - const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = try o.lowerType(scalar_ty); - - const shifts = try self.gpa.alloc(*llvm.Value, vec_len); - defer 
self.gpa.free(shifts); - - @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False)); - break :const_vector llvm.constVector(shifts.ptr, vec_len); - } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False); + const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst( + inst_llvm_ty.scalarType(&o.builder), + inst_llvm_ty.scalarBits(&o.builder) - 1, + )); - const div = self.builder.buildSDiv(lhs, rhs, ""); - const rem = self.builder.buildSRem(lhs, rhs, ""); - const div_sign = self.builder.buildXor(lhs, rhs, ""); - const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); - const zero = inst_llvm_ty.constNull(); - const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); - const correction = self.builder.buildSelect(rem_nonzero, div_sign_mask, zero, ""); - return self.builder.buildNSWAdd(div, correction, ""); + const div = try self.wip.bin(.sdiv, lhs, rhs, ""); + const rem = try self.wip.bin(.srem, lhs, rhs, ""); + const div_sign = try self.wip.bin(.xor, lhs, rhs, ""); + const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, ""); + const zero = try o.builder.zeroInitValue(inst_llvm_ty); + const rem_nonzero = try self.wip.icmp(.ne, rem, zero, ""); + const correction = try self.wip.select(rem_nonzero, div_sign_mask, zero, ""); + return self.wip.bin(.@"add nsw", div, correction, ""); } - return self.builder.buildUDiv(lhs, rhs, ""); + return self.wip.bin(.udiv, lhs, rhs, ""); } - fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7266,11 +7631,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, ""); - return self.builder.buildExactUDiv(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .@"sdiv exact" + else + .@"udiv exact", lhs, rhs, ""); } - fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7282,11 +7649,13 @@ pub const FuncGen = struct { const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, ""); - return self.builder.buildURem(lhs, rhs, ""); + return self.wip.bin(if (scalar_ty.isSignedInt(mod)) + .srem + else + .urem, lhs, rhs, ""); } - fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; @@ -7302,36 +7671,29 @@ pub const FuncGen = struct { const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); const b = try self.buildFloatOp(.add, inst_ty, 2, .{ a, rhs }); const c = try self.buildFloatOp(.fmod, inst_ty, 2, .{ b, rhs }); - const zero = inst_llvm_ty.constNull(); + const zero = try o.builder.zeroInitValue(inst_llvm_ty); const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); - return self.builder.buildSelect(ltz, c, a, ""); + 
return self.wip.select(ltz, c, a, ""); } if (scalar_ty.isSignedInt(mod)) { - const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(mod); - const scalar_llvm_ty = try o.lowerType(scalar_ty); - - const shifts = try self.gpa.alloc(*llvm.Value, vec_len); - defer self.gpa.free(shifts); + const bit_size_minus_one = try o.builder.splatValue(inst_llvm_ty, try o.builder.intConst( + inst_llvm_ty.scalarType(&o.builder), + inst_llvm_ty.scalarBits(&o.builder) - 1, + )); - @memset(shifts, scalar_llvm_ty.constInt(scalar_bit_size_minus_one, .False)); - break :const_vector llvm.constVector(shifts.ptr, vec_len); - } else inst_llvm_ty.constInt(scalar_bit_size_minus_one, .False); - - const rem = self.builder.buildSRem(lhs, rhs, ""); - const div_sign = self.builder.buildXor(lhs, rhs, ""); - const div_sign_mask = self.builder.buildAShr(div_sign, bit_size_minus_one, ""); - const rhs_masked = self.builder.buildAnd(rhs, div_sign_mask, ""); - const zero = inst_llvm_ty.constNull(); - const rem_nonzero = self.builder.buildICmp(.NE, rem, zero, ""); - const correction = self.builder.buildSelect(rem_nonzero, rhs_masked, zero, ""); - return self.builder.buildNSWAdd(rem, correction, ""); + const rem = try self.wip.bin(.srem, lhs, rhs, ""); + const div_sign = try self.wip.bin(.xor, lhs, rhs, ""); + const div_sign_mask = try self.wip.bin(.ashr, div_sign, bit_size_minus_one, ""); + const rhs_masked = try self.wip.bin(.@"and", rhs, div_sign_mask, ""); + const zero = try o.builder.zeroInitValue(inst_llvm_ty); + const rem_nonzero = try self.wip.icmp(.ne, rem, zero, ""); + const correction = try self.wip.select(rem_nonzero, rhs_masked, zero, ""); + return self.wip.bin(.@"add nsw", rem, correction, ""); } - return self.builder.buildURem(lhs, rhs, ""); + return self.wip.bin(.urem, lhs, rhs, ""); } - fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -7341,49 +7703,37 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod)); switch (ptr_ty.ptrSize(mod)) { - .One => { - // It's a pointer to an array, so according to LLVM we need an extra GEP index. - const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset }; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, - .C, .Many => { - const indices: [1]*llvm.Value = .{offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, + // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
+ .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{ + try o.builder.intValue(try o.lowerType(Type.usize), 0), offset, + }, ""), + .C, .Many => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{offset}, ""), .Slice => { - const base = self.builder.buildExtractValue(ptr, 0, ""); - const indices: [1]*llvm.Value = .{offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, ""); + const base = try self.wip.extractValue(ptr, &.{0}, ""); + return self.wip.gep(.inbounds, llvm_elem_ty, base, &.{offset}, ""); }, } } - fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); - const negative_offset = self.builder.buildNeg(offset, ""); + const negative_offset = try self.wip.neg(offset, ""); const ptr_ty = self.typeOf(bin_op.lhs); const llvm_elem_ty = try o.lowerPtrElemTy(ptr_ty.childType(mod)); switch (ptr_ty.ptrSize(mod)) { - .One => { - // It's a pointer to an array, so according to LLVM we need an extra GEP index. - const indices: [2]*llvm.Value = .{ - self.context.intType(32).constNull(), negative_offset, - }; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, - .C, .Many => { - const indices: [1]*llvm.Value = .{negative_offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, ""); - }, + // It's a pointer to an array, so according to LLVM we need an extra GEP index. + .One => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{ + try o.builder.intValue(try o.lowerType(Type.usize), 0), negative_offset, + }, ""), + .C, .Many => return self.wip.gep(.inbounds, llvm_elem_ty, ptr, &.{negative_offset}, ""), .Slice => { - const base = self.builder.buildExtractValue(ptr, 0, ""); - const indices: [1]*llvm.Value = .{negative_offset}; - return self.builder.buildInBoundsGEP(llvm_elem_ty, base, &indices, indices.len, ""); + const base = try self.wip.extractValue(ptr, &.{0}, ""); + return self.wip.gep(.inbounds, llvm_elem_ty, base, &.{negative_offset}, ""); }, } } @@ -7393,7 +7743,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, signed_intrinsic: []const u8, unsigned_intrinsic: []const u8, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -7408,81 +7758,123 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; - const llvm_lhs_ty = try o.lowerType(lhs_ty); const llvm_dest_ty = try o.lowerType(dest_ty); + const llvm_lhs_ty = try o.lowerType(lhs_ty); - const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); - const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); + const llvm_fn = try self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); + const llvm_ret_ty = try o.builder.structType( + .normal, + &.{ llvm_lhs_ty, try llvm_lhs_ty.changeScalar(.i1, &o.builder) }, + ); + const llvm_fn_ty = try o.builder.fnType(llvm_ret_ty, &.{ llvm_lhs_ty, llvm_lhs_ty }, .normal); + const result_struct = (try self.wip.unimplemented(llvm_ret_ty, "")).finish( + self.builder.buildCall( + 
llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &[_]*llvm.Value{ lhs.toLlvm(&self.wip), rhs.toLlvm(&self.wip) }, + 2, + .Fast, + .Auto, + "", + ), + &self.wip, + ); - const result = self.builder.buildExtractValue(result_struct, 0, ""); - const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); + const result = try self.wip.extractValue(result_struct, &.{0}, ""); + const overflow_bit = try self.wip.extractValue(result_struct, &.{1}, ""); const result_index = llvmField(dest_ty, 0, mod).?.index; const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod); - const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); + const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); - const store_inst = self.builder.buildStore(result, field_ptr); - store_inst.setAlignment(result_alignment); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); + _ = try self.wip.store(.normal, result, field_ptr, result_alignment); } { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, overflow_index, ""); - const store_inst = self.builder.buildStore(overflow_bit, field_ptr); - store_inst.setAlignment(1); + const overflow_alignment = comptime Builder.Alignment.fromByteUnits(1); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, ""); + _ = try self.wip.store(.normal, overflow_bit, field_ptr, overflow_alignment); } return alloca_inst; } - const partial = self.builder.buildInsertValue(llvm_dest_ty.getUndef(), result, result_index, ""); - return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, ""); + var fields: [2]Builder.Value = undefined; + fields[result_index] = result; + fields[overflow_index] = overflow_bit; + return self.wip.buildAggregate(llvm_dest_ty, &fields, ""); } fn buildElementwiseCall( self: *FuncGen, - llvm_fn: *llvm.Value, - args_vectors: []const *llvm.Value, - result_vector: *llvm.Value, + llvm_fn: Builder.Function.Index, + args_vectors: []const Builder.Value, + result_vector: Builder.Value, vector_len: usize, - ) !*llvm.Value { - const args_len = @as(c_uint, @intCast(args_vectors.len)); - const llvm_i32 = self.context.intType(32); - assert(args_len <= 3); + ) !Builder.Value { + const o = self.dg.object; + assert(args_vectors.len <= 3); + + const llvm_fn_ty = llvm_fn.typeOf(&o.builder); + const llvm_scalar_ty = llvm_fn_ty.functionReturn(&o.builder); var i: usize = 0; var result = result_vector; while (i < vector_len) : (i += 1) { - const index_i32 = llvm_i32.constInt(i, .False); + const index_i32 = try o.builder.intValue(.i32, i); var args: [3]*llvm.Value = undefined; - for (args_vectors, 0..) 
|arg_vector, k| { - args[k] = self.builder.buildExtractElement(arg_vector, index_i32, ""); + for (args[0..args_vectors.len], args_vectors) |*arg_elem, arg_vector| { + arg_elem.* = (try self.wip.extractElement(arg_vector, index_i32, "")).toLlvm(&self.wip); } - const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, ""); - result = self.builder.buildInsertElement(result, result_elem, index_i32, ""); + const result_elem = (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &args, + @intCast(args_vectors.len), + .C, + .Auto, + "", + ), + &self.wip, + ); + result = try self.wip.insertElement(result, result_elem, index_i32, ""); } return result; } fn getLibcFunction( self: *FuncGen, - fn_name: [:0]const u8, - param_types: []const *llvm.Type, - return_type: *llvm.Type, - ) *llvm.Value { - const o = self.dg.object; - return o.llvm_module.getNamedFunction(fn_name.ptr) orelse b: { - const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len); - break :b if (alias) |a| a.getAliasee() else null; - } orelse b: { - const params_len = @as(c_uint, @intCast(param_types.len)); - const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False); - const f = o.llvm_module.addFunction(fn_name, fn_type); - break :b f; + fn_name: Builder.String, + param_types: []const Builder.Type, + return_type: Builder.Type, + ) Allocator.Error!Builder.Function.Index { + const o = self.dg.object; + if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) { + .alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function, + .function => |function| function, + else => unreachable, + }; + + const fn_type = try o.builder.fnType(return_type, param_types, .normal); + const f = o.llvm_module.addFunction(fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); + + var global = Builder.Global{ + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), }; + + try o.builder.llvm.globals.append(self.gpa, f); + _ = try o.builder.addGlobal(fn_name, global); + try o.builder.functions.append(self.gpa, function); + return global.kind.function; } /// Creates a floating point comparison by lowering to the appropriate @@ -7491,8 +7883,8 @@ pub const FuncGen = struct { self: *FuncGen, pred: math.CompareOperator, ty: Type, - params: [2]*llvm.Value, - ) !*llvm.Value { + params: [2]Builder.Value, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const target = o.module.getTarget(); @@ -7500,20 +7892,19 @@ pub const FuncGen = struct { const scalar_llvm_ty = try o.lowerType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { - const llvm_predicate: llvm.RealPredicate = switch (pred) { - .eq => .OEQ, - .neq => .UNE, - .lt => .OLT, - .lte => .OLE, - .gt => .OGT, - .gte => .OGE, + const cond: Builder.FloatCondition = switch (pred) { + .eq => .oeq, + .neq => .une, + .lt => .olt, + .lte => .ole, + .gt => .ogt, + .gte => .oge, }; - return self.builder.buildFCmp(llvm_predicate, params[0], params[1], ""); + return self.wip.fcmp(cond, params[0], params[1], ""); } const float_bits = scalar_ty.floatBits(target); const compiler_rt_float_abbrev = compilerRtFloatAbbrev(float_bits); - var fn_name_buf: [64]u8 = undefined; const fn_base_name = switch (pred) { .neq => "ne", .eq => "eq", @@ 
-7522,37 +7913,50 @@ pub const FuncGen = struct { .gt => "gt", .gte => "ge", }; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__{s}{s}f2", .{ - fn_base_name, compiler_rt_float_abbrev, - }) catch unreachable; - - const param_types = [2]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty }; - const llvm_i32 = self.context.intType(32); - const libc_fn = self.getLibcFunction(fn_name, param_types[0..], llvm_i32); - - const zero = llvm_i32.constInt(0, .False); - const int_pred: llvm.IntPredicate = switch (pred) { - .eq => .EQ, - .neq => .NE, - .lt => .SLT, - .lte => .SLE, - .gt => .SGT, - .gte => .SGE, + const fn_name = try o.builder.fmt("__{s}{s}f2", .{ fn_base_name, compiler_rt_float_abbrev }); + + const libc_fn = try self.getLibcFunction( + fn_name, + ([1]Builder.Type{scalar_llvm_ty} ** 2)[0..], + .i32, + ); + + const zero = try o.builder.intConst(.i32, 0); + const int_cond: Builder.IntegerCondition = switch (pred) { + .eq => .eq, + .neq => .ne, + .lt => .slt, + .lte => .sle, + .gt => .sgt, + .gte => .sge, }; if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(mod); - const vector_result_ty = llvm_i32.vectorType(vec_len); + const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32); - var result = vector_result_ty.getUndef(); - result = try self.buildElementwiseCall(libc_fn, &params, result, vec_len); + const init = try o.builder.poisonValue(vector_result_ty); + const result = try self.buildElementwiseCall(libc_fn, &params, init, vec_len); - const zero_vector = self.builder.buildVectorSplat(vec_len, zero, ""); - return self.builder.buildICmp(int_pred, result, zero_vector, ""); + const zero_vector = try o.builder.splatValue(vector_result_ty, zero); + return self.wip.icmp(int_cond, result, zero_vector, ""); } - const result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, ""); - return self.builder.buildICmp(int_pred, result, zero, ""); + const llvm_fn_ty = libc_fn.typeOf(&o.builder); + const llvm_params = [2]*llvm.Value{ params[0].toLlvm(&self.wip), params[1].toLlvm(&self.wip) }; + const result = (try self.wip.unimplemented( + llvm_fn_ty.functionReturn(&o.builder), + "", + )).finish(self.builder.buildCall( + libc_fn.typeOf(&o.builder).toLlvm(&o.builder), + libc_fn.toLlvm(&o.builder), + &llvm_params, + llvm_params.len, + .C, + .Auto, + "", + ), &self.wip); + return self.wip.icmp(int_cond, result, zero.toValue(), ""); } const FloatOp = enum { @@ -7583,7 +7987,7 @@ pub const FuncGen = struct { const FloatOpStrat = union(enum) { intrinsic: []const u8, - libc: [:0]const u8, + libc: Builder.String, }; /// Creates a floating point operation (add, sub, fma, sqrt, exp, etc.)
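An illustrative sketch, not part of the diff: the buildFloatCmp fallback in the hunk above names the compiler-rt comparison helper as __{op}{abbrev}f2 and then tests the returned integer against zero with the matching signed predicate. Assuming the standard compiler_rt signature for the f128 ("tf") helper, the lowering of a < b amounts to the following; the extern declaration and wrapper are hypothetical examples written for this note, not code from this change.

// Sketch only: soft-float a < b via compiler_rt, mirroring the
// "icmp slt result, 0" that buildFloatCmp emits for the .lt case.
extern fn __lttf2(a: f128, b: f128) c_int;

fn softLessThan(a: f128, b: f128) bool {
    return __lttf2(a, b) < 0;
}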
@@ -7594,27 +7998,25 @@ pub const FuncGen = struct { comptime op: FloatOp, ty: Type, comptime params_len: usize, - params: [params_len]*llvm.Value, - ) !*llvm.Value { + params: [params_len]Builder.Value, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); const llvm_ty = try o.lowerType(ty); - const scalar_llvm_ty = try o.lowerType(scalar_ty); const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); - var fn_name_buf: [64]u8 = undefined; const strat: FloatOpStrat = if (intrinsics_allowed) switch (op) { // Some operations are dedicated LLVM instructions, not available as intrinsics - .neg => return self.builder.buildFNeg(params[0], ""), - .add => return self.builder.buildFAdd(params[0], params[1], ""), - .sub => return self.builder.buildFSub(params[0], params[1], ""), - .mul => return self.builder.buildFMul(params[0], params[1], ""), - .div => return self.builder.buildFDiv(params[0], params[1], ""), - .fmod => return self.builder.buildFRem(params[0], params[1], ""), - .fmax => return self.builder.buildMaxNum(params[0], params[1], ""), - .fmin => return self.builder.buildMinNum(params[0], params[1], ""), + .neg => return self.wip.un(.fneg, params[0], ""), + .add => return self.wip.bin(.fadd, params[0], params[1], ""), + .sub => return self.wip.bin(.fsub, params[0], params[1], ""), + .mul => return self.wip.bin(.fmul, params[0], params[1], ""), + .div => return self.wip.bin(.fdiv, params[0], params[1], ""), + .fmod => return self.wip.bin(.frem, params[0], params[1], ""), + .fmax => return self.wip.bin(.@"llvm.maxnum.", params[0], params[1], ""), + .fmin => return self.wip.bin(.@"llvm.minnum.", params[0], params[1], ""), else => .{ .intrinsic = "llvm." ++ @tagName(op) }, } else b: { const float_bits = scalar_ty.floatBits(target); @@ -7622,26 +8024,19 @@ pub const FuncGen = struct { .neg => { // In this case we can generate a softfloat negation by XORing the // bits with a constant. 
- const int_llvm_ty = self.context.intType(float_bits); - const one = int_llvm_ty.constInt(1, .False); - const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); - const sign_mask = one.constShl(shift_amt); - const result = if (ty.zigTypeTag(mod) == .Vector) blk: { - const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, ""); - const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod)); - const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); - break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); - } else blk: { - const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, ""); - break :blk self.builder.buildXor(bitcasted_operand, sign_mask, ""); - }; - return self.builder.buildBitCast(result, llvm_ty, ""); - }, - .add, .sub, .div, .mul => FloatOpStrat{ - .libc = std.fmt.bufPrintZ(&fn_name_buf, "__{s}{s}f3", .{ - @tagName(op), compilerRtFloatAbbrev(float_bits), - }) catch unreachable, + const int_ty = try o.builder.intType(@intCast(float_bits)); + const cast_ty = try llvm_ty.changeScalar(int_ty, &o.builder); + const sign_mask = try o.builder.splatValue( + cast_ty, + try o.builder.intConst(int_ty, @as(u128, 1) << @intCast(float_bits - 1)), + ); + const bitcasted_operand = try self.wip.cast(.bitcast, params[0], cast_ty, ""); + const result = try self.wip.bin(.xor, bitcasted_operand, sign_mask, ""); + return self.wip.cast(.bitcast, result, llvm_ty, ""); }, + .add, .sub, .div, .mul => .{ .libc = try o.builder.fmt("__{s}{s}f3", .{ + @tagName(op), compilerRtFloatAbbrev(float_bits), + }) }, .ceil, .cos, .exp, @@ -7660,31 +8055,48 @@ pub const FuncGen = struct { .sqrt, .tan, .trunc, - => FloatOpStrat{ - .libc = std.fmt.bufPrintZ(&fn_name_buf, "{s}{s}{s}", .{ - libcFloatPrefix(float_bits), @tagName(op), libcFloatSuffix(float_bits), - }) catch unreachable, - }, + => .{ .libc = try o.builder.fmt("{s}{s}{s}", .{ + libcFloatPrefix(float_bits), @tagName(op), libcFloatSuffix(float_bits), + }) }, }; }; - const llvm_fn: *llvm.Value = switch (strat) { - .intrinsic => |fn_name| self.getIntrinsic(fn_name, &.{llvm_ty}), + const llvm_fn = switch (strat) { + .intrinsic => |fn_name| try self.getIntrinsic(fn_name, &.{llvm_ty}), .libc => |fn_name| b: { - const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty }; - const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); + const scalar_llvm_ty = llvm_ty.scalarType(&o.builder); + const libc_fn = try self.getLibcFunction( + fn_name, + ([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len], + scalar_llvm_ty, + ); if (ty.zigTypeTag(mod) == .Vector) { - const result = llvm_ty.getUndef(); + const result = try o.builder.poisonValue(llvm_ty); return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod)); } - break :b libc_fn; + break :b libc_fn.toLlvm(&o.builder); }, }; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params_len, .C, .Auto, ""); + const llvm_fn_ty = try o.builder.fnType( + llvm_ty, + ([1]Builder.Type{llvm_ty} ** 3)[0..params.len], + .normal, + ); + var llvm_params: [params_len]*llvm.Value = undefined; + for (&llvm_params, params) |*llvm_param, param| llvm_param.* = param.toLlvm(&self.wip); + return (try self.wip.unimplemented(llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &llvm_params, + params_len, + .C, + .Auto, + "", + ), &self.wip); } - fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn
airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; @@ -7696,7 +8108,7 @@ pub const FuncGen = struct { return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend }); } - fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -7706,72 +8118,67 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.typeOf(extra.lhs); - const rhs_ty = self.typeOf(extra.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); const dest_ty = self.typeOfIndex(inst); const llvm_dest_ty = try o.lowerType(dest_ty); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "") - else - rhs; + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); - const result = self.builder.buildShl(lhs, casted_rhs, ""); - const reconstructed = if (lhs_scalar_ty.isSignedInt(mod)) - self.builder.buildAShr(result, casted_rhs, "") + const result = try self.wip.bin(.shl, lhs, casted_rhs, ""); + const reconstructed = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) + .ashr else - self.builder.buildLShr(result, casted_rhs, ""); + .lshr, result, casted_rhs, ""); - const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, ""); + const overflow_bit = try self.wip.icmp(.ne, lhs, reconstructed, ""); const result_index = llvmField(dest_ty, 0, mod).?.index; const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { - const result_alignment = dest_ty.abiAlignment(mod); - const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); + const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment); { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); - const store_inst = self.builder.buildStore(result, field_ptr); - store_inst.setAlignment(result_alignment); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, ""); + _ = try self.wip.store(.normal, result, field_ptr, result_alignment); } { - const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, overflow_index, ""); - const store_inst = self.builder.buildStore(overflow_bit, field_ptr); - store_inst.setAlignment(1); + const field_alignment = comptime Builder.Alignment.fromByteUnits(1); + const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, ""); + _ = try self.wip.store(.normal, overflow_bit, field_ptr, field_alignment); } - return alloca_inst; } - const partial = self.builder.buildInsertValue(llvm_dest_ty.getUndef(), result, result_index, ""); - return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, ""); + var fields: [2]Builder.Value = undefined; + fields[result_index] = result; + fields[overflow_index] = overflow_bit; + return self.wip.buildAggregate(llvm_dest_ty, &fields, ""); } - fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = 
self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildAnd(lhs, rhs, ""); + return self.wip.bin(.@"and", lhs, rhs, ""); } - fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airOr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildOr(lhs, rhs, ""); + return self.wip.bin(.@"or", lhs, rhs, ""); } - fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airXor(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - return self.builder.buildXor(lhs, rhs, ""); + return self.wip.bin(.xor, lhs, rhs, ""); } - fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7780,39 +8187,29 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "") + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); + return self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) + .@"shl nsw" else - rhs; - if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, ""); - return self.builder.buildNUWShl(lhs, casted_rhs, ""); + .@"shl nuw", lhs, casted_rhs, ""); } - fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_type = self.typeOf(bin_op.lhs); - const rhs_type = self.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_type.scalarType(mod); - const rhs_scalar_ty = rhs_type.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_type), "") - else - rhs; - return self.builder.buildShl(lhs, casted_rhs, ""); + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_type), ""); + return self.wip.bin(.shl, lhs, casted_rhs, ""); } - fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7821,42 +8218,36 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); const lhs_bits = lhs_scalar_ty.bitSize(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits) - 
self.builder.buildZExt(rhs, lhs.typeOf(), "") - else - rhs; + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); - const result = if (lhs_scalar_ty.isSignedInt(mod)) - self.builder.buildSShlSat(lhs, casted_rhs, "") + const result = try self.wip.bin(if (lhs_scalar_ty.isSignedInt(mod)) + .@"llvm.sshl.sat." else - self.builder.buildUShlSat(lhs, casted_rhs, ""); + .@"llvm.ushl.sat.", lhs, casted_rhs, ""); // LLVM langref says "If b is (statically or dynamically) equal to or // larger than the integer bit width of the arguments, the result is a // poison value." // However Zig semantics says that saturating shift left can never produce // undefined; instead it saturates. - const lhs_scalar_llvm_ty = try o.lowerType(lhs_scalar_ty); - const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); - const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); - if (rhs_ty.zigTypeTag(mod) == .Vector) { - const vec_len = rhs_ty.vectorLen(mod); - const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); - const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, ""); - const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, ""); - return self.builder.buildSelect(in_range, result, lhs_max_vec, ""); - } else { - const in_range = self.builder.buildICmp(.ULT, rhs, bits, ""); - return self.builder.buildSelect(in_range, result, lhs_max, ""); - } + const lhs_llvm_ty = try o.lowerType(lhs_ty); + const lhs_scalar_llvm_ty = lhs_llvm_ty.scalarType(&o.builder); + const bits = try o.builder.splatValue( + lhs_llvm_ty, + try o.builder.intConst(lhs_scalar_llvm_ty, lhs_bits), + ); + const lhs_max = try o.builder.splatValue( + lhs_llvm_ty, + try o.builder.intConst(lhs_scalar_llvm_ty, -1), + ); + const in_range = try self.wip.icmp(.ult, rhs, bits, ""); + return self.wip.select(in_range, result, lhs_max, ""); } - fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value { + fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -7865,63 +8256,41 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.typeOf(bin_op.lhs); - const rhs_ty = self.typeOf(bin_op.rhs); const lhs_scalar_ty = lhs_ty.scalarType(mod); - const rhs_scalar_ty = rhs_ty.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) - self.builder.buildZExt(rhs, try o.lowerType(lhs_ty), "") - else - rhs; + const casted_rhs = try self.wip.conv(.unsigned, rhs, try o.lowerType(lhs_ty), ""); const is_signed_int = lhs_scalar_ty.isSignedInt(mod); - if (is_exact) { - if (is_signed_int) { - return self.builder.buildAShrExact(lhs, casted_rhs, ""); - } else { - return self.builder.buildLShrExact(lhs, casted_rhs, ""); - } - } else { - if (is_signed_int) { - return self.builder.buildAShr(lhs, casted_rhs, ""); - } else { - return self.builder.buildLShr(lhs, casted_rhs, ""); - } - } + return self.wip.bin(if (is_exact) + if (is_signed_int) .@"ashr exact" else .@"lshr exact" + else if (is_signed_int) .ashr else .lshr, lhs, casted_rhs, ""); } - fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.typeOfIndex(inst); - const dest_info = dest_ty.intInfo(mod); const dest_llvm_ty = try 
o.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(mod); - if (operand_info.bits < dest_info.bits) { - switch (operand_info.signedness) { - .signed => return self.builder.buildSExt(operand, dest_llvm_ty, ""), - .unsigned => return self.builder.buildZExt(operand, dest_llvm_ty, ""), - } - } else if (operand_info.bits > dest_info.bits) { - return self.builder.buildTrunc(operand, dest_llvm_ty, ""); - } else { - return operand; - } + return self.wip.conv(switch (operand_info.signedness) { + .signed => .signed, + .unsigned => .unsigned, + }, operand, dest_llvm_ty, ""); } - fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); - return self.builder.buildTrunc(operand, dest_llvm_ty, ""); + return self.wip.cast(.trunc, operand, dest_llvm_ty, ""); } - fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -7933,26 +8302,30 @@ pub const FuncGen = struct { const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = try o.lowerType(dest_ty); - return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); + return self.wip.cast(.fptrunc, operand, try o.lowerType(dest_ty), ""); } else { const operand_llvm_ty = try o.lowerType(operand_ty); const dest_llvm_ty = try o.lowerType(dest_ty); - var fn_name_buf: [64]u8 = undefined; - const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__trunc{s}f{s}f2", .{ + const fn_name = try o.builder.fmt("__trunc{s}f{s}f2", .{ compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits), - }) catch unreachable; - - const params = [1]*llvm.Value{operand}; - const param_types = [1]*llvm.Type{operand_llvm_ty}; - const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + }); - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); + const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty); + const params = [1]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &params, + params.len, + .C, + .Auto, + "", + ), &self.wip); } } - fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -7964,36 +8337,40 @@ pub const FuncGen = struct { const src_bits = operand_ty.floatBits(target); if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) { - const dest_llvm_ty = try o.lowerType(dest_ty); - return self.builder.buildFPExt(operand, dest_llvm_ty, ""); + return self.wip.cast(.fpext, operand, try o.lowerType(dest_ty), ""); } else { const operand_llvm_ty = try o.lowerType(operand_ty); const dest_llvm_ty = try o.lowerType(dest_ty); - var fn_name_buf: [64]u8 = undefined;
- const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__extend{s}f{s}f2", .{ + const fn_name = try o.builder.fmt("__extend{s}f{s}f2", .{ compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits), - }) catch unreachable; - - const params = [1]*llvm.Value{operand}; - const param_types = [1]*llvm.Type{operand_llvm_ty}; - const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty); + }); - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); + const llvm_fn = try self.getLibcFunction(fn_name, &.{operand_llvm_ty}, dest_llvm_ty); + const params = [1]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(dest_llvm_ty, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &params, + params.len, + .C, + .Auto, + "", + ), &self.wip); } } - fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty); + const operand_ptr = try self.sliceOrArrayPtr(operand, ptr_ty); const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); - return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, ""); + return self.wip.cast(.ptrtoint, operand_ptr, dest_llvm_ty, ""); } - fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value { + fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const inst_ty = self.typeOfIndex(inst); @@ -8001,7 +8378,7 @@ pub const FuncGen = struct { return self.bitCast(operand, operand_ty, inst_ty); } - fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value { + fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value { const o = self.dg.object; const mod = o.module; const operand_is_ref = isByRef(operand_ty, mod); @@ -8013,14 +8390,14 @@ pub const FuncGen = struct { return operand; } - if (llvm_dest_ty.getTypeKind() == .Integer and - operand.typeOf().getTypeKind() == .Integer) + if (llvm_dest_ty.isInteger(&o.builder) and + operand.typeOfWip(&self.wip).isInteger(&o.builder)) { - return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, ""); + return self.wip.conv(.unsigned, operand, llvm_dest_ty, ""); } if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) { - return self.builder.buildIntToPtr(operand, llvm_dest_ty, ""); + return self.wip.cast(.inttoptr, operand, llvm_dest_ty, ""); } if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) { @@ -8028,104 +8405,97 @@ pub const FuncGen = struct { if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } - const array_ptr = self.buildAlloca(llvm_dest_ty, null); + const array_ptr = try self.buildAlloca(llvm_dest_ty, .default); const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { - const llvm_store = self.builder.buildStore(operand, array_ptr); - llvm_store.setAlignment(inst_ty.abiAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod)); + _ = try self.wip.store(.normal, operand, array_ptr, alignment); } else { // If the ABI size
of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. const llvm_usize = try o.lowerType(Type.usize); - const llvm_u32 = self.context.intType(32); - const zero = llvm_usize.constNull(); + const usize_zero = try o.builder.intValue(llvm_usize, 0); const vector_len = operand_ty.arrayLen(mod); var i: u64 = 0; while (i < vector_len) : (i += 1) { - const index_usize = llvm_usize.constInt(i, .False); - const index_u32 = llvm_u32.constInt(i, .False); - const indexes: [2]*llvm.Value = .{ zero, index_usize }; - const elem_ptr = self.builder.buildInBoundsGEP(llvm_dest_ty, array_ptr, &indexes, indexes.len, ""); - const elem = self.builder.buildExtractElement(operand, index_u32, ""); - _ = self.builder.buildStore(elem, elem_ptr); + const elem_ptr = try self.wip.gep(.inbounds, llvm_dest_ty, array_ptr, &.{ + usize_zero, try o.builder.intValue(llvm_usize, i), + }, ""); + const elem = + try self.wip.extractElement(operand, try o.builder.intValue(.i32, i), ""); + _ = try self.wip.store(.normal, elem, elem_ptr, .default); } } return array_ptr; } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { const elem_ty = operand_ty.childType(mod); const llvm_vector_ty = try o.lowerType(inst_ty); - if (!operand_is_ref) { - return self.dg.todo("implement bitcast non-ref array to vector", .{}); - } + if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{}); const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { - const vector = self.builder.buildLoad(llvm_vector_ty, operand, ""); // The array is aligned to the element's alignment, while the vector might have a completely // different alignment. This means we need to enforce the alignment of this load. - vector.setAlignment(elem_ty.abiAlignment(mod)); - return vector; + const alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + return self.wip.load(.normal, llvm_vector_ty, operand, alignment, ""); } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. 
const array_llvm_ty = try o.lowerType(operand_ty); const elem_llvm_ty = try o.lowerType(elem_ty); const llvm_usize = try o.lowerType(Type.usize); - const llvm_u32 = self.context.intType(32); - const zero = llvm_usize.constNull(); + const usize_zero = try o.builder.intValue(llvm_usize, 0); const vector_len = operand_ty.arrayLen(mod); - var vector = llvm_vector_ty.getUndef(); + var vector = try o.builder.poisonValue(llvm_vector_ty); var i: u64 = 0; while (i < vector_len) : (i += 1) { - const index_usize = llvm_usize.constInt(i, .False); - const index_u32 = llvm_u32.constInt(i, .False); - const indexes: [2]*llvm.Value = .{ zero, index_usize }; - const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indexes, indexes.len, ""); - const elem = self.builder.buildLoad(elem_llvm_ty, elem_ptr, ""); - vector = self.builder.buildInsertElement(vector, elem, index_u32, ""); + const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, operand, &.{ + usize_zero, try o.builder.intValue(llvm_usize, i), + }, ""); + const elem = try self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, ""); + vector = + try self.wip.insertElement(vector, elem, try o.builder.intValue(.i32, i), ""); } - return vector; } } if (operand_is_ref) { - const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, ""); - load_inst.setAlignment(operand_ty.abiAlignment(mod)); - return load_inst; + const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod)); + return self.wip.load(.normal, llvm_dest_ty, operand, alignment, ""); } if (result_is_ref) { - const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); - const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); - const store_inst = self.builder.buildStore(operand, result_ptr); - store_inst.setAlignment(alignment); + const alignment = Builder.Alignment.fromByteUnits( + @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)), + ); + const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment); + _ = try self.wip.store(.normal, operand, result_ptr, alignment); return result_ptr; } - if (llvm_dest_ty.getTypeKind() == .Struct) { + if (llvm_dest_ty.isStruct(&o.builder)) { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values. // Therefore, we store operand to alloca, then load for result. 
- const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); - const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); - const store_inst = self.builder.buildStore(operand, result_ptr); - store_inst.setAlignment(alignment); - const load_inst = self.builder.buildLoad(llvm_dest_ty, result_ptr, ""); - load_inst.setAlignment(alignment); - return load_inst; + const alignment = Builder.Alignment.fromByteUnits( + @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)), + ); + const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment); + _ = try self.wip.store(.normal, operand, result_ptr, alignment); + return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, ""); } - return self.builder.buildBitCast(operand, llvm_dest_ty, ""); + return self.wip.cast(.bitcast, operand, llvm_dest_ty, ""); } - fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); return operand; } - fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const arg_val = self.args[self.arg_index]; @@ -8133,9 +8503,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); if (o.di_builder) |dib| { - if (needDbgVarWorkaround(o)) { - return arg_val; - } + if (needDbgVarWorkaround(o)) return arg_val; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const func_index = self.dg.decl.getOwnedFunctionIndex(); @@ -8150,61 +8518,64 @@ pub const FuncGen = struct { try o.lowerDebugType(inst_ty, .full), true, // always preserve 0, // flags - self.arg_index, // includes +1 because 0 is return type + @intCast(self.arg_index), // includes +1 because 0 is return type ); const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null); - const insert_block = self.builder.getInsertBlock(); + const insert_block = self.wip.cursor.block.toLlvm(&self.wip); if (isByRef(inst_ty, mod)) { - _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block); + _ = dib.insertDeclareAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else if (o.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(mod); - const alloca = self.buildAlloca(arg_val.typeOf(), alignment); - const store_inst = self.builder.buildStore(arg_val, alloca); - store_inst.setAlignment(alignment); - _ = dib.insertDeclareAtEnd(alloca, di_local_var, debug_loc, insert_block); + const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod)); + const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); + _ = try self.wip.store(.normal, arg_val, alloca, alignment); + _ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } else { - _ = dib.insertDbgValueIntrinsicAtEnd(arg_val, di_local_var, debug_loc, insert_block); + _ = dib.insertDbgValueIntrinsicAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block); } } return arg_val; } - fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(mod); if 
(!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) - return o.lowerPtrToVoid(ptr_ty); + return (try o.lowerPtrToVoid(ptr_ty)).toValue(); const pointee_llvm_ty = try o.lowerType(pointee_type); - const alignment = ptr_ty.ptrAlignment(mod); + const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); return self.buildAlloca(pointee_llvm_ty, alignment); } - fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return o.lowerPtrToVoid(ptr_ty); - if (self.ret_ptr) |ret_ptr| return ret_ptr; + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) + return (try o.lowerPtrToVoid(ptr_ty)).toValue(); + if (self.ret_ptr != .none) return self.ret_ptr; const ret_llvm_ty = try o.lowerType(ret_ty); - return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + return self.buildAlloca(ret_llvm_ty, alignment); } /// Use this instead of builder.buildAlloca, because this function makes sure to /// put the alloca instruction at the top of the function! - fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type, alignment: ?c_uint) *llvm.Value { - const o = self.dg.object; - const mod = o.module; - const target = mod.getTarget(); - return buildAllocaInner(self.context, self.builder, self.llvm_func, self.di_scope != null, llvm_ty, alignment, target); + fn buildAlloca( + self: *FuncGen, + llvm_ty: Builder.Type, + alignment: Builder.Alignment, + ) Allocator.Error!Builder.Value { + const target = self.dg.object.module.getTarget(); + return buildAllocaInner(&self.wip, self.di_scope != null, llvm_ty, alignment, target); } - fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8217,25 +8588,30 @@ pub const FuncGen = struct { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. 
- const u8_llvm_ty = self.context.intType(8); const fill_byte = if (safety) - u8_llvm_ty.constInt(0xaa, .False) + try o.builder.intConst(.i8, 0xaa) else - u8_llvm_ty.getUndef(); + try o.builder.undefConst(.i8); const operand_size = operand_ty.abiSize(mod); - const usize_llvm_ty = try o.lowerType(Type.usize); - const len = usize_llvm_ty.constInt(operand_size, .False); - const dest_ptr_align = ptr_ty.ptrAlignment(mod); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); + const usize_ty = try o.lowerType(Type.usize); + const len = try o.builder.intValue(usize_ty, operand_size); + const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&o.builder), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + ptr_ty.isVolatilePtr(mod), + ), &self.wip); if (safety and mod.comp.bin_file.options.valgrind) { - self.valgrindMarkUndef(dest_ptr, len); + try self.valgrindMarkUndef(dest_ptr, len); } - return null; + return .none; } const src_operand = try self.resolveInst(bin_op.rhs); - try self.store(dest_ptr, ptr_ty, src_operand, .NotAtomic); - return null; + try self.store(dest_ptr, ptr_ty, src_operand, .none); + return .none; } /// As an optimization, we want to avoid unnecessary copies of isByRef=true @@ -8260,7 +8636,7 @@ pub const FuncGen = struct { return false; } - fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { const o = fg.dg.object; const mod = o.module; const inst = body_tail[0]; @@ -8277,22 +8653,40 @@ pub const FuncGen = struct { return fg.load(ptr, ptr_ty); } - fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const llvm_fn = self.getIntrinsic("llvm.trap", &.{}); - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .Cold, .Auto, ""); - _ = self.builder.buildUnreachable(); - return null; + const o = self.dg.object; + const llvm_fn = try self.getIntrinsic("llvm.trap", &.{}); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + undefined, + 0, + .Cold, + .Auto, + "", + ), &self.wip); + _ = try self.wip.@"unreachable"(); + return .none; } - fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{}); - _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .C, .Auto, ""); - return null; + const o = self.dg.object; + const llvm_fn = try self.getIntrinsic("llvm.debugtrap", &.{}); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.void, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + undefined, + 0, + .C, + .Auto, + "", + ), &self.wip); + return .none; } - fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; const mod = o.module; @@ -8300,43 +8694,61 @@ pub const FuncGen = struct { const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // 
https://github.com/ziglang/zig/issues/11946 - return llvm_usize.constNull(); + return o.builder.intValue(llvm_usize, 0); } - const llvm_i32 = self.context.intType(32); - const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{}); - const params = [_]*llvm.Value{llvm_i32.constNull()}; - const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); - return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); + const llvm_fn = try self.getIntrinsic("llvm.returnaddress", &.{}); + const params = [_]*llvm.Value{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + }; + const ptr_val = (try self.wip.unimplemented(.ptr, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.ptr, &.{.i32}, .normal)).toLlvm(&o.builder), + llvm_fn, + &params, + params.len, + .Fast, + .Auto, + "", + ), &self.wip); + return self.wip.cast(.ptrtoint, ptr_val, llvm_usize, ""); } - fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; const o = self.dg.object; - const llvm_i32 = self.context.intType(32); const llvm_fn_name = "llvm.frameaddress.p0"; const llvm_fn = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - const llvm_p0i8 = self.context.pointerType(0); - const param_types = [_]*llvm.Type{llvm_i32}; - const fn_type = llvm.functionType(llvm_p0i8, &param_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.ptr, &.{.i32}, .normal); + break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type.toLlvm(&o.builder)); }; + const llvm_fn_ty = try o.builder.fnType(.ptr, &.{.i32}, .normal); - const params = [_]*llvm.Value{llvm_i32.constNull()}; - const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); - const llvm_usize = try o.lowerType(Type.usize); - return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); + const params = [_]*llvm.Value{ + (try o.builder.intConst(.i32, 0)).toLlvm(&o.builder), + }; + const ptr_val = (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn, + &params, + params.len, + .Fast, + .Auto, + "", + ), + &self.wip, + ); + return self.wip.cast(.ptrtoint, ptr_val, try o.lowerType(Type.usize), ""); } - fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airFence(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const atomic_order = self.air.instructions.items(.data)[inst].fence; - const llvm_memory_order = toLlvmAtomicOrdering(atomic_order); - const single_threaded = llvm.Bool.fromBool(self.single_threaded); - _ = self.builder.buildFence(llvm_memory_order, single_threaded, ""); - return null; + const ordering = toLlvmAtomicOrdering(atomic_order); + _ = try self.wip.fence(self.sync_scope, ordering); + return .none; } - fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value { + fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -8345,46 +8757,51 @@ pub const FuncGen = struct { var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); const operand_ty = self.typeOf(extra.ptr).childType(mod); - const opt_abi_ty = o.getAtomicAbiType(operand_ty, false); - if
(opt_abi_ty) |abi_ty| { + const llvm_operand_ty = try o.lowerType(operand_ty); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); + if (llvm_abi_ty != .none) { // operand needs widening and truncating - if (operand_ty.isSignedInt(mod)) { - expected_value = self.builder.buildSExt(expected_value, abi_ty, ""); - new_value = self.builder.buildSExt(new_value, abi_ty, ""); - } else { - expected_value = self.builder.buildZExt(expected_value, abi_ty, ""); - new_value = self.builder.buildZExt(new_value, abi_ty, ""); - } + const signedness: Builder.Function.Instruction.Cast.Signedness = + if (operand_ty.isSignedInt(mod)) .signed else .unsigned; + expected_value = try self.wip.conv(signedness, expected_value, llvm_abi_ty, ""); + new_value = try self.wip.conv(signedness, new_value, llvm_abi_ty, ""); } - const result = self.builder.buildAtomicCmpXchg( - ptr, - expected_value, - new_value, - toLlvmAtomicOrdering(extra.successOrder()), - toLlvmAtomicOrdering(extra.failureOrder()), - llvm.Bool.fromBool(self.single_threaded), + + const llvm_result_ty = try o.builder.structType(.normal, &.{ + if (llvm_abi_ty != .none) llvm_abi_ty else llvm_operand_ty, + .i1, + }); + const result = (try self.wip.unimplemented(llvm_result_ty, "")).finish( + self.builder.buildAtomicCmpXchg( + ptr.toLlvm(&self.wip), + expected_value.toLlvm(&self.wip), + new_value.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.successOrder()))), + @enumFromInt(@intFromEnum(toLlvmAtomicOrdering(extra.failureOrder()))), + llvm.Bool.fromBool(self.sync_scope == .singlethread), + ), + &self.wip, ); - result.setWeak(llvm.Bool.fromBool(is_weak)); + result.toLlvm(&self.wip).setWeak(llvm.Bool.fromBool(is_weak)); const optional_ty = self.typeOfIndex(inst); - var payload = self.builder.buildExtractValue(result, 0, ""); - if (opt_abi_ty != null) { - payload = self.builder.buildTrunc(payload, try o.lowerType(operand_ty), ""); - } - const success_bit = self.builder.buildExtractValue(result, 1, ""); + var payload = try self.wip.extractValue(result, &.{0}, ""); + if (llvm_abi_ty != .none) payload = try self.wip.cast(.trunc, payload, llvm_operand_ty, ""); + const success_bit = try self.wip.extractValue(result, &.{1}, ""); if (optional_ty.optionalReprIsPayload(mod)) { - return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); + const zero = try o.builder.zeroInitValue(payload.typeOfWip(&self.wip)); + return self.wip.select(success_bit, zero, payload, ""); } comptime assert(optional_layout_version == 3); - const non_null_bit = self.builder.buildNot(success_bit, ""); + const non_null_bit = try self.wip.not(success_bit, ""); return buildOptional(self, optional_ty, payload, non_null_bit); } - fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; @@ -8397,120 +8814,146 @@ pub const FuncGen = struct { const is_float = operand_ty.isRuntimeFloat(); const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); - const single_threaded = llvm.Bool.fromBool(self.single_threaded); - const opt_abi_ty = o.getAtomicAbiType(operand_ty, op == .Xchg); - if (opt_abi_ty) |abi_ty| { + const single_threaded = llvm.Bool.fromBool(self.sync_scope == .singlethread); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, op == .Xchg); + const llvm_operand_ty 
= try o.lowerType(operand_ty); + if (llvm_abi_ty != .none) { // operand needs widening and truncating or bitcasting. - const casted_operand = if (is_float) - self.builder.buildBitCast(operand, abi_ty, "") - else if (is_signed_int) - self.builder.buildSExt(operand, abi_ty, "") - else - self.builder.buildZExt(operand, abi_ty, ""); + const casted_operand = try self.wip.cast( + if (is_float) .bitcast else if (is_signed_int) .sext else .zext, + @enumFromInt(@intFromEnum(operand)), + llvm_abi_ty, + "", + ); - const uncasted_result = self.builder.buildAtomicRmw( - op, - ptr, - casted_operand, - ordering, - single_threaded, + const uncasted_result = (try self.wip.unimplemented(llvm_abi_ty, "")).finish( + self.builder.buildAtomicRmw( + op, + ptr.toLlvm(&self.wip), + casted_operand.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(ordering)), + single_threaded, + ), + &self.wip, ); - const operand_llvm_ty = try o.lowerType(operand_ty); + if (is_float) { - return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); + return self.wip.cast(.bitcast, uncasted_result, llvm_operand_ty, ""); } else { - return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, ""); + return self.wip.cast(.trunc, uncasted_result, llvm_operand_ty, ""); } } - if (operand.typeOf().getTypeKind() != .Pointer) { - return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded); + if (!llvm_operand_ty.isPointer(&o.builder)) { + return (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildAtomicRmw( + op, + ptr.toLlvm(&self.wip), + operand.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(ordering)), + single_threaded, + ), + &self.wip, + ); } // It's a pointer but we need to treat it as an int. - const usize_llvm_ty = try o.lowerType(Type.usize); - const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); - const uncasted_result = self.builder.buildAtomicRmw( - op, - ptr, - casted_operand, - ordering, - single_threaded, + const llvm_usize = try o.lowerType(Type.usize); + const casted_operand = try self.wip.cast(.ptrtoint, operand, llvm_usize, ""); + const uncasted_result = (try self.wip.unimplemented(llvm_usize, "")).finish( + self.builder.buildAtomicRmw( + op, + ptr.toLlvm(&self.wip), + casted_operand.toLlvm(&self.wip), + @enumFromInt(@intFromEnum(ordering)), + single_threaded, + ), + &self.wip, ); - const operand_llvm_ty = try o.lowerType(operand_ty); - return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); + return self.wip.cast(.inttoptr, uncasted_result, llvm_operand_ty, ""); } - fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); - const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = ptr_info.child.toType(); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) - return null; + const info = ptr_ty.ptrInfo(mod); + const elem_ty = info.child.toType(); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; const ordering = toLlvmAtomicOrdering(atomic_load.order); - const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false); - const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse - ptr_info.child.toType().abiAlignment(mod))); - const ptr_volatile = 
llvm.Bool.fromBool(ptr_info.flags.is_volatile); + const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false); + const ptr_alignment = Builder.Alignment.fromByteUnits( + info.flags.alignment.toByteUnitsOptional() orelse info.child.toType().abiAlignment(mod), + ); + const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { + false => .normal, + true => .@"volatile", + }; const elem_llvm_ty = try o.lowerType(elem_ty); - if (opt_abi_llvm_ty) |abi_llvm_ty| { + if (llvm_abi_ty != .none) { // operand needs widening and truncating - const load_inst = self.builder.buildLoad(abi_llvm_ty, ptr, ""); - load_inst.setAlignment(ptr_alignment); - load_inst.setVolatile(ptr_volatile); - load_inst.setOrdering(ordering); - return self.builder.buildTrunc(load_inst, elem_llvm_ty, ""); + const loaded = try self.wip.loadAtomic( + ptr_kind, + llvm_abi_ty, + ptr, + self.sync_scope, + ordering, + ptr_alignment, + "", + ); + return self.wip.cast(.trunc, loaded, elem_llvm_ty, ""); } - const load_inst = self.builder.buildLoad(elem_llvm_ty, ptr, ""); - load_inst.setAlignment(ptr_alignment); - load_inst.setVolatile(ptr_volatile); - load_inst.setOrdering(ordering); - return load_inst; + return self.wip.loadAtomic( + ptr_kind, + elem_llvm_ty, + ptr, + self.sync_scope, + ordering, + ptr_alignment, + "", + ); } fn airAtomicStore( self: *FuncGen, inst: Air.Inst.Index, - ordering: llvm.AtomicOrdering, - ) !?*llvm.Value { + ordering: Builder.AtomicOrdering, + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .none; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); - const opt_abi_ty = o.getAtomicAbiType(operand_ty, false); + const llvm_abi_ty = try o.getAtomicAbiType(operand_ty, false); - if (opt_abi_ty) |abi_ty| { + if (llvm_abi_ty != .none) { // operand needs widening - if (operand_ty.isSignedInt(mod)) { - element = self.builder.buildSExt(element, abi_ty, ""); - } else { - element = self.builder.buildZExt(element, abi_ty, ""); - } + element = try self.wip.conv( + if (operand_ty.isSignedInt(mod)) .signed else .unsigned, + element, + llvm_abi_ty, + "", + ); } try self.store(ptr, ptr_ty, element, ordering); - return null; + return .none; } - fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = self.typeOf(bin_op.rhs); - const target = mod.getTarget(); - const dest_ptr_align = ptr_ty.ptrAlignment(mod); - const u8_llvm_ty = self.context.intType(8); - const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); + const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); // Any WebAssembly runtime will trap when the destination pointer is out-of-bounds, regardless @@ -8527,20 +8970,26 @@ pub const FuncGen = struct { // extra information to LLVM. 
However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. const fill_byte = if (safety) - u8_llvm_ty.constInt(0xaa, .False) + try o.builder.intValue(.i8, 0xaa) else - u8_llvm_ty.getUndef(); - const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); + try o.builder.undefValue(.i8); + const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&self.wip), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); } if (safety and mod.comp.bin_file.options.valgrind) { - self.valgrindMarkUndef(dest_ptr, len); + try self.valgrindMarkUndef(dest_ptr, len); } - return null; + return .none; } // Test if the element value is compile-time known to be a @@ -8548,18 +8997,21 @@ pub const FuncGen = struct { // repeating byte pattern of 0 bytes. In such case, the memset // intrinsic can be used. if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| { - const fill_byte = try self.resolveValue(.{ - .ty = Type.u8, - .val = byte_val, - }); - const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); + const fill_byte = try self.resolveValue(.{ .ty = Type.u8, .val = byte_val }); + const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { - try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + try self.safeWasmMemset(dest_ptr, fill_byte.toValue(), len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&o.builder), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); } - return null; + return .none; } } @@ -8569,14 +9021,20 @@ pub const FuncGen = struct { if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. const fill_byte = try self.bitCast(value, elem_ty, Type.u8); - const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); + const len = try self.sliceOrArrayLenInBytes(dest_slice, ptr_ty); if (intrinsic_len0_traps) { try self.safeWasmMemset(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); } else { - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&self.wip), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); } - return null; + return .none; } // non-byte-sized element. lower with a loop. something like this: @@ -8584,88 +9042,92 @@ pub const FuncGen = struct { // entry: // ... // %end_ptr = getelementptr %ptr, %len - // br loop + // br %loop // loop: // %it_ptr = phi body %next_ptr, entry %ptr // %end = cmp eq %it_ptr, %end_ptr - // cond_br %end body, end + // br %end, %body, %end // body: // store %it_ptr, %value // %next_ptr = getelementptr %it_ptr, 1 - // br loop + // br %loop // end: // ... 
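// Hedged illustration (not part of the emitted IR): at the Zig level, this element-by-element lowering behaves like
//     for (dest_ptr[0..len]) |*elem| elem.* = value;
// writing one element per loop iteration, since the memset intrinsic can only repeat a single byte pattern.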
- const entry_block = self.builder.getInsertBlock(); - const loop_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetLoop"); - const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); + const entry_block = self.wip.cursor.block; + const loop_block = try self.wip.block(2, "InlineMemsetLoop"); + const body_block = try self.wip.block(1, "InlineMemsetBody"); + const end_block = try self.wip.block(1, "InlineMemsetEnd"); - const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); + const usize_ty = try o.lowerType(Type.usize); const len = switch (ptr_ty.ptrSize(mod)) { - .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), - .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), + .Slice => try self.wip.extractValue(dest_slice, &.{1}, ""), + .One => try o.builder.intValue(usize_ty, ptr_ty.childType(mod).arrayLen(mod)), .Many, .C => unreachable, }; const elem_llvm_ty = try o.lowerType(elem_ty); - const len_gep = [_]*llvm.Value{len}; - const end_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, dest_ptr, &len_gep, len_gep.len, ""); - _ = self.builder.buildBr(loop_block); + const end_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, dest_ptr, &.{len}, ""); + _ = try self.wip.br(loop_block); - self.builder.positionBuilderAtEnd(loop_block); - const it_ptr = self.builder.buildPhi(self.context.pointerType(0), ""); - const end = self.builder.buildICmp(.NE, it_ptr, end_ptr, ""); - _ = self.builder.buildCondBr(end, body_block, end_block); + self.wip.cursor = .{ .block = loop_block }; + const it_ptr = try self.wip.phi(.ptr, ""); + const end = try self.wip.icmp(.ne, it_ptr.toValue(), end_ptr, ""); + _ = try self.wip.brCond(end, body_block, end_block); - self.builder.positionBuilderAtEnd(body_block); + self.wip.cursor = .{ .block = body_block }; const elem_abi_alignment = elem_ty.abiAlignment(mod); - const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align); + const it_ptr_alignment = Builder.Alignment.fromByteUnits( + @min(elem_abi_alignment, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)), + ); if (isByRef(elem_ty, mod)) { - _ = self.builder.buildMemCpy( - it_ptr, - it_ptr_alignment, - value, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + it_ptr.toValue().toLlvm(&self.wip), + @intCast(it_ptr_alignment.toByteUnits() orelse 0), + value.toLlvm(&self.wip), elem_abi_alignment, - llvm_usize_ty.constInt(elem_abi_size, .False), + (try o.builder.intConst(usize_ty, elem_abi_size)).toLlvm(&o.builder), is_volatile, - ); - } else { - const store_inst = self.builder.buildStore(value, it_ptr); - store_inst.setAlignment(it_ptr_alignment); - store_inst.setVolatile(llvm.Bool.fromBool(is_volatile)); - } - const one_gep = [_]*llvm.Value{llvm_usize_ty.constInt(1, .False)}; - const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, ""); - _ = self.builder.buildBr(loop_block); + ), &self.wip); + } else _ = try self.wip.store(switch (is_volatile) { + false => .normal, + true => .@"volatile", + }, value, it_ptr.toValue(), it_ptr_alignment); + const next_ptr = try self.wip.gep(.inbounds, elem_llvm_ty, it_ptr.toValue(), &.{ + try o.builder.intValue(usize_ty, 1), + }, ""); + _ = try self.wip.br(loop_block); - self.builder.positionBuilderAtEnd(end_block); - - const incoming_values: [2]*llvm.Value = .{ next_ptr, dest_ptr }; - const incoming_blocks: [2]*llvm.BasicBlock = .{ body_block, 
entry_block }; - it_ptr.addIncoming(&incoming_values, &incoming_blocks, 2); - - return null; + self.wip.cursor = .{ .block = end_block }; + try it_ptr.finish(&.{ next_ptr, dest_ptr }, &.{ body_block, entry_block }, &self.wip); + return .none; } fn safeWasmMemset( self: *FuncGen, - dest_ptr: *llvm.Value, - fill_byte: *llvm.Value, - len: *llvm.Value, - dest_ptr_align: u32, + dest_ptr: Builder.Value, + fill_byte: Builder.Value, + len: Builder.Value, + dest_ptr_align: Builder.Alignment, is_volatile: bool, ) !void { - const llvm_usize_ty = self.context.intType(self.dg.object.target.ptrBitWidth()); - const cond = try self.cmp(len, llvm_usize_ty.constInt(0, .False), Type.usize, .neq); - const memset_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapSkip"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "MemsetTrapEnd"); - _ = self.builder.buildCondBr(cond, memset_block, end_block); - self.builder.positionBuilderAtEnd(memset_block); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile); - _ = self.builder.buildBr(end_block); - self.builder.positionBuilderAtEnd(end_block); + const o = self.dg.object; + const llvm_usize_ty = try o.lowerType(Type.usize); + const cond = try self.cmp(len, try o.builder.intValue(llvm_usize_ty, 0), Type.usize, .neq); + const memset_block = try self.wip.block(1, "MemsetTrapSkip"); + const end_block = try self.wip.block(2, "MemsetTrapEnd"); + _ = try self.wip.brCond(cond, memset_block, end_block); + self.wip.cursor = .{ .block = memset_block }; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemSet( + dest_ptr.toLlvm(&self.wip), + fill_byte.toLlvm(&self.wip), + len.toLlvm(&self.wip), + @intCast(dest_ptr_align.toByteUnits() orelse 0), + is_volatile, + ), &self.wip); + _ = try self.wip.br(end_block); + self.wip.cursor = .{ .block = end_block }; } - fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -8673,9 +9135,9 @@ pub const FuncGen = struct { const dest_ptr_ty = self.typeOf(bin_op.lhs); const src_slice = try self.resolveInst(bin_op.rhs); const src_ptr_ty = self.typeOf(bin_op.rhs); - const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty); - const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); - const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); + const src_ptr = try self.sliceOrArrayPtr(src_slice, src_ptr_ty); + const len = try self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); + const dest_ptr = try self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); // When bulk-memory is enabled, this will be lowered to WebAssembly's memory.copy instruction. 
@@ -8687,84 +9149,81 @@ pub const FuncGen = struct { std.Target.wasm.featureSetHas(o.target.cpu.features, .bulk_memory) and dest_ptr_ty.isSlice(mod)) { - const llvm_usize_ty = self.context.intType(self.dg.object.target.ptrBitWidth()); - const cond = try self.cmp(len, llvm_usize_ty.constInt(0, .False), Type.usize, .neq); - const memcpy_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapSkip"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "MemcpyTrapEnd"); - _ = self.builder.buildCondBr(cond, memcpy_block, end_block); - self.builder.positionBuilderAtEnd(memcpy_block); - _ = self.builder.buildMemCpy( - dest_ptr, + const zero_usize = try o.builder.intValue(try o.lowerType(Type.usize), 0); + const cond = try self.cmp(len, zero_usize, Type.usize, .neq); + const memcpy_block = try self.wip.block(1, "MemcpyTrapSkip"); + const end_block = try self.wip.block(2, "MemcpyTrapEnd"); + _ = try self.wip.brCond(cond, memcpy_block, end_block); + self.wip.cursor = .{ .block = memcpy_block }; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + dest_ptr.toLlvm(&self.wip), dest_ptr_ty.ptrAlignment(mod), - src_ptr, + src_ptr.toLlvm(&self.wip), src_ptr_ty.ptrAlignment(mod), - len, + len.toLlvm(&self.wip), is_volatile, - ); - _ = self.builder.buildBr(end_block); - self.builder.positionBuilderAtEnd(end_block); - return null; + ), &self.wip); + _ = try self.wip.br(end_block); + self.wip.cursor = .{ .block = end_block }; + return .none; } - _ = self.builder.buildMemCpy( - dest_ptr, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + dest_ptr.toLlvm(&self.wip), dest_ptr_ty.ptrAlignment(mod), - src_ptr, + src_ptr.toLlvm(&self.wip), src_ptr_ty.ptrAlignment(mod), - len, + len.toLlvm(&self.wip), is_volatile, - ); - return null; + ), &self.wip); + return .none; } - fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const un_ty = self.typeOf(bin_op.lhs).childType(mod); const layout = un_ty.unionGetLayout(mod); - if (layout.tag_size == 0) return null; + if (layout.tag_size == 0) return .none; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); if (layout.payload_size == 0) { // TODO alignment on this store - _ = self.builder.buildStore(new_tag, union_ptr); - return null; + _ = try self.wip.store(.normal, new_tag, union_ptr, .default); + return .none; } - const un_llvm_ty = try o.lowerType(un_ty); const tag_index = @intFromBool(layout.tag_align < layout.payload_align); - const tag_field_ptr = self.builder.buildStructGEP(un_llvm_ty, union_ptr, tag_index, ""); + const tag_field_ptr = try self.wip.gepStruct(try o.lowerType(un_ty), union_ptr, tag_index, ""); // TODO alignment on this store - _ = self.builder.buildStore(new_tag, tag_field_ptr); - return null; + _ = try self.wip.store(.normal, new_tag, tag_field_ptr, .default); + return .none; } - fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.typeOf(ty_op.operand); const layout = un_ty.unionGetLayout(mod); - if (layout.tag_size == 0) return null; + if (layout.tag_size == 0) return .none; const union_handle = try 
self.resolveInst(ty_op.operand); + if (isByRef(un_ty, mod)) { const llvm_un_ty = try o.lowerType(un_ty); - if (layout.payload_size == 0) { - return self.builder.buildLoad(llvm_un_ty, union_handle, ""); - } + if (layout.payload_size == 0) + return self.wip.load(.normal, llvm_un_ty, union_handle, .default, ""); const tag_index = @intFromBool(layout.tag_align < layout.payload_align); - const tag_field_ptr = self.builder.buildStructGEP(llvm_un_ty, union_handle, tag_index, ""); - return self.builder.buildLoad(llvm_un_ty.structGetTypeAtIndex(tag_index), tag_field_ptr, ""); + const tag_field_ptr = try self.wip.gepStruct(llvm_un_ty, union_handle, tag_index, ""); + const llvm_tag_ty = llvm_un_ty.structFields(&o.builder)[tag_index]; + return self.wip.load(.normal, llvm_tag_ty, tag_field_ptr, .default, ""); } else { - if (layout.payload_size == 0) { - return union_handle; - } + if (layout.payload_size == 0) return union_handle; const tag_index = @intFromBool(layout.tag_align < layout.payload_align); - return self.builder.buildExtractValue(union_handle, tag_index, ""); + return self.wip.extractValue(union_handle, &.{tag_index}, ""); } } - fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value { + fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !Builder.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); @@ -8772,7 +9231,7 @@ pub const FuncGen = struct { return self.buildFloatOp(op, operand_ty, 1, .{operand}); } - fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const un_op = self.air.instructions.items(.data)[inst].un_op; @@ -8782,60 +9241,64 @@ pub const FuncGen = struct { return self.buildFloatOp(.neg, operand_ty, 1, .{operand}); } - fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value { const o = self.dg.object; - const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const llvm_i1 = self.context.intType(1); - const operand_llvm_ty = try o.lowerType(operand_ty); - const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); + const llvm_operand_ty = try o.lowerType(operand_ty); + const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{ llvm_operand_ty, .i1 }, .normal); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty}); - const params = [_]*llvm.Value{ operand, llvm_i1.constNull() }; - const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); + const params = [_]*llvm.Value{ + operand.toLlvm(&self.wip), + Builder.Constant.false.toLlvm(&o.builder), + }; + const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + &params, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerType(result_ty); - - const bits = operand_ty.intInfo(mod).bits; - const result_bits = result_ty.intInfo(mod).bits; - if (bits > result_bits) { - return
self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); - } else if (bits < result_bits) { - return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); - } else { - return wrong_size_result; - } + return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); } - fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value { const o = self.dg.object; - const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const params = [_]*llvm.Value{operand}; - const operand_llvm_ty = try o.lowerType(operand_ty); - const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); - - const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); + const llvm_operand_ty = try o.lowerType(operand_ty); + const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{llvm_operand_ty}, .normal); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty}); + + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + &params, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerType(result_ty); - - const bits = operand_ty.intInfo(mod).bits; - const result_bits = result_ty.intInfo(mod).bits; - if (bits > result_bits) { - return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); - } else if (bits < result_bits) { - return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); - } else { - return wrong_size_result; - } + return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); } - fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -8844,52 +9307,47 @@ pub const FuncGen = struct { assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); - var operand_llvm_ty = try o.lowerType(operand_ty); + var llvm_operand_ty = try o.lowerType(operand_ty); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte // The truncated result at the end will be the correct bswap - const scalar_llvm_ty = self.context.intType(bits + 8); + const scalar_ty = try o.builder.intType(@intCast(bits + 8)); if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(mod); - operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); - - const shifts = try self.gpa.alloc(*llvm.Value, vec_len); - defer self.gpa.free(shifts); + llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); + } else llvm_operand_ty = scalar_ty; - for (shifts) |*elem| { - elem.* = scalar_llvm_ty.constInt(8, .False); - } - const shift_vec = llvm.constVector(shifts.ptr, vec_len); + const shift_amt = + try o.builder.splatValue(llvm_operand_ty, try o.builder.intConst(scalar_ty, 8)); + const extended = try self.wip.cast(.zext, operand, llvm_operand_ty, ""); + operand = try self.wip.bin(.shl, extended,
shift_amt, ""); - const extended = self.builder.buildZExt(operand, operand_llvm_ty, ""); - operand = self.builder.buildShl(extended, shift_vec, ""); - } else { - const extended = self.builder.buildZExt(operand, scalar_llvm_ty, ""); - operand = self.builder.buildShl(extended, scalar_llvm_ty.constInt(8, .False), ""); - operand_llvm_ty = scalar_llvm_ty; - } bits = bits + 8; } - const params = [_]*llvm.Value{operand}; - const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); + const llvm_fn_ty = try o.builder.fnType(llvm_operand_ty, &.{llvm_operand_ty}, .normal); + const fn_val = try self.getIntrinsic(llvm_fn_name, &.{llvm_operand_ty}); - const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, ""); + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + const wrong_size_result = (try self.wip.unimplemented(llvm_operand_ty, "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + &params, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); const result_ty = self.typeOfIndex(inst); - const result_llvm_ty = try o.lowerType(result_ty); - const result_bits = result_ty.intInfo(mod).bits; - if (bits > result_bits) { - return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); - } else if (bits < result_bits) { - return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); - } else { - return wrong_size_result; - } + return self.wip.conv(.unsigned, wrong_size_result, try o.lowerType(result_ty), ""); } - fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; @@ -8897,50 +9355,53 @@ pub const FuncGen = struct { const error_set_ty = self.air.getRefType(ty_op.ty); const names = error_set_ty.errorSetNames(mod); - const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); - const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); - const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); - const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len))); + const valid_block = try self.wip.block(@intCast(names.len), "Valid"); + const invalid_block = try self.wip.block(1, "Invalid"); + const end_block = try self.wip.block(2, "End"); + var wip_switch = try self.wip.@"switch"(operand, invalid_block, @intCast(names.len)); + defer wip_switch.finish(&self.wip); for (names) |name| { - const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?)); - const this_tag_int_value = try o.lowerValue(.{ - .ty = Type.err_int, - .val = try mod.intValue(Type.err_int, err_int), - }); - switch_instr.addCase(this_tag_int_value, valid_block); - } - self.builder.positionBuilderAtEnd(valid_block); - _ = self.builder.buildBr(end_block); - - self.builder.positionBuilderAtEnd(invalid_block); - _ = self.builder.buildBr(end_block); - - self.builder.positionBuilderAtEnd(end_block); - - const llvm_type = self.context.intType(1); - const incoming_values: [2]*llvm.Value = .{ - llvm_type.constInt(1, .False), llvm_type.constInt(0, .False), - }; - const incoming_blocks: [2]*llvm.BasicBlock = .{ - valid_block, invalid_block, - }; - const phi_node = self.builder.buildPhi(llvm_type, ""); - phi_node.addIncoming(&incoming_values, &incoming_blocks, 2); - return phi_node; + const err_int =
mod.global_error_set.getIndex(name).?; + const this_tag_int_value = try o.builder.intConst(Builder.Type.err_int, err_int); + try wip_switch.addCase(this_tag_int_value, valid_block, &self.wip); + } + self.wip.cursor = .{ .block = valid_block }; + _ = try self.wip.br(end_block); + + self.wip.cursor = .{ .block = invalid_block }; + _ = try self.wip.br(end_block); + + self.wip.cursor = .{ .block = end_block }; + const phi = try self.wip.phi(.i1, ""); + try phi.finish( + &.{ Builder.Constant.true.toValue(), Builder.Constant.false.toValue() }, + &.{ valid_block, invalid_block }, + &self.wip, + ); + return phi.toValue(); } - fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty); - const params = [_]*llvm.Value{operand}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(.i1, "")).finish(self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &params, + params.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { + fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; const mod = o.module; const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; @@ -8950,185 +9411,207 @@ pub const FuncGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(o.named_enum_map.remove(enum_type.decl)); - var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}); - - const param_types = [_]*llvm.Type{try o.lowerType(enum_type.tag_ty.toType())}; + const llvm_fn_name = try o.builder.fmt("__zig_is_named_enum_value_{}", .{ + fqn.fmt(&mod.intern_pool), + }); - const llvm_ret_ty = try o.lowerType(Type.bool); - const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False); - const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(.i1, &.{ + try o.lowerType(enum_type.tag_ty.toType()), + }, .normal); + const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); fn_val.setLinkage(.Internal); fn_val.setFunctionCallConv(.Fast); o.addCommonFnAttributes(fn_val); - gop.value_ptr.* = fn_val; - const prev_block = self.builder.getInsertBlock(); - const prev_debug_location = self.builder.getCurrentDebugLocation2(); - defer { - self.builder.positionBuilderAtEnd(prev_block); - if (self.di_scope != null) { - self.builder.setCurrentDebugLocation2(prev_debug_location); - } + var global = Builder.Global{ + .linkage = .internal, + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; + try o.builder.llvm.globals.append(self.gpa, fn_val); +
_ = try o.builder.addGlobal(llvm_fn_name, global); + try o.builder.functions.append(self.gpa, function); + gop.value_ptr.* = global.kind.function; + + var wip = try Builder.WipFunction.init(&o.builder, global.kind.function); + defer wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; + + const named_block = try wip.block(@intCast(enum_type.names.len), "Named"); + const unnamed_block = try wip.block(1, "Unnamed"); + const tag_int_value = wip.arg(0); + var wip_switch = try wip.@"switch"(tag_int_value, unnamed_block, @intCast(enum_type.names.len)); + defer wip_switch.finish(&wip); + + for (0..enum_type.names.len) |field_index| { + const this_tag_int_value = try o.lowerValue( + (try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + ); + try wip_switch.addCase(this_tag_int_value, named_block, &wip); } + wip.cursor = .{ .block = named_block }; + _ = try wip.ret(Builder.Constant.true.toValue()); - const entry_block = self.context.appendBasicBlock(fn_val, "Entry"); - self.builder.positionBuilderAtEnd(entry_block); - self.builder.clearCurrentDebugLocation(); - - const named_block = self.context.appendBasicBlock(fn_val, "Named"); - const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed"); - const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len))); - - for (enum_type.names, 0..) |_, field_index_usize| { - const field_index = @as(u32, @intCast(field_index_usize)); - const this_tag_int_value = int: { - break :int try o.lowerValue(.{ - .ty = enum_ty, - .val = try mod.enumValueFieldIndex(enum_ty, field_index), - }); - }; - switch_instr.addCase(this_tag_int_value, named_block); - } - self.builder.positionBuilderAtEnd(named_block); - _ = self.builder.buildRet(self.context.intType(1).constInt(1, .False)); + wip.cursor = .{ .block = unnamed_block }; + _ = try wip.ret(Builder.Constant.false.toValue()); - self.builder.positionBuilderAtEnd(unnamed_block); - _ = self.builder.buildRet(self.context.intType(1).constInt(0, .False)); - return fn_val; + try wip.finish(); + return global.kind.function; } - fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); const llvm_fn = try self.getEnumTagNameFunction(enum_ty); - const params = [_]*llvm.Value{operand}; - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, ""); + const llvm_fn_ty = llvm_fn.typeOf(&o.builder); + const params = [_]*llvm.Value{operand.toLlvm(&self.wip)}; + return (try self.wip.unimplemented(llvm_fn_ty.functionReturn(&o.builder), "")).finish( + self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &params, + params.len, + .Fast, + .Auto, + "", + ), + &self.wip, + ); } - fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { + fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { const o = self.dg.object; const mod = o.module; const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function.
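// Conceptually (a hedged sketch, not the generated code), the helper emitted by this function behaves like:
//     fn __zig_tag_name_E(tag_int: TagInt) [:0]const u8 {
//         return switch (tag_int) {
//             // one case per enum field, e.g. 0 => "a", 1 => "b", ...
//             else => unreachable,
//         };
//     }
// where `E` and `TagInt` stand in for the enum being lowered and its integer tag type.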
const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl); - if (gop.found_existing) return gop.value_ptr.*; + if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function; errdefer assert(o.decl_map.remove(enum_type.decl)); - var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); + const llvm_fn_name = try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); - const slice_ty = Type.slice_const_u8_sentinel_0; - const llvm_ret_ty = try o.lowerType(slice_ty); - const usize_llvm_ty = try o.lowerType(Type.usize); - const slice_alignment = slice_ty.abiAlignment(mod); - - const param_types = [_]*llvm.Type{try o.lowerType(enum_type.tag_ty.toType())}; + const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); + const usize_ty = try o.lowerType(Type.usize); - const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False); - const fn_val = o.llvm_module.addFunction(llvm_fn_name, fn_type); + const fn_type = try o.builder.fnType(ret_ty, &.{ + try o.lowerType(enum_type.tag_ty.toType()), + }, .normal); + const fn_val = o.llvm_module.addFunction(llvm_fn_name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); fn_val.setLinkage(.Internal); fn_val.setFunctionCallConv(.Fast); o.addCommonFnAttributes(fn_val); - gop.value_ptr.* = fn_val; - - const prev_block = self.builder.getInsertBlock(); - const prev_debug_location = self.builder.getCurrentDebugLocation2(); - defer { - self.builder.positionBuilderAtEnd(prev_block); - if (self.di_scope != null) { - self.builder.setCurrentDebugLocation2(prev_debug_location); - } - } - - const entry_block = self.context.appendBasicBlock(fn_val, "Entry"); - self.builder.positionBuilderAtEnd(entry_block); - self.builder.clearCurrentDebugLocation(); - - const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue"); - const tag_int_value = fn_val.getParam(0); - const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len))); - const array_ptr_indices = [_]*llvm.Value{ - usize_llvm_ty.constNull(), usize_llvm_ty.constNull(), + var global = Builder.Global{ + .linkage = .internal, + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, }; - - for (enum_type.names, 0..)
|name_ip, field_index_usize| { - const field_index = @as(u32, @intCast(field_index_usize)); - const name = mod.intern_pool.stringToSlice(name_ip); - const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False); - const str_init_llvm_ty = str_init.typeOf(); - const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, ""); - str_global.setInitializer(str_init); - str_global.setLinkage(.Private); - str_global.setGlobalConstant(.True); - str_global.setUnnamedAddr(.True); - str_global.setAlignment(1); - - const slice_fields = [_]*llvm.Value{ - str_init_llvm_ty.constInBoundsGEP(str_global, &array_ptr_indices, array_ptr_indices.len), - usize_llvm_ty.constInt(name.len, .False), + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; + try o.builder.llvm.globals.append(self.gpa, fn_val); + gop.value_ptr.* = try o.builder.addGlobal(llvm_fn_name, global); + try o.builder.functions.append(self.gpa, function); + + var wip = try Builder.WipFunction.init(&o.builder, global.kind.function); + defer wip.deinit(); + wip.cursor = .{ .block = try wip.block(0, "Entry") }; + + const bad_value_block = try wip.block(1, "BadValue"); + const tag_int_value = wip.arg(0); + var wip_switch = + try wip.@"switch"(tag_int_value, bad_value_block, @intCast(enum_type.names.len)); + defer wip_switch.finish(&wip); + + for (enum_type.names, 0..) |name_ip, field_index| { + const name = try o.builder.string(mod.intern_pool.stringToSlice(name_ip)); + const str_init = try o.builder.stringNullConst(name); + const str_ty = str_init.typeOf(&o.builder); + const str_llvm_global = o.llvm_module.addGlobal(str_ty.toLlvm(&o.builder), ""); + str_llvm_global.setInitializer(str_init.toLlvm(&o.builder)); + str_llvm_global.setLinkage(.Private); + str_llvm_global.setGlobalConstant(.True); + str_llvm_global.setUnnamedAddr(.True); + str_llvm_global.setAlignment(1); + + var str_global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = str_ty, + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, }; - const slice_init = llvm_ret_ty.constNamedStruct(&slice_fields, slice_fields.len); - const slice_global = o.llvm_module.addGlobal(slice_init.typeOf(), ""); - slice_global.setInitializer(slice_init); - slice_global.setLinkage(.Private); - slice_global.setGlobalConstant(.True); - slice_global.setUnnamedAddr(.True); - slice_global.setAlignment(slice_alignment); - - const return_block = self.context.appendBasicBlock(fn_val, "Name"); - const this_tag_int_value = try o.lowerValue(.{ - .ty = enum_ty, - .val = try mod.enumValueFieldIndex(enum_ty, field_index), + var str_variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = str_init, + .alignment = comptime Builder.Alignment.fromByteUnits(1), + }; + try o.builder.llvm.globals.append(o.gpa, str_llvm_global); + const global_index = try o.builder.addGlobal(.empty, str_global); + try o.builder.variables.append(o.gpa, str_variable); + + const slice_val = try o.builder.structValue(ret_ty, &.{ + global_index.toConst(), + try o.builder.intConst(usize_ty, name.toSlice(&o.builder).?.len), }); - switch_instr.addCase(this_tag_int_value, return_block); - self.builder.positionBuilderAtEnd(return_block); - const loaded = self.builder.buildLoad(llvm_ret_ty, slice_global, ""); - loaded.setAlignment(slice_alignment); - _ = self.builder.buildRet(loaded); + const return_block = try wip.block(1, "Name"); + const this_tag_int_value = try o.lowerValue( 
+ (try mod.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(), + ); + try wip_switch.addCase(this_tag_int_value, return_block, &wip); + + wip.cursor = .{ .block = return_block }; + _ = try wip.ret(slice_val); } - self.builder.positionBuilderAtEnd(bad_value_block); - _ = self.builder.buildUnreachable(); - return fn_val; + wip.cursor = .{ .block = bad_value_block }; + _ = try wip.@"unreachable"(); + + try wip.finish(); + return global.kind.function; } - fn getCmpLtErrorsLenFunction(self: *FuncGen) !*llvm.Value { + fn getCmpLtErrorsLenFunction(self: *FuncGen) !Builder.Function.Index { const o = self.dg.object; - if (o.llvm_module.getNamedFunction(lt_errors_fn_name)) |llvm_fn| { - return llvm_fn; - } + const name = try o.builder.string(lt_errors_fn_name); + if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function; // Function signature: fn (anyerror) bool - const ret_llvm_ty = try o.lowerType(Type.bool); - const anyerror_llvm_ty = try o.lowerType(Type.anyerror); - const param_types = [_]*llvm.Type{anyerror_llvm_ty}; + const fn_type = try o.builder.fnType(.i1, &.{Builder.Type.err_int}, .normal); + const llvm_fn = o.llvm_module.addFunction(name.toSlice(&o.builder).?, fn_type.toLlvm(&o.builder)); - const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False); - const llvm_fn = o.llvm_module.addFunction(lt_errors_fn_name, fn_type); llvm_fn.setLinkage(.Internal); llvm_fn.setFunctionCallConv(.Fast); o.addCommonFnAttributes(llvm_fn); - return llvm_fn; + + var global = Builder.Global{ + .linkage = .internal, + .type = fn_type, + .kind = .{ .function = @enumFromInt(o.builder.functions.items.len) }, + }; + var function = Builder.Function{ + .global = @enumFromInt(o.builder.globals.count()), + }; + + try o.builder.llvm.globals.append(self.gpa, llvm_fn); + _ = try o.builder.addGlobal(name, global); + try o.builder.functions.append(self.gpa, function); + return global.kind.function; } - fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); @@ -9136,34 +9619,32 @@ pub const FuncGen = struct { const slice_llvm_ty = try o.lowerType(slice_ty); const error_name_table_ptr = try self.getErrorNameTable(); - const ptr_slice_llvm_ty = self.context.pointerType(0); - const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr, ""); - const indices = [_]*llvm.Value{operand}; - const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, ""); - return self.builder.buildLoad(slice_llvm_ty, error_name_ptr, ""); + const error_name_table = + try self.wip.load(.normal, .ptr, error_name_table_ptr.toValue(&o.builder), .default, ""); + const error_name_ptr = + try self.wip.gep(.inbounds, slice_llvm_ty, error_name_table, &.{operand}, ""); + return self.wip.load(.normal, slice_llvm_ty, error_name_ptr, .default, ""); } - fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; - const mod = o.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const scalar = try self.resolveInst(ty_op.operand); const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(mod); - return self.builder.buildVectorSplat(len, scalar, ""); + return
self.wip.splatVector(try o.lowerType(vector_ty), scalar, ""); } - fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const pred = try self.resolveInst(pl_op.operand); const a = try self.resolveInst(extra.lhs); const b = try self.resolveInst(extra.rhs); - return self.builder.buildSelect(pred, a, b, ""); + return self.wip.select(pred, a, b, ""); } - fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -9179,24 +9660,25 @@ pub const FuncGen = struct { // when changing code, so Zig uses negative numbers to index the // second vector. These start at -1 and go down, and are easiest to use // with the ~ operator. Here we convert between the two formats. - const values = try self.gpa.alloc(*llvm.Value, mask_len); + const values = try self.gpa.alloc(Builder.Constant, mask_len); defer self.gpa.free(values); - const llvm_i32 = self.context.intType(32); - for (values, 0..) |*val, i| { const elem = try mask.elemValue(mod, i); if (elem.isUndef(mod)) { - val.* = llvm_i32.getUndef(); + val.* = try o.builder.undefConst(.i32); } else { const int = elem.toSignedInt(mod); - const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len)); - val.* = llvm_i32.constInt(unsigned, .False); + const unsigned: u32 = @intCast(if (int >= 0) int else ~int + a_len); + val.* = try o.builder.intConst(.i32, unsigned); } } - const llvm_mask_value = llvm.constVector(values.ptr, mask_len); - return self.builder.buildShuffleVector(a, b, llvm_mask_value, ""); + const llvm_mask_value = try o.builder.vectorValue( + try o.builder.vectorType(.normal, mask_len, .i32), + values, + ); + return self.wip.shuffleVector(a, b, llvm_mask_value, ""); } /// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result. 
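// A hedged sketch of the reduction this helper performs, in plain Zig terms:
//     var accum = accum_init;
//     var i: usize = 0;
//     while (i < vector_len) : (i += 1) accum = llvm_fn(accum, operand_vector[i]);
//     return accum;
// The hunk below lowers this loop through the self-hosted Builder: alloca'd `i` and `accum`,
// a ReduceLoop block, and one call to `llvm_fn` per element.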
@@ -9213,58 +9695,69 @@ pub const FuncGen = struct { /// fn buildReducedCall( self: *FuncGen, - llvm_fn: *llvm.Value, - operand_vector: *llvm.Value, + llvm_fn: Builder.Function.Index, + operand_vector: Builder.Value, vector_len: usize, - accum_init: *llvm.Value, - ) !*llvm.Value { + accum_init: Builder.Value, + ) !Builder.Value { const o = self.dg.object; - const llvm_usize_ty = try o.lowerType(Type.usize); - const llvm_vector_len = llvm_usize_ty.constInt(vector_len, .False); - const llvm_result_ty = accum_init.typeOf(); + const usize_ty = try o.lowerType(Type.usize); + const llvm_vector_len = try o.builder.intValue(usize_ty, vector_len); + const llvm_result_ty = accum_init.typeOfWip(&self.wip); // Allocate and initialize our mutable variables - const i_ptr = self.buildAlloca(llvm_usize_ty, null); - _ = self.builder.buildStore(llvm_usize_ty.constInt(0, .False), i_ptr); - const accum_ptr = self.buildAlloca(llvm_result_ty, null); - _ = self.builder.buildStore(accum_init, accum_ptr); + const i_ptr = try self.buildAlloca(usize_ty, .default); + _ = try self.wip.store(.normal, try o.builder.intValue(usize_ty, 0), i_ptr, .default); + const accum_ptr = try self.buildAlloca(llvm_result_ty, .default); + _ = try self.wip.store(.normal, accum_init, accum_ptr, .default); // Setup the loop - const loop = self.context.appendBasicBlock(self.llvm_func, "ReduceLoop"); - const loop_exit = self.context.appendBasicBlock(self.llvm_func, "AfterReduce"); - _ = self.builder.buildBr(loop); + const loop = try self.wip.block(2, "ReduceLoop"); + const loop_exit = try self.wip.block(1, "AfterReduce"); + _ = try self.wip.br(loop); { - self.builder.positionBuilderAtEnd(loop); + self.wip.cursor = .{ .block = loop }; // while (i < vec.len) - const i = self.builder.buildLoad(llvm_usize_ty, i_ptr, ""); - const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len, ""); - const loop_then = self.context.appendBasicBlock(self.llvm_func, "ReduceLoopThen"); + const i = try self.wip.load(.normal, usize_ty, i_ptr, .default, ""); + const cond = try self.wip.icmp(.ult, i, llvm_vector_len, ""); + const loop_then = try self.wip.block(1, "ReduceLoopThen"); - _ = self.builder.buildCondBr(cond, loop_then, loop_exit); + _ = try self.wip.brCond(cond, loop_then, loop_exit); { - self.builder.positionBuilderAtEnd(loop_then); + self.wip.cursor = .{ .block = loop_then }; // accum = f(accum, vec[i]); - const accum = self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); - const element = self.builder.buildExtractElement(operand_vector, i, ""); - const params = [2]*llvm.Value{ accum, element }; - const new_accum = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, ""); - _ = self.builder.buildStore(new_accum, accum_ptr); + const accum = try self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, ""); + const element = try self.wip.extractElement(operand_vector, i, ""); + const params = [2]*llvm.Value{ accum.toLlvm(&self.wip), element.toLlvm(&self.wip) }; + const new_accum = (try self.wip.unimplemented(llvm_result_ty, "")).finish( + self.builder.buildCall( + llvm_fn.typeOf(&o.builder).toLlvm(&o.builder), + llvm_fn.toLlvm(&o.builder), + &params, + params.len, + .C, + .Auto, + "", + ), + &self.wip, + ); + _ = try self.wip.store(.normal, new_accum, accum_ptr, .default); // i += 1 - const new_i = self.builder.buildAdd(i, llvm_usize_ty.constInt(1, .False), ""); - _ = self.builder.buildStore(new_i, i_ptr); - _ = self.builder.buildBr(loop); + const new_i = try self.wip.bin(.add, i, try o.builder.intValue(usize_ty,
1), ""); + _ = try self.wip.store(.normal, new_i, i_ptr, .default); + _ = try self.wip.br(loop); } } - self.builder.positionBuilderAtEnd(loop_exit); - return self.builder.buildLoad(llvm_result_ty, accum_ptr, ""); + self.wip.cursor = .{ .block = loop_exit }; + return self.wip.load(.normal, llvm_result_ty, accum_ptr, .default, ""); } - fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { + fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !Builder.Value { self.builder.setFastMath(want_fast_math); const o = self.dg.object; const mod = o.module; @@ -9274,40 +9767,70 @@ pub const FuncGen = struct { const operand = try self.resolveInst(reduce.operand); const operand_ty = self.typeOf(reduce.operand); const scalar_ty = self.typeOfIndex(inst); + const llvm_scalar_ty = try o.lowerType(scalar_ty); switch (reduce.operation) { - .And => return self.builder.buildAndReduce(operand), - .Or => return self.builder.buildOrReduce(operand), - .Xor => return self.builder.buildXorReduce(operand), + .And => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildAndReduce(operand.toLlvm(&self.wip)), &self.wip), + .Or => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildOrReduce(operand.toLlvm(&self.wip)), &self.wip), + .Xor => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildXorReduce(operand.toLlvm(&self.wip)), &self.wip), .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildIntMinReduce( + operand.toLlvm(&self.wip), + scalar_ty.isSignedInt(mod), + ), + &self.wip, + ), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - return self.builder.buildFPMinReduce(operand); + return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildFPMinReduce(operand.toLlvm(&self.wip)), &self.wip); }, else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildIntMaxReduce( + operand.toLlvm(&self.wip), + scalar_ty.isSignedInt(mod), + ), + &self.wip, + ), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - return self.builder.buildFPMaxReduce(operand); + return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildFPMaxReduce(operand.toLlvm(&self.wip)), &self.wip); }, else => unreachable, }, .Add => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildAddReduce(operand), + .Int => return (try self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildAddReduce(operand.toLlvm(&self.wip)), &self.wip), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerType(scalar_ty); - const neutral_value = scalar_llvm_ty.constReal(-0.0); - return self.builder.buildFPAddReduce(neutral_value, operand); + const neutral_value = try o.builder.fpConst(llvm_scalar_ty, -0.0); + return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildFPAddReduce( + neutral_value.toLlvm(&o.builder), + operand.toLlvm(&self.wip), + ), + &self.wip, + ); }, else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(mod)) { - .Int => return self.builder.buildMulReduce(operand), + .Int => return (try 
self.wip.unimplemented(llvm_scalar_ty, "")) + .finish(self.builder.buildMulReduce(operand.toLlvm(&self.wip)), &self.wip), .Float => if (intrinsicsAllowed(scalar_ty, target)) { - const scalar_llvm_ty = try o.lowerType(scalar_ty); - const neutral_value = scalar_llvm_ty.constReal(1.0); - return self.builder.buildFPMulReduce(neutral_value, operand); + const neutral_value = try o.builder.fpConst(llvm_scalar_ty, 1.0); + return (try self.wip.unimplemented(llvm_scalar_ty, "")).finish( + self.builder.buildFPMulReduce( + neutral_value.toLlvm(&o.builder), + operand.toLlvm(&self.wip), + ), + &self.wip, + ); }, else => unreachable, }, @@ -9315,58 +9838,71 @@ pub const FuncGen = struct { // Reduction could not be performed with intrinsics. // Use a manual loop over a softfloat call instead. - var fn_name_buf: [64]u8 = undefined; const float_bits = scalar_ty.floatBits(target); const fn_name = switch (reduce.operation) { - .Min => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{ + .Min => try o.builder.fmt("{s}fmin{s}", .{ libcFloatPrefix(float_bits), libcFloatSuffix(float_bits), - }) catch unreachable, - .Max => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{ + }), + .Max => try o.builder.fmt("{s}fmax{s}", .{ libcFloatPrefix(float_bits), libcFloatSuffix(float_bits), - }) catch unreachable, - .Add => std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{ + }), + .Add => try o.builder.fmt("__add{s}f3", .{ compilerRtFloatAbbrev(float_bits), - }) catch unreachable, - .Mul => std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{ + }), + .Mul => try o.builder.fmt("__mul{s}f3", .{ compilerRtFloatAbbrev(float_bits), - }) catch unreachable, + }), else => unreachable, }; - const param_llvm_ty = try o.lowerType(scalar_ty); - const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty }; - const libc_fn = self.getLibcFunction(fn_name, ¶m_types, param_llvm_ty); - const init_value = try o.lowerValue(.{ - .ty = scalar_ty, - .val = try mod.floatValue(scalar_ty, switch (reduce.operation) { - .Min => std.math.nan(f32), - .Max => std.math.nan(f32), - .Add => -0.0, - .Mul => 1.0, - else => unreachable, - }), - }); - return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value); + const libc_fn = + try self.getLibcFunction(fn_name, &.{ llvm_scalar_ty, llvm_scalar_ty }, llvm_scalar_ty); + const init_val = switch (llvm_scalar_ty) { + .i16 => try o.builder.intValue(.i16, @as(i16, @bitCast( + @as(f16, switch (reduce.operation) { + .Min, .Max => std.math.nan(f16), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), + ))), + .i80 => try o.builder.intValue(.i80, @as(i80, @bitCast( + @as(f80, switch (reduce.operation) { + .Min, .Max => std.math.nan(f80), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), + ))), + .i128 => try o.builder.intValue(.i128, @as(i128, @bitCast( + @as(f128, switch (reduce.operation) { + .Min, .Max => std.math.nan(f128), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), + ))), + else => unreachable, + }; + return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_val); } - fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @as(usize, @intCast(result_ty.arrayLen(mod))); - const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); + const len: usize 
= @intCast(result_ty.arrayLen(mod)); + const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try o.lowerType(result_ty); switch (result_ty.zigTypeTag(mod)) { .Vector => { - const llvm_u32 = self.context.intType(32); - - var vector = llvm_result_ty.getUndef(); + var vector = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { - const index_u32 = llvm_u32.constInt(i, .False); + const index_u32 = try o.builder.intValue(.i32, i); const llvm_elem = try self.resolveInst(elem); - vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, ""); + vector = try self.wip.insertElement(vector, llvm_elem, index_u32, ""); } return vector; }, @@ -9375,48 +9911,47 @@ pub const FuncGen = struct { const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); + const int_ty = try o.builder.intType(@intCast(big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_int = try o.builder.intValue(int_ty, 0); var running_bits: u16 = 0; for (elements, 0..) |elem, i| { const field = fields[i]; if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = self.context.intType(ty_bit_size); + const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod)); + const small_int_ty = try o.builder.intType(ty_bit_size); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(non_int_val, small_int_ty, "") + try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else - self.builder.buildBitCast(non_int_val, small_int_ty, ""); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + try self.wip.cast(.bitcast, non_int_val, small_int_ty, ""); + const shift_rhs = try o.builder.intValue(int_ty, running_bits); // If the field is as large as the entire packed struct, this // zext would go from, e.g. i16 to i16. This is legal with // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = self.builder.buildZExtOrBitCast(small_int_val, int_llvm_ty, ""); - const shifted = self.builder.buildShl(extended_int_val, shift_rhs, ""); - running_int = self.builder.buildOr(running_int, shifted, ""); + const extended_int_val = try self.wip.conv(.unsigned, small_int_val, int_ty, ""); + const shifted = try self.wip.bin(.shl, extended_int_val, shift_rhs, ""); + running_int = try self.wip.bin(.@"or", running_int, shifted, ""); running_bits += ty_bit_size; } return running_int; } if (isByRef(result_ty, mod)) { - const llvm_u32 = self.context.intType(32); // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); + const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment); - var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) 
|elem, i| { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmField(result_ty, i, mod).?.index; - indices[1] = llvm_u32.constInt(llvm_i, .False); - const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); + const field_ptr = + try self.wip.gepStruct(llvm_result_ty, alloca_inst, llvm_i, ""); const field_ptr_ty = try mod.ptrType(.{ .child = self.typeOf(elem).toIntern(), .flags = .{ @@ -9425,18 +9960,18 @@ pub const FuncGen = struct { ), }, }); - try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic); + try self.store(field_ptr, field_ptr_ty, llvm_elem, .none); } return alloca_inst; } else { - var result = llvm_result_ty.getUndef(); + var result = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmField(result_ty, i, mod).?.index; - result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); + result = try self.wip.insertValue(result, llvm_elem, &.{llvm_i}, ""); } return result; } @@ -9445,7 +9980,9 @@ pub const FuncGen = struct { assert(isByRef(result_ty, mod)); const llvm_usize = try o.lowerType(Type.usize); - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); + const usize_zero = try o.builder.intValue(llvm_usize, 0); + const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment); const array_info = result_ty.arrayInfo(mod); const elem_ptr_ty = try mod.ptrType(.{ @@ -9453,26 +9990,21 @@ pub const FuncGen = struct { }); for (elements, 0..) 
|elem, i| { - const indices: [2]*llvm.Value = .{ - llvm_usize.constNull(), - llvm_usize.constInt(@as(c_uint, @intCast(i)), .False), - }; - const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); + const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{ + usize_zero, try o.builder.intValue(llvm_usize, i), + }, ""); const llvm_elem = try self.resolveInst(elem); - try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic); + try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .none); } if (array_info.sentinel) |sent_val| { - const indices: [2]*llvm.Value = .{ - llvm_usize.constNull(), - llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False), - }; - const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); + const elem_ptr = try self.wip.gep(.inbounds, llvm_result_ty, alloca_inst, &.{ + usize_zero, try o.builder.intValue(llvm_usize, array_info.len), + }, ""); const llvm_elem = try self.resolveValue(.{ .ty = array_info.elem_type, .val = sent_val, }); - - try self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic); + try self.store(elem_ptr, elem_ptr_ty, llvm_elem.toValue(), .none); } return alloca_inst; @@ -9481,7 +10013,7 @@ pub const FuncGen = struct { } } - fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const mod = o.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -9493,16 +10025,15 @@ pub const FuncGen = struct { if (union_obj.layout == .Packed) { const big_bits = union_ty.bitSize(mod); - const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits))); + const int_llvm_ty = try o.builder.intType(@intCast(big_bits)); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod))); - const small_int_ty = self.context.intType(ty_bit_size); + const small_int_ty = try o.builder.intType(@intCast(field.ty.bitSize(mod))); const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(non_int_val, small_int_ty, "") + try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "") else - self.builder.buildBitCast(non_int_val, small_int_ty, ""); - return self.builder.buildZExtOrBitCast(small_int_val, int_llvm_ty, ""); + try self.wip.cast(.bitcast, non_int_val, small_int_ty, ""); + return self.wip.conv(.unsigned, small_int_val, int_llvm_ty, ""); } const tag_int = blk: { @@ -9515,106 +10046,96 @@ pub const FuncGen = struct { }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { - return null; + return .none; } assert(!isByRef(union_ty, mod)); - return union_llvm_ty.constInt(tag_int, .False); + return o.builder.intValue(union_llvm_ty, tag_int); } assert(isByRef(union_ty, mod)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. // We must construct the correct unnamed struct type here, in order to then set // the fields appropriately. 
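As an illustrative aside (not part of the patch; the helper and names below are invented), the payload member of that unnamed struct is chosen from the active field roughly as follows: a field with no runtime bits becomes pure padding, a field that exactly fills the payload is used as-is, and anything smaller gets an [N]i8 tail.

    const std = @import("std");

    // Sketch only: mirrors the payload-type decision made in the block below.
    const PayloadShape = union(enum) {
        all_padding: u64, // [layout.payload_size]i8
        field_exact, // field_llvm_ty fills the payload exactly
        field_plus_padding: u64, // packed { field_llvm_ty, [payload_size - field_size]i8 }
    };

    fn payloadShape(field_has_bits: bool, field_size: u64, payload_size: u64) PayloadShape {
        if (!field_has_bits) return .{ .all_padding = payload_size };
        if (field_size == payload_size) return .field_exact;
        return .{ .field_plus_padding = payload_size - field_size };
    }

    test "payload shape selection" {
        try std.testing.expect(std.meta.eql(PayloadShape{ .all_padding = 8 }, payloadShape(false, 0, 8)));
        try std.testing.expect(std.meta.eql(@as(PayloadShape, .field_exact), payloadShape(true, 8, 8)));
        try std.testing.expect(std.meta.eql(PayloadShape{ .field_plus_padding = 3 }, payloadShape(true, 5, 8)));
    }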
- const result_ptr = self.buildAlloca(union_llvm_ty, layout.abi_align); + const alignment = Builder.Alignment.fromByteUnits(layout.abi_align); + const result_ptr = try self.buildAlloca(union_llvm_ty, alignment); const llvm_payload = try self.resolveInst(extra.init); assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; const field_llvm_ty = try o.lowerType(field.ty); const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); + const llvm_usize = try o.lowerType(Type.usize); + const usize_zero = try o.builder.intValue(llvm_usize, 0); + const i32_zero = try o.builder.intValue(.i32, 0); const llvm_union_ty = t: { - const payload = p: { + const payload_ty = p: { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { - const padding_len = @as(c_uint, @intCast(layout.payload_size)); - break :p self.context.intType(8).arrayType(padding_len); + const padding_len = layout.payload_size; + break :p try o.builder.arrayType(padding_len, .i8); } if (field_size == layout.payload_size) { break :p field_llvm_ty; } - const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size)); - const fields: [2]*llvm.Type = .{ - field_llvm_ty, self.context.intType(8).arrayType(padding_len), - }; - break :p self.context.structType(&fields, fields.len, .True); + const padding_len = layout.payload_size - field_size; + break :p try o.builder.structType(.@"packed", &.{ + field_llvm_ty, try o.builder.arrayType(padding_len, .i8), + }); }; - if (layout.tag_size == 0) { - const fields: [1]*llvm.Type = .{payload}; - break :t self.context.structType(&fields, fields.len, .False); - } - const tag_llvm_ty = try o.lowerType(union_obj.tag_ty); - var fields: [3]*llvm.Type = undefined; - var fields_len: c_uint = 2; + if (layout.tag_size == 0) break :t try o.builder.structType(.normal, &.{payload_ty}); + const tag_ty = try o.lowerType(union_obj.tag_ty); + var fields: [3]Builder.Type = undefined; + var fields_len: usize = 2; if (layout.tag_align >= layout.payload_align) { - fields = .{ tag_llvm_ty, payload, undefined }; + fields = .{ tag_ty, payload_ty, undefined }; } else { - fields = .{ payload, tag_llvm_ty, undefined }; + fields = .{ payload_ty, tag_ty, undefined }; } if (layout.padding != 0) { - fields[2] = self.context.intType(8).arrayType(layout.padding); - fields_len = 3; + fields[fields_len] = try o.builder.arrayType(layout.padding, .i8); + fields_len += 1; } - break :t self.context.structType(&fields, fields_len, .False); + break :t try o.builder.structType(.normal, fields[0..fields_len]); }; // Now we follow the layout as expressed above with GEP instructions to set the // tag and the payload. 
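Purely as an illustration of the index arithmetic in the GEPs that follow (the helper is invented for this sketch): exactly one of the payload/tag struct indices is 0 and the other is 1, decided by which alignment wins.

    const std = @import("std");

    // Sketch only: with the unnamed struct ordered by alignment, the payload sits at
    // index @intFromBool(tag_align >= payload_align) and the tag at the complementary index.
    fn unionFieldIndices(tag_align: u32, payload_align: u32) struct { payload: u32, tag: u32 } {
        return .{
            .payload = @intFromBool(tag_align >= payload_align),
            .tag = @intFromBool(tag_align < payload_align),
        };
    }

    test "payload and tag indices are complementary" {
        const tag_first = unionFieldIndices(8, 4); // { tag, payload, ... }
        try std.testing.expectEqual(@as(u32, 1), tag_first.payload);
        try std.testing.expectEqual(@as(u32, 0), tag_first.tag);
        const payload_first = unionFieldIndices(2, 8); // { payload, tag, ... }
        try std.testing.expectEqual(@as(u32, 0), payload_first.payload);
        try std.testing.expectEqual(@as(u32, 1), payload_first.tag);
    }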
- const index_type = self.context.intType(32); - const field_ptr_ty = try mod.ptrType(.{ .child = field.ty.toIntern(), - .flags = .{ - .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align), - }, + .flags = .{ .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align) }, }); if (layout.tag_size == 0) { - const indices: [3]*llvm.Value = .{ - index_type.constNull(), - index_type.constNull(), - index_type.constNull(), - }; - const len: c_uint = if (field_size == layout.payload_size) 2 else 3; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); - try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); + const indices = [3]Builder.Value{ usize_zero, i32_zero, i32_zero }; + const len: usize = if (field_size == layout.payload_size) 2 else 3; + const field_ptr = + try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], ""); + try self.store(field_ptr, field_ptr_ty, llvm_payload, .none); return result_ptr; } { - const indices: [3]*llvm.Value = .{ - index_type.constNull(), - index_type.constInt(@intFromBool(layout.tag_align >= layout.payload_align), .False), - index_type.constNull(), - }; - const len: c_uint = if (field_size == layout.payload_size) 2 else 3; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, len, ""); - try self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic); + const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); + const indices: [3]Builder.Value = + .{ usize_zero, try o.builder.intValue(.i32, payload_index), i32_zero }; + const len: usize = if (field_size == layout.payload_size) 2 else 3; + const field_ptr = + try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, indices[0..len], ""); + try self.store(field_ptr, field_ptr_ty, llvm_payload, .none); } { - const indices: [2]*llvm.Value = .{ - index_type.constNull(), - index_type.constInt(@intFromBool(layout.tag_align < layout.payload_align), .False), - }; - const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, result_ptr, &indices, indices.len, ""); - const tag_llvm_ty = try o.lowerType(union_obj.tag_ty); - const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); - const store_inst = self.builder.buildStore(llvm_tag, field_ptr); - store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); + const tag_index = @intFromBool(layout.tag_align < layout.payload_align); + const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) }; + const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, ""); + const tag_ty = try o.lowerType(union_obj.tag_ty); + const llvm_tag = try o.builder.intValue(tag_ty, tag_int); + const tag_alignment = Builder.Alignment.fromByteUnits(union_obj.tag_ty.abiAlignment(mod)); + _ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment); } return result_ptr; } - fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const prefetch = self.air.instructions.items(.data)[inst].prefetch; @@ -9643,10 +10164,10 @@ pub const FuncGen = struct { .powerpcle, .powerpc64, .powerpc64le, - => return null, + => return .none, .arm, .armeb, .thumb, .thumbeb => { switch (prefetch.rw) { - .write => return null, + .write => return .none, else => {}, } }, @@ -9655,58 +10176,64 @@ pub const FuncGen = struct { .data => {}, } - const llvm_ptr_u8 = self.context.pointerType(0); - const llvm_u32 = 
self.context.intType(32); - const llvm_fn_name = "llvm.prefetch.p0"; - const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { - // declare void @llvm.prefetch(i8*, i32, i32, i32) - const llvm_void = self.context.voidType(); - const param_types = [_]*llvm.Type{ - llvm_ptr_u8, llvm_u32, llvm_u32, llvm_u32, - }; - const fn_type = llvm.functionType(llvm_void, ¶m_types, param_types.len, .False); - break :blk o.llvm_module.addFunction(llvm_fn_name, fn_type); - }; + // declare void @llvm.prefetch(i8*, i32, i32, i32) + const llvm_fn_ty = try o.builder.fnType(.void, &.{ .ptr, .i32, .i32, .i32 }, .normal); + const fn_val = o.llvm_module.getNamedFunction(llvm_fn_name) orelse + o.llvm_module.addFunction(llvm_fn_name, llvm_fn_ty.toLlvm(&o.builder)); const ptr = try self.resolveInst(prefetch.ptr); const params = [_]*llvm.Value{ - ptr, - llvm_u32.constInt(@intFromEnum(prefetch.rw), .False), - llvm_u32.constInt(prefetch.locality, .False), - llvm_u32.constInt(@intFromEnum(prefetch.cache), .False), + ptr.toLlvm(&self.wip), + (try o.builder.intConst(.i32, @intFromEnum(prefetch.rw))).toLlvm(&o.builder), + (try o.builder.intConst(.i32, prefetch.locality)).toLlvm(&o.builder), + (try o.builder.intConst(.i32, @intFromEnum(prefetch.cache))).toLlvm(&o.builder), }; - _ = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, ¶ms, params.len, .C, .Auto, ""); - return null; + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildCall( + llvm_fn_ty.toLlvm(&o.builder), + fn_val, + ¶ms, + params.len, + .C, + .Auto, + "", + ), &self.wip); + return .none; } - fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const llvm_dest_ty = try o.lowerType(inst_ty); - return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, ""); + return self.wip.cast(.addrspacecast, operand, try o.lowerType(inst_ty), ""); } - fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !?*llvm.Value { - const llvm_u32 = self.context.intType(32); - + fn amdgcnWorkIntrinsic(self: *FuncGen, dimension: u32, default: u32, comptime basename: []const u8) !Builder.Value { + const o = self.dg.object; const llvm_fn_name = switch (dimension) { 0 => basename ++ ".x", 1 => basename ++ ".y", 2 => basename ++ ".z", - else => return llvm_u32.constInt(default, .False), + else => return o.builder.intValue(.i32, default), }; const args: [0]*llvm.Value = .{}; - const llvm_fn = self.getIntrinsic(llvm_fn_name, &.{}); - return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); + const llvm_fn = try self.getIntrinsic(llvm_fn_name, &.{}); + return (try self.wip.unimplemented(.i32, "")).finish(self.builder.buildCall( + (try o.builder.fnType(.i32, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); } - fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const target = o.module.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -9716,37 +10243,41 @@ pub const FuncGen = struct { return self.amdgcnWorkIntrinsic(dimension, 0, 
"llvm.amdgcn.workitem.id"); } - fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const target = o.module.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures const pl_op = self.air.instructions.items(.data)[inst].pl_op; const dimension = pl_op.payload; - const llvm_u32 = self.context.intType(32); - if (dimension >= 3) { - return llvm_u32.constInt(1, .False); - } + if (dimension >= 3) return o.builder.intValue(.i32, 1); // Fetch the dispatch pointer, which points to this structure: // https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/adae6c61e10d371f7cbc3d0e94ae2c070cab18a4/src/inc/hsa.h#L2913 - const llvm_fn = self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{}); + const llvm_fn = try self.getIntrinsic("llvm.amdgcn.dispatch.ptr", &.{}); const args: [0]*llvm.Value = .{}; - const dispatch_ptr = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); - dispatch_ptr.setAlignment(4); + const llvm_ret_ty = try o.builder.ptrType(Builder.AddrSpace.amdgpu.constant); + const dispatch_ptr = (try self.wip.unimplemented(llvm_ret_ty, "")).finish(self.builder.buildCall( + (try o.builder.fnType(llvm_ret_ty, &.{}, .normal)).toLlvm(&o.builder), + llvm_fn, + &args, + args.len, + .Fast, + .Auto, + "", + ), &self.wip); + o.addAttrInt(dispatch_ptr.toLlvm(&self.wip), 0, "align", 4); // Load the work_group_* member from the struct as u16. // Just treat the dispatch pointer as an array of u16 to keep things simple. - const offset = 2 + dimension; - const index = [_]*llvm.Value{llvm_u32.constInt(offset, .False)}; - const llvm_u16 = self.context.intType(16); - const workgroup_size_ptr = self.builder.buildInBoundsGEP(llvm_u16, dispatch_ptr, &index, index.len, ""); - const workgroup_size = self.builder.buildLoad(llvm_u16, workgroup_size_ptr, ""); - workgroup_size.setAlignment(2); - return workgroup_size; + const workgroup_size_ptr = try self.wip.gep(.inbounds, .i16, dispatch_ptr, &.{ + try o.builder.intValue(try o.lowerType(Type.usize), 2 + dimension), + }, ""); + const workgroup_size_alignment = comptime Builder.Alignment.fromByteUnits(2); + return self.wip.load(.normal, .i16, workgroup_size_ptr, workgroup_size_alignment, ""); } - fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { const o = self.dg.object; const target = o.module.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -9756,65 +10287,82 @@ pub const FuncGen = struct { return self.amdgcnWorkIntrinsic(dimension, 0, "llvm.amdgcn.workgroup.id"); } - fn getErrorNameTable(self: *FuncGen) !*llvm.Value { + fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { const o = self.dg.object; - if (o.error_name_table) |table| { - return table; - } + const table = o.error_name_table; + if (table != .none) return table; const mod = o.module; const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); - const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space + const undef_init = try o.builder.undefConst(.ptr); // TODO: Address space - const error_name_table_global = o.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); - error_name_table_global.setInitializer(llvm_slice_ptr_ty.getUndef()); + 
const name = try o.builder.string("__zig_err_name_table"); + const error_name_table_global = o.llvm_module.addGlobal(Builder.Type.ptr.toLlvm(&o.builder), name.toSlice(&o.builder).?); + error_name_table_global.setInitializer(undef_init.toLlvm(&o.builder)); error_name_table_global.setLinkage(.Private); error_name_table_global.setGlobalConstant(.True); error_name_table_global.setUnnamedAddr(.True); error_name_table_global.setAlignment(slice_alignment); - o.error_name_table = error_name_table_global; - return error_name_table_global; + var global = Builder.Global{ + .linkage = .private, + .unnamed_addr = .unnamed_addr, + .type = .ptr, + .kind = .{ .variable = @enumFromInt(o.builder.variables.items.len) }, + }; + var variable = Builder.Variable{ + .global = @enumFromInt(o.builder.globals.count()), + .mutability = .constant, + .init = undef_init, + .alignment = Builder.Alignment.fromByteUnits(slice_alignment), + }; + try o.builder.llvm.globals.append(o.gpa, error_name_table_global); + _ = try o.builder.addGlobal(name, global); + try o.builder.variables.append(o.gpa, variable); + + o.error_name_table = global.kind.variable; + return global.kind.variable; } /// Assumes the optional is not pointer-like and payload has bits. - fn optIsNonNull( + fn optCmpNull( self: *FuncGen, - opt_llvm_ty: *llvm.Type, - opt_handle: *llvm.Value, + cond: Builder.IntegerCondition, + opt_llvm_ty: Builder.Type, + opt_handle: Builder.Value, is_by_ref: bool, - ) *llvm.Value { - const non_null_llvm_ty = self.context.intType(8); + ) Allocator.Error!Builder.Value { + const o = self.dg.object; const field = b: { if (is_by_ref) { - const field_ptr = self.builder.buildStructGEP(opt_llvm_ty, opt_handle, 1, ""); - break :b self.builder.buildLoad(non_null_llvm_ty, field_ptr, ""); + const field_ptr = try self.wip.gepStruct(opt_llvm_ty, opt_handle, 1, ""); + break :b try self.wip.load(.normal, .i8, field_ptr, .default, ""); } - break :b self.builder.buildExtractValue(opt_handle, 1, ""); + break :b try self.wip.extractValue(opt_handle, &.{1}, ""); }; comptime assert(optional_layout_version == 3); - return self.builder.buildICmp(.NE, field, non_null_llvm_ty.constInt(0, .False), ""); + return self.wip.icmp(cond, field, try o.builder.intValue(.i8, 0), ""); } /// Assumes the optional is not pointer-like and payload has bits. fn optPayloadHandle( fg: *FuncGen, - opt_llvm_ty: *llvm.Type, - opt_handle: *llvm.Value, + opt_llvm_ty: Builder.Type, + opt_handle: Builder.Value, opt_ty: Type, can_elide_load: bool, - ) !*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; const payload_ty = opt_ty.optionalChild(mod); if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. 
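For orientation, an editorial sketch (not part of the change; the wrapper type is invented): a non-pointer-like optional is laid out with the payload at field 0 and an i8 non-null flag at field 1, so optCmpNull compares that byte against 0 and the payload access below targets field 0.

    const std = @import("std");

    // Sketch only: the assumed layout behind optCmpNull / optPayloadHandle.
    fn LoweredOptional(comptime T: type) type {
        return extern struct {
            payload: T, // field 0: what optPayloadHandle returns (or points to)
            non_null: u8, // field 1: what optCmpNull compares against 0
        };
    }

    test "null check is a byte compare, payload is field 0" {
        const opt: LoweredOptional(u32) = .{ .payload = 123, .non_null = 1 };
        try std.testing.expect(opt.non_null != 0);
        try std.testing.expectEqual(@as(u32, 123), opt.payload);
    }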
- const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, ""); + const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, ""); - const payload_alignment = payload_ty.abiAlignment(mod); + const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod)); if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; @@ -9822,55 +10370,51 @@ pub const FuncGen = struct { return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, false); } const payload_llvm_ty = try o.lowerType(payload_ty); - const load_inst = fg.builder.buildLoad(payload_llvm_ty, payload_ptr, ""); - load_inst.setAlignment(payload_alignment); - return load_inst; + return fg.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, ""); } assert(!isByRef(payload_ty, mod)); - return fg.builder.buildExtractValue(opt_handle, 0, ""); + return fg.wip.extractValue(opt_handle, &.{0}, ""); } fn buildOptional( self: *FuncGen, optional_ty: Type, - payload: *llvm.Value, - non_null_bit: *llvm.Value, - ) !?*llvm.Value { + payload: Builder.Value, + non_null_bit: Builder.Value, + ) !Builder.Value { const o = self.dg.object; const optional_llvm_ty = try o.lowerType(optional_ty); - const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); + const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); const mod = o.module; if (isByRef(optional_ty, mod)) { - const payload_alignment = optional_ty.abiAlignment(mod); - const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment); + const payload_alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod)); + const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment); { - const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 0, ""); - const store_inst = self.builder.buildStore(payload, field_ptr); - store_inst.setAlignment(payload_alignment); + const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 0, ""); + _ = try self.wip.store(.normal, payload, field_ptr, payload_alignment); } { - const field_ptr = self.builder.buildStructGEP(optional_llvm_ty, alloca_inst, 1, ""); - const store_inst = self.builder.buildStore(non_null_field, field_ptr); - store_inst.setAlignment(1); + const non_null_alignment = comptime Builder.Alignment.fromByteUnits(1); + const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 1, ""); + _ = try self.wip.store(.normal, non_null_field, field_ptr, non_null_alignment); } return alloca_inst; } - const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, ""); - return self.builder.buildInsertValue(partial, non_null_field, 1, ""); + return self.wip.buildAggregate(optional_llvm_ty, &.{ payload, non_null_field }, ""); } fn fieldPtr( self: *FuncGen, inst: Air.Inst.Index, - struct_ptr: *llvm.Value, + struct_ptr: Builder.Value, struct_ptr_ty: Type, field_index: u32, - ) !?*llvm.Value { + ) !Builder.Value { const o = self.dg.object; const mod = o.module; const struct_ty = struct_ptr_ty.childType(mod); @@ -9892,26 +10436,25 @@ pub const FuncGen = struct { // Offset our operand pointer by the correct number of bytes. 
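A small editorial sketch of the byte-offset idea (the example type is invented): for a packed container the field pointer is just the base pointer advanced by a whole number of bytes, which the i8-typed GEP below expresses.

    const std = @import("std");

    // Sketch only: packedStructFieldByteOffset-style arithmetic at the Zig level.
    const Example = packed struct { a: u8, b: u16, c: u8 };

    test "packed field byte offsets" {
        // c starts at bit 24 of the backing integer, i.e. byte offset 3 from the base pointer.
        try std.testing.expectEqual(@as(usize, 3), @bitOffsetOf(Example, "c") / 8);
        // a is at byte offset 0, so fieldPtr can return the struct pointer unchanged.
        try std.testing.expectEqual(@as(usize, 0), @bitOffsetOf(Example, "a") / 8);
    }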
const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; - const byte_llvm_ty = self.context.intType(8); - const llvm_usize = try o.lowerType(Type.usize); - const llvm_index = llvm_usize.constInt(byte_offset, .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return self.builder.buildInBoundsGEP(byte_llvm_ty, struct_ptr, &indices, indices.len, ""); + const usize_ty = try o.lowerType(Type.usize); + const llvm_index = try o.builder.intValue(usize_ty, byte_offset); + return self.wip.gep(.inbounds, .i8, struct_ptr, &.{llvm_index}, ""); }, else => { const struct_llvm_ty = try o.lowerPtrElemTy(struct_ty); if (llvmField(struct_ty, field_index, mod)) |llvm_field| { - return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, ""); + return self.wip.gepStruct(struct_llvm_ty, struct_ptr, llvm_field.index, ""); } else { // If we found no index then this means this is a zero sized field at the // end of the struct. Treat our struct pointer as an array of two and get // the index to the element at index `1` to get a pointer to the end of // the struct. - const llvm_u32 = self.context.intType(32); - const llvm_index = llvm_u32.constInt(@intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); + const llvm_index = try o.builder.intValue( + try o.lowerType(Type.usize), + @intFromBool(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), + ); + return self.wip.gep(.inbounds, struct_llvm_ty, struct_ptr, &.{llvm_index}, ""); } }, }, @@ -9920,126 +10463,128 @@ pub const FuncGen = struct { if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr; const payload_index = @intFromBool(layout.tag_align >= layout.payload_align); const union_llvm_ty = try o.lowerType(struct_ty); - const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); - return union_field_ptr; + return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, ""); }, else => unreachable, } } - fn getIntrinsic(fg: *FuncGen, name: []const u8, types: []const *llvm.Type) *llvm.Value { + fn getIntrinsic( + fg: *FuncGen, + name: []const u8, + types: []const Builder.Type, + ) Allocator.Error!*llvm.Value { + const o = fg.dg.object; const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); - const o = fg.dg.object; - return o.llvm_module.getIntrinsicDeclaration(id, types.ptr, types.len); + const llvm_types = try o.gpa.alloc(*llvm.Type, types.len); + defer o.gpa.free(llvm_types); + for (llvm_types, types) |*llvm_type, ty| llvm_type.* = ty.toLlvm(&o.builder); + return o.llvm_module.getIntrinsicDeclaration(id, llvm_types.ptr, llvm_types.len); } /// Load a by-ref type by constructing a new alloca and performing a memcpy. 
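A minimal sketch of that strategy in ordinary Zig (the aggregate type is invented for illustration): rather than loading a large aggregate as a single SSA value, reserve local space and copy the bytes.

    const std = @import("std");

    // Sketch only: stack slot + byte copy, the same shape as buildAlloca + buildMemCpy below.
    const Big = extern struct { words: [8]u64 };

    fn loadByRefSketch(src: *const Big) Big {
        var result: Big = undefined; // plays the role of the fresh alloca
        @memcpy(std.mem.asBytes(&result), std.mem.asBytes(src));
        return result;
    }

    test "by-ref load copies the whole aggregate" {
        const src = Big{ .words = .{ 1, 2, 3, 4, 5, 6, 7, 8 } };
        const copy = loadByRefSketch(&src);
        try std.testing.expectEqual(@as(u64, 8), copy.words[7]);
    }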
fn loadByRef( fg: *FuncGen, - ptr: *llvm.Value, + ptr: Builder.Value, pointee_type: Type, - ptr_alignment: u32, + ptr_alignment: Builder.Alignment, is_volatile: bool, - ) !*llvm.Value { + ) !Builder.Value { const o = fg.dg.object; const mod = o.module; const pointee_llvm_ty = try o.lowerType(pointee_type); - const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); - const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); - const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); + const result_align = Builder.Alignment.fromByteUnits( + @max(ptr_alignment.toByteUnits() orelse 0, pointee_type.abiAlignment(mod)), + ); + const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align); + const usize_ty = try o.lowerType(Type.usize); const size_bytes = pointee_type.abiSize(mod); - _ = fg.builder.buildMemCpy( - result_ptr, - result_align, - ptr, - ptr_alignment, - llvm_usize.constInt(size_bytes, .False), + _ = (try fg.wip.unimplemented(.void, "")).finish(fg.builder.buildMemCpy( + result_ptr.toLlvm(&fg.wip), + @intCast(result_align.toByteUnits() orelse 0), + ptr.toLlvm(&fg.wip), + @intCast(ptr_alignment.toByteUnits() orelse 0), + (try o.builder.intConst(usize_ty, size_bytes)).toLlvm(&o.builder), is_volatile, - ); + ), &fg.wip); return result_ptr; } /// This function always performs a copy. For isByRef=true types, it creates a new /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. - fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { + fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { const o = self.dg.object; const mod = o.module; const info = ptr_ty.ptrInfo(mod); const elem_ty = info.child.toType(); - if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; - const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse - elem_ty.abiAlignment(mod))); - const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); + const ptr_alignment = Builder.Alignment.fromByteUnits( + info.flags.alignment.toByteUnitsOptional() orelse elem_ty.abiAlignment(mod), + ); + const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { + false => .normal, + true => .@"volatile", + }; assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); + const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index)); const vec_elem_ty = try o.lowerType(elem_ty); - const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); + const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); - loaded_vector.setAlignment(ptr_alignment); - loaded_vector.setVolatile(ptr_volatile); - - return self.builder.buildExtractElement(loaded_vector, index_u32, ""); + const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, ""); + return self.wip.extractElement(loaded_vector, index_u32, ""); } if (info.packed_offset.host_size == 0) { if (isByRef(elem_ty, mod)) { return self.loadByRef(ptr, elem_ty, ptr_alignment, info.flags.is_volatile); } - const elem_llvm_ty = try o.lowerType(elem_ty); - const llvm_inst = self.builder.buildLoad(elem_llvm_ty, ptr, ""); - 
llvm_inst.setAlignment(ptr_alignment); - llvm_inst.setVolatile(ptr_volatile); - return llvm_inst; + return self.wip.load(ptr_kind, try o.lowerType(elem_ty), ptr, ptr_alignment, ""); } - const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8); - const containing_int = self.builder.buildLoad(int_elem_ty, ptr, ""); - containing_int.setAlignment(ptr_alignment); - containing_int.setVolatile(ptr_volatile); + const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); + const containing_int = try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, ""); - const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); - const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False); - const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); + const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const shift_amt = try o.builder.intValue(containing_int_ty, info.packed_offset.bit_offset); + const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(elem_ty); if (isByRef(elem_ty, mod)) { - const result_align = elem_ty.abiAlignment(mod); - const result_ptr = self.buildAlloca(elem_llvm_ty, result_align); + const result_align = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align); - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - const store_inst = self.builder.buildStore(truncated_int, result_ptr); - store_inst.setAlignment(result_align); + const same_size_int = try o.builder.intType(@intCast(elem_bits)); + const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + _ = try self.wip.store(.normal, truncated_int, result_ptr, result_align); return result_ptr; } if (elem_ty.zigTypeTag(mod) == .Float or elem_ty.zigTypeTag(mod) == .Vector) { - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(elem_bits)); + const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); } if (elem_ty.isPtrAtRuntime(mod)) { - const same_size_int = self.context.intType(elem_bits); - const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); - return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); + const same_size_int = try o.builder.intType(@intCast(elem_bits)); + const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); + return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, ""); } - return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); + return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, ""); } fn store( self: *FuncGen, - ptr: *llvm.Value, + ptr: Builder.Value, ptr_ty: Type, - elem: *llvm.Value, - ordering: llvm.AtomicOrdering, + elem: Builder.Value, + ordering: Builder.AtomicOrdering, ) !void { const o = self.dg.object; const mod = o.module; @@ -10048,124 +10593,115 @@ pub const FuncGen = struct { if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } - const ptr_alignment = ptr_ty.ptrAlignment(mod); - const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile); + 
const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)); + const ptr_kind: Builder.MemoryAccessKind = switch (info.flags.is_volatile) { + false => .normal, + true => .@"volatile", + }; assert(info.flags.vector_index != .runtime); if (info.flags.vector_index != .none) { - const index_u32 = self.context.intType(32).constInt(@intFromEnum(info.flags.vector_index), .False); + const index_u32 = try o.builder.intValue(.i32, @intFromEnum(info.flags.vector_index)); const vec_elem_ty = try o.lowerType(elem_ty); - const vec_ty = vec_elem_ty.vectorType(info.packed_offset.host_size); + const vec_ty = try o.builder.vectorType(.normal, info.packed_offset.host_size, vec_elem_ty); - const loaded_vector = self.builder.buildLoad(vec_ty, ptr, ""); - loaded_vector.setAlignment(ptr_alignment); - loaded_vector.setVolatile(ptr_volatile); + const loaded_vector = try self.wip.load(ptr_kind, vec_ty, ptr, ptr_alignment, ""); - const modified_vector = self.builder.buildInsertElement(loaded_vector, elem, index_u32, ""); + const modified_vector = try self.wip.insertElement(loaded_vector, elem, index_u32, ""); - const store_inst = self.builder.buildStore(modified_vector, ptr); - assert(ordering == .NotAtomic); - store_inst.setAlignment(ptr_alignment); - store_inst.setVolatile(ptr_volatile); + assert(ordering == .none); + _ = try self.wip.store(ptr_kind, modified_vector, ptr, ptr_alignment); return; } if (info.packed_offset.host_size != 0) { - const int_elem_ty = self.context.intType(info.packed_offset.host_size * 8); - const containing_int = self.builder.buildLoad(int_elem_ty, ptr, ""); - assert(ordering == .NotAtomic); - containing_int.setAlignment(ptr_alignment); - containing_int.setVolatile(ptr_volatile); - const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod))); - const containing_int_ty = containing_int.typeOf(); - const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False); + const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8)); + assert(ordering == .none); + const containing_int = + try self.wip.load(ptr_kind, containing_int_ty, ptr, ptr_alignment, ""); + const elem_bits = ptr_ty.childType(mod).bitSize(mod); + const shift_amt = try o.builder.intConst(containing_int_ty, info.packed_offset.bit_offset); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store - const value_bits_type = self.context.intType(elem_bits); + const value_bits_type = try o.builder.intType(@intCast(elem_bits)); const value_bits = if (elem_ty.isPtrAtRuntime(mod)) - self.builder.buildPtrToInt(elem, value_bits_type, "") + try self.wip.cast(.ptrtoint, elem, value_bits_type, "") else - self.builder.buildBitCast(elem, value_bits_type, ""); - - var mask_val = value_bits_type.constAllOnes(); - mask_val = mask_val.constZExt(containing_int_ty); - mask_val = mask_val.constShl(shift_amt); - mask_val = mask_val.constNot(); - - const anded_containing_int = self.builder.buildAnd(containing_int, mask_val, ""); - const extended_value = self.builder.buildZExt(value_bits, containing_int_ty, ""); - const shifted_value = self.builder.buildShl(extended_value, shift_amt, ""); - const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, ""); - - const store_inst = self.builder.buildStore(ored_value, ptr); - assert(ordering == .NotAtomic); - store_inst.setAlignment(ptr_alignment); - store_inst.setVolatile(ptr_volatile); + try self.wip.cast(.bitcast, elem, value_bits_type, ""); + + var mask_val = 
try o.builder.intConst(value_bits_type, -1); + mask_val = try o.builder.castConst(.zext, mask_val, containing_int_ty); + mask_val = try o.builder.binConst(.shl, mask_val, shift_amt); + mask_val = + try o.builder.binConst(.xor, mask_val, try o.builder.intConst(containing_int_ty, -1)); + + const anded_containing_int = + try self.wip.bin(.@"and", containing_int, mask_val.toValue(), ""); + const extended_value = try self.wip.cast(.zext, value_bits, containing_int_ty, ""); + const shifted_value = try self.wip.bin(.shl, extended_value, shift_amt.toValue(), ""); + const ored_value = try self.wip.bin(.@"or", shifted_value, anded_containing_int, ""); + + assert(ordering == .none); + _ = try self.wip.store(ptr_kind, ored_value, ptr, ptr_alignment); return; } if (!isByRef(elem_ty, mod)) { - const store_inst = self.builder.buildStore(elem, ptr); - store_inst.setOrdering(ordering); - store_inst.setAlignment(ptr_alignment); - store_inst.setVolatile(ptr_volatile); + _ = try self.wip.storeAtomic(ptr_kind, elem, ptr, self.sync_scope, ordering, ptr_alignment); return; } - assert(ordering == .NotAtomic); + assert(ordering == .none); const size_bytes = elem_ty.abiSize(mod); - _ = self.builder.buildMemCpy( - ptr, - ptr_alignment, - elem, + _ = (try self.wip.unimplemented(.void, "")).finish(self.builder.buildMemCpy( + ptr.toLlvm(&self.wip), + @intCast(ptr_alignment.toByteUnits() orelse 0), + elem.toLlvm(&self.wip), elem_ty.abiAlignment(mod), - self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False), + (try o.builder.intConst(try o.lowerType(Type.usize), size_bytes)).toLlvm(&o.builder), info.flags.is_volatile, - ); + ), &self.wip); } - fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void { + fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; const o = fg.dg.object; - const target = o.module.getTarget(); - const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const zero = usize_llvm_ty.constInt(0, .False); - const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False); - const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, ""); - _ = valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero); + const usize_ty = try o.lowerType(Type.usize); + const zero = try o.builder.intValue(usize_ty, 0); + const req = try o.builder.intValue(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED); + const ptr_as_usize = try fg.wip.cast(.ptrtoint, ptr, usize_ty, ""); + _ = try valgrindClientRequest(fg, zero, req, ptr_as_usize, len, zero, zero, zero); } fn valgrindClientRequest( fg: *FuncGen, - default_value: *llvm.Value, - request: *llvm.Value, - a1: *llvm.Value, - a2: *llvm.Value, - a3: *llvm.Value, - a4: *llvm.Value, - a5: *llvm.Value, - ) *llvm.Value { + default_value: Builder.Value, + request: Builder.Value, + a1: Builder.Value, + a2: Builder.Value, + a3: Builder.Value, + a4: Builder.Value, + a5: Builder.Value, + ) Allocator.Error!Builder.Value { const o = fg.dg.object; const mod = o.module; const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; - const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod))); + const llvm_usize = try o.lowerType(Type.usize); + const usize_alignment = Builder.Alignment.fromByteUnits(Type.usize.abiAlignment(mod)); - const array_llvm_ty = usize_llvm_ty.arrayType(6); - const array_ptr = 
fg.valgrind_client_request_array orelse a: {
-            const array_ptr = fg.buildAlloca(array_llvm_ty, usize_alignment);
+        const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
+            const array_ptr = try fg.buildAlloca(array_llvm_ty, usize_alignment);
             fg.valgrind_client_request_array = array_ptr;
             break :a array_ptr;
-        };
-        const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
-        const zero = usize_llvm_ty.constInt(0, .False);
+        } else fg.valgrind_client_request_array;
+        const array_elements = [_]Builder.Value{ request, a1, a2, a3, a4, a5 };
+        const zero = try o.builder.intValue(llvm_usize, 0);
         for (array_elements, 0..) |elem, i| {
-            const indexes = [_]*llvm.Value{
-                zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False),
-            };
-            const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, "");
-            const store_inst = fg.builder.buildStore(elem, elem_ptr);
-            store_inst.setAlignment(usize_alignment);
+            const elem_ptr = try fg.wip.gep(.inbounds, array_llvm_ty, array_ptr, &.{
+                zero, try o.builder.intValue(llvm_usize, i),
+            }, "");
+            _ = try fg.wip.store(.normal, elem, elem_ptr, usize_alignment);
         }
         const arch_specific: struct {
@@ -10199,10 +10735,9 @@ pub const FuncGen = struct {
             else => unreachable,
         };
-        const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, usize_llvm_ty, "");
-        const args = [_]*llvm.Value{ array_ptr_as_usize, default_value };
-        const param_types = [_]*llvm.Type{ usize_llvm_ty, usize_llvm_ty };
-        const fn_llvm_ty = llvm.functionType(usize_llvm_ty, &param_types, args.len, .False);
+        const fn_llvm_ty = (try o.builder.fnType(llvm_usize, &(.{llvm_usize} ** 2), .normal)).toLlvm(&o.builder);
+        const array_ptr_as_usize = try fg.wip.cast(.ptrtoint, array_ptr, llvm_usize, "");
+        const args = [_]*llvm.Value{ array_ptr_as_usize.toLlvm(&fg.wip), default_value.toLlvm(&fg.wip) };
         const asm_fn = llvm.getInlineAsm(
             fn_llvm_ty,
             arch_specific.template.ptr,
@@ -10215,14 +10750,9 @@ pub const FuncGen = struct {
             .False, // can throw
         );
-        const call = fg.builder.buildCall(
-            fn_llvm_ty,
-            asm_fn,
-            &args,
-            args.len,
-            .C,
-            .Auto,
-            "",
+        const call = (try fg.wip.unimplemented(llvm_usize, "")).finish(
+            fg.builder.buildCall(fn_llvm_ty, asm_fn, &args, args.len, .C, .Auto, ""),
+            &fg.wip,
         );
         return call;
     }
@@ -10432,14 +10962,14 @@ fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
     }
 }
-fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrdering {
+fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) Builder.AtomicOrdering {
     return switch (atomic_order) {
-        .Unordered => .Unordered,
-        .Monotonic => .Monotonic,
-        .Acquire => .Acquire,
-        .Release => .Release,
-        .AcqRel => .AcquireRelease,
-        .SeqCst => .SequentiallyConsistent,
+        .Unordered => .unordered,
+        .Monotonic => .monotonic,
+        .Acquire => .acquire,
+        .Release => .release,
+        .AcqRel => .acq_rel,
+        .SeqCst => .seq_cst,
     };
 }
@@ -10494,45 +11024,67 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
 }
 /// Convert a zig-address space to an llvm address space.
-fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) c_uint { +fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace { + for (llvmAddrSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm; + unreachable; +} + +const AddrSpaceInfo = struct { + zig: ?std.builtin.AddressSpace, + llvm: Builder.AddrSpace, + non_integral: bool = false, + size: ?u16 = null, + abi: ?u16 = null, + pref: ?u16 = null, + idx: ?u16 = null, + force_in_data_layout: bool = false, +}; +fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo { return switch (target.cpu.arch) { - .x86, .x86_64 => switch (address_space) { - .generic => llvm.address_space.default, - .gs => llvm.address_space.x86.gs, - .fs => llvm.address_space.x86.fs, - .ss => llvm.address_space.x86.ss, - else => unreachable, + .x86, .x86_64 => &.{ + .{ .zig = .generic, .llvm = .default }, + .{ .zig = .gs, .llvm = Builder.AddrSpace.x86.gs }, + .{ .zig = .fs, .llvm = Builder.AddrSpace.x86.fs }, + .{ .zig = .ss, .llvm = Builder.AddrSpace.x86.ss }, + .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr32_sptr, .size = 32, .abi = 32, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr32_uptr, .size = 32, .abi = 32, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.x86.ptr64, .size = 64, .abi = 64, .force_in_data_layout = true }, }, - .nvptx, .nvptx64 => switch (address_space) { - .generic => llvm.address_space.default, - .global => llvm.address_space.nvptx.global, - .constant => llvm.address_space.nvptx.constant, - .param => llvm.address_space.nvptx.param, - .shared => llvm.address_space.nvptx.shared, - .local => llvm.address_space.nvptx.local, - else => unreachable, + .nvptx, .nvptx64 => &.{ + .{ .zig = .generic, .llvm = .default }, + .{ .zig = .global, .llvm = Builder.AddrSpace.nvptx.global }, + .{ .zig = .constant, .llvm = Builder.AddrSpace.nvptx.constant }, + .{ .zig = .param, .llvm = Builder.AddrSpace.nvptx.param }, + .{ .zig = .shared, .llvm = Builder.AddrSpace.nvptx.shared }, + .{ .zig = .local, .llvm = Builder.AddrSpace.nvptx.local }, }, - .amdgcn => switch (address_space) { - .generic => llvm.address_space.amdgpu.flat, - .global => llvm.address_space.amdgpu.global, - .constant => llvm.address_space.amdgpu.constant, - .shared => llvm.address_space.amdgpu.local, - .local => llvm.address_space.amdgpu.private, - else => unreachable, + .amdgcn => &.{ + .{ .zig = .generic, .llvm = Builder.AddrSpace.amdgpu.flat, .force_in_data_layout = true }, + .{ .zig = .global, .llvm = Builder.AddrSpace.amdgpu.global, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.region, .size = 32, .abi = 32 }, + .{ .zig = .shared, .llvm = Builder.AddrSpace.amdgpu.local, .size = 32, .abi = 32 }, + .{ .zig = .constant, .llvm = Builder.AddrSpace.amdgpu.constant, .force_in_data_layout = true }, + .{ .zig = .local, .llvm = Builder.AddrSpace.amdgpu.private, .size = 32, .abi = 32 }, + .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.constant_32bit, .size = 32, .abi = 32 }, + .{ .zig = null, .llvm = Builder.AddrSpace.amdgpu.buffer_fat_pointer, .non_integral = true }, }, - .avr => switch (address_space) { - .generic => llvm.address_space.default, - .flash => llvm.address_space.avr.flash, - .flash1 => llvm.address_space.avr.flash1, - .flash2 => llvm.address_space.avr.flash2, - .flash3 => llvm.address_space.avr.flash3, - .flash4 => llvm.address_space.avr.flash4, - .flash5 => llvm.address_space.avr.flash5, - 
else => unreachable, + .avr => &.{ + .{ .zig = .generic, .llvm = .default, .abi = 8 }, + .{ .zig = .flash, .llvm = Builder.AddrSpace.avr.flash, .abi = 8 }, + .{ .zig = .flash1, .llvm = Builder.AddrSpace.avr.flash1, .abi = 8 }, + .{ .zig = .flash2, .llvm = Builder.AddrSpace.avr.flash2, .abi = 8 }, + .{ .zig = .flash3, .llvm = Builder.AddrSpace.avr.flash3, .abi = 8 }, + .{ .zig = .flash4, .llvm = Builder.AddrSpace.avr.flash4, .abi = 8 }, + .{ .zig = .flash5, .llvm = Builder.AddrSpace.avr.flash5, .abi = 8 }, }, - else => switch (address_space) { - .generic => llvm.address_space.default, - else => unreachable, + .wasm32, .wasm64 => &.{ + .{ .zig = .generic, .llvm = .default, .force_in_data_layout = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.wasm.variable, .non_integral = true }, + .{ .zig = null, .llvm = Builder.AddrSpace.wasm.externref, .non_integral = true, .size = 8, .abi = 8 }, + .{ .zig = null, .llvm = Builder.AddrSpace.wasm.funcref, .non_integral = true, .size = 8, .abi = 8 }, + }, + else => &.{ + .{ .zig = .generic, .llvm = .default }, }, }; } @@ -10541,30 +11093,30 @@ fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Targe /// different address, space and then cast back to the generic address space. /// For example, on GPUs local variable declarations must be generated into the local address space. /// This function returns the address space local values should be generated into. -fn llvmAllocaAddressSpace(target: std.Target) c_uint { +fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace { return switch (target.cpu.arch) { // On amdgcn, locals should be generated into the private address space. // To make Zig not impossible to use, these are then converted to addresses in the // generic address space and treates as regular pointers. This is the way that HIP also does it. - .amdgcn => llvm.address_space.amdgpu.private, - else => llvm.address_space.default, + .amdgcn => Builder.AddrSpace.amdgpu.private, + else => .default, }; } /// On some targets, global values that are in the generic address space must be generated into a /// different address space, and then cast back to the generic address space. -fn llvmDefaultGlobalAddressSpace(target: std.Target) c_uint { +fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace { return switch (target.cpu.arch) { // On amdgcn, globals must be explicitly allocated and uploaded so that the program can access // them. - .amdgcn => llvm.address_space.amdgpu.global, - else => llvm.address_space.default, + .amdgcn => Builder.AddrSpace.amdgpu.global, + else => .default, }; } /// Return the actual address space that a value should be stored in if its a global address space. /// When a value is placed in the resulting address space, it needs to be cast back into wanted_address_space. -fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) c_uint { +fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace { return switch (wanted_address_space) { .generic => llvmDefaultGlobalAddressSpace(target), else => |as| toLlvmAddressSpace(as, target), @@ -10694,28 +11246,20 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool { /// In order to support the C calling convention, some return types need to be lowered /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. 
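As a concrete, editorial illustration of that remark, using an invented example type: a small aggregate returned under a C calling convention can be widened in the prototype to an integer of its ABI size in bits, then reinterpreted as the real type at the call site.

    const std = @import("std");

    // Sketch only: mirrors the intType(abi_size * 8) paths in the lowering below.
    const Rgb = extern struct { r: u8, g: u8, b: u8 };

    test "small aggregate return widens to an iN" {
        const abi_size = @sizeOf(Rgb); // 3 bytes, alignment 1
        const ret_bits = abi_size * 8; // such a prototype would return i24 rather than the struct type
        try std.testing.expectEqual(@as(usize, 24), ret_bits);
    }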
@@ -10694,28 +11246,20 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
 /// In order to support the C calling convention, some return types need to be lowered
 /// completely differently in the function prototype to honor the C ABI, and then
 /// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const mod = o.module;
     const return_type = fn_info.return_type.toType();
     if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
         // If the return type is an error set or an error union, then we make this
         // anyerror return type instead, so that it can be coerced into a function
         // pointer type which has anyerror as the return type.
-        if (return_type.isError(mod)) {
-            return o.lowerType(Type.anyerror);
-        } else {
-            return o.context.voidType();
-        }
+        return if (return_type.isError(mod)) Builder.Type.err_int else .void;
     }
     const target = mod.getTarget();
     switch (fn_info.cc) {
-        .Unspecified, .Inline => {
-            if (isByRef(return_type, mod)) {
-                return o.context.voidType();
-            } else {
-                return o.lowerType(return_type);
-            }
-        },
+        .Unspecified,
+        .Inline,
+        => return if (isByRef(return_type, mod)) .void else o.lowerType(return_type),
         .C => {
             switch (target.cpu.arch) {
                 .mips, .mipsel => return o.lowerType(return_type),
@@ -10729,50 +11273,37 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
                     }
                     const classes = wasm_c_abi.classifyType(return_type, mod);
                     if (classes[0] == .indirect or classes[0] == .none) {
-                        return o.context.voidType();
+                        return .void;
                     }
                     assert(classes[0] == .direct and classes[1] == .none);
                     const scalar_type = wasm_c_abi.scalarType(return_type, mod);
-                    const abi_size = scalar_type.abiSize(mod);
-                    return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
+                    return o.builder.intType(@intCast(scalar_type.abiSize(mod) * 8));
                 },
                 .aarch64, .aarch64_be => {
                     switch (aarch64_c_abi.classifyType(return_type, mod)) {
-                        .memory => return o.context.voidType(),
+                        .memory => return .void,
                         .float_array => return o.lowerType(return_type),
                         .byval => return o.lowerType(return_type),
-                        .integer => {
-                            const bit_size = return_type.bitSize(mod);
-                            return o.context.intType(@as(c_uint, @intCast(bit_size)));
-                        },
-                        .double_integer => return o.context.intType(64).arrayType(2),
+                        .integer => return o.builder.intType(@intCast(return_type.bitSize(mod))),
+                        .double_integer => return o.builder.arrayType(2, .i64),
                     }
                 },
                 .arm, .armeb => {
                     switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
-                        .memory, .i64_array => return o.context.voidType(),
-                        .i32_array => |len| if (len == 1) {
-                            return o.context.intType(32);
-                        } else {
-                            return o.context.voidType();
-                        },
+                        .memory, .i64_array => return .void,
+                        .i32_array => |len| return if (len == 1) .i32 else .void,
                         .byval => return o.lowerType(return_type),
                     }
                 },
                 .riscv32, .riscv64 => {
                     switch (riscv_c_abi.classifyType(return_type, mod)) {
-                        .memory => return o.context.voidType(),
+                        .memory => return .void,
                         .integer => {
-                            const bit_size = return_type.bitSize(mod);
-                            return o.context.intType(@as(c_uint, @intCast(bit_size)));
+                            return o.builder.intType(@intCast(return_type.bitSize(mod)));
                         },
                         .double_integer => {
-                            var llvm_types_buffer: [2]*llvm.Type = .{
-                                o.context.intType(64),
-                                o.context.intType(64),
-                            };
-                            return o.context.structType(&llvm_types_buffer, 2, .False);
+                            return o.builder.structType(.normal, &.{ .i64, .i64 });
                         },
                         .byval => return o.lowerType(return_type),
                     }
@@ -10783,18 +11314,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
         },
         .Win64 => return lowerWin64FnRetTy(o, fn_info),
         .SysV => return lowerSystemVFnRetTy(o, fn_info),
-        .Stdcall => {
-            if (isScalar(mod, return_type)) {
-                return o.lowerType(return_type);
-            } else {
-                return o.context.voidType();
-            }
-        },
+        .Stdcall => return if (isScalar(mod, return_type)) o.lowerType(return_type) else .void,
         else => return o.lowerType(return_type),
     }
 }
 
-fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const mod = o.module;
     const return_type = fn_info.return_type.toType();
     switch (x86_64_abi.classifyWindows(return_type, mod)) {
@@ -10802,53 +11327,48 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
             if (isScalar(mod, return_type)) {
                 return o.lowerType(return_type);
             } else {
-                const abi_size = return_type.abiSize(mod);
-                return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
+                return o.builder.intType(@intCast(return_type.abiSize(mod) * 8));
             }
         },
-        .win_i128 => return o.context.intType(64).vectorType(2),
-        .memory => return o.context.voidType(),
+        .win_i128 => return o.builder.vectorType(.normal, 2, .i64),
+        .memory => return .void,
         .sse => return o.lowerType(return_type),
         else => unreachable,
     }
 }
 
-fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const mod = o.module;
     const return_type = fn_info.return_type.toType();
     if (isScalar(mod, return_type)) {
         return o.lowerType(return_type);
     }
     const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
-    if (classes[0] == .memory) {
-        return o.context.voidType();
-    }
-    var llvm_types_buffer: [8]*llvm.Type = undefined;
-    var llvm_types_index: u32 = 0;
+    if (classes[0] == .memory) return .void;
+    var types_index: u32 = 0;
+    var types_buffer: [8]Builder.Type = undefined;
     for (classes) |class| {
         switch (class) {
             .integer => {
-                llvm_types_buffer[llvm_types_index] = o.context.intType(64);
-                llvm_types_index += 1;
+                types_buffer[types_index] = .i64;
+                types_index += 1;
             },
             .sse, .sseup => {
-                llvm_types_buffer[llvm_types_index] = o.context.doubleType();
-                llvm_types_index += 1;
+                types_buffer[types_index] = .double;
+                types_index += 1;
            },
            .float => {
-                llvm_types_buffer[llvm_types_index] = o.context.floatType();
-                llvm_types_index += 1;
+                types_buffer[types_index] = .float;
+                types_index += 1;
            },
            .float_combine => {
-                llvm_types_buffer[llvm_types_index] = o.context.floatType().vectorType(2);
-                llvm_types_index += 1;
+                types_buffer[types_index] = try o.builder.vectorType(.normal, 2, .float);
+                types_index += 1;
            },
            .x87 => {
-                if (llvm_types_index != 0 or classes[2] != .none) {
-                    return o.context.voidType();
-                }
-                llvm_types_buffer[llvm_types_index] = o.context.x86FP80Type();
-                llvm_types_index += 1;
+                if (types_index != 0 or classes[2] != .none) return .void;
+                types_buffer[types_index] = .x86_fp80;
+                types_index += 1;
            },
            .x87up => continue,
            .complex_x87 => {
@@ -10860,10 +11380,9 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type
         }
     }
     if (classes[0] == .integer and classes[1] == .none) {
-        const abi_size = return_type.abiSize(mod);
-        return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
+        return o.builder.intType(@intCast(return_type.abiSize(mod) * 8));
     }
-    return o.context.structType(&llvm_types_buffer, llvm_types_index, .False);
+    return o.builder.structType(.normal, types_buffer[0..types_index]);
 }
 
 const ParamTypeIterator = struct {
@@ -10871,8 +11390,8 @@ const ParamTypeIterator = struct {
     fn_info: InternPool.Key.FuncType,
     zig_index: u32,
     llvm_index: u32,
-    llvm_types_len: u32,
-    llvm_types_buffer: [8]*llvm.Type,
+    types_len: u32,
+    types_buffer: [8]Builder.Type,
     byval_attr: bool,
 
     const Lowering = union(enum) {
@@ -10889,7 +11408,7 @@ const ParamTypeIterator = struct {
         i64_array: u8,
     };
 
-    pub fn next(it: *ParamTypeIterator) ?Lowering {
+    pub fn next(it: *ParamTypeIterator) Allocator.Error!?Lowering {
         if (it.zig_index >= it.fn_info.param_types.len) return null;
         const mod = it.object.module;
         const ip = &mod.intern_pool;
@@ -10899,7 +11418,7 @@ const ParamTypeIterator = struct {
     }
 
     /// `airCall` uses this instead of `next` so that it can take into account variadic functions.
-    pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) ?Lowering {
+    pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) Allocator.Error!?Lowering {
         const mod = it.object.module;
         const ip = &mod.intern_pool;
         if (it.zig_index >= it.fn_info.param_types.len) {
@@ -10913,7 +11432,7 @@ const ParamTypeIterator = struct {
         }
     }
 
-    fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering {
+    fn nextInner(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
         const mod = it.object.module;
         const target = mod.getTarget();
 
@@ -10968,8 +11487,8 @@ const ParamTypeIterator = struct {
                             .float_array => |len| return Lowering{ .float_array = len },
                             .byval => return .byval,
                             .integer => {
-                                it.llvm_types_len = 1;
-                                it.llvm_types_buffer[0] = it.object.context.intType(64);
+                                it.types_len = 1;
+                                it.types_buffer[0] = .i64;
                                 return .multiple_llvm_types;
                             },
                             .double_integer => return Lowering{ .i64_array = 2 },
@@ -11063,7 +11582,7 @@ const ParamTypeIterator = struct {
         }
     }
 
-    fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
+    fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
         const mod = it.object.module;
         const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
         if (classes[0] == .memory) {
@@ -11077,25 +11596,25 @@ const ParamTypeIterator = struct {
             it.llvm_index += 1;
             return .byval;
         }
-        var llvm_types_buffer: [8]*llvm.Type = undefined;
-        var llvm_types_index: u32 = 0;
+        var types_index: u32 = 0;
+        var types_buffer: [8]Builder.Type = undefined;
         for (classes) |class| {
             switch (class) {
                 .integer => {
-                    llvm_types_buffer[llvm_types_index] = it.object.context.intType(64);
-                    llvm_types_index += 1;
+                    types_buffer[types_index] = .i64;
+                    types_index += 1;
                 },
                 .sse, .sseup => {
-                    llvm_types_buffer[llvm_types_index] = it.object.context.doubleType();
-                    llvm_types_index += 1;
+                    types_buffer[types_index] = .double;
+                    types_index += 1;
                },
                .float => {
-                    llvm_types_buffer[llvm_types_index] = it.object.context.floatType();
-                    llvm_types_index += 1;
+                    types_buffer[types_index] = .float;
+                    types_index += 1;
                },
                .float_combine => {
-                    llvm_types_buffer[llvm_types_index] = it.object.context.floatType().vectorType(2);
-                    llvm_types_index += 1;
+                    types_buffer[types_index] = try it.object.builder.vectorType(.normal, 2, .float);
+                    types_index += 1;
                },
                .x87 => {
                    it.zig_index += 1;
@@ -11117,9 +11636,9 @@ const ParamTypeIterator = struct {
             it.llvm_index += 1;
             return .abi_sized_int;
         }
-        it.llvm_types_buffer = llvm_types_buffer;
-        it.llvm_types_len = llvm_types_index;
-        it.llvm_index += llvm_types_index;
+        it.types_len = types_index;
+        it.types_buffer = types_buffer;
+        it.llvm_index += types_index;
         it.zig_index += 1;
         return .multiple_llvm_types;
     }
@@ -11131,8 +11650,8 @@ fn iterateParamTypes(object: *Object, fn_info: InternPool.Key.FuncType) ParamTyp
         .fn_info = fn_info,
         .zig_index = 0,
         .llvm_index = 0,
-        .llvm_types_buffer = undefined,
-        .llvm_types_len = 0,
+        .types_len = 0,
+        .types_buffer = undefined,
         .byval_attr = false,
     };
 }
@@ -11355,23 +11874,23 @@ const AnnotatedDITypePtr = enum(usize) {
     fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr {
         const addr = @intFromPtr(di_type);
         assert(@as(u1, @truncate(addr)) == 0);
-        return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1));
+        return @enumFromInt(addr | 1);
     }
 
     fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr {
         const addr = @intFromPtr(di_type);
-        return @as(AnnotatedDITypePtr, @enumFromInt(addr));
+        return @enumFromInt(addr);
     }
 
     fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr {
         const addr = @intFromPtr(di_type);
         const bit = @intFromBool(resolve == .fwd);
-        return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit));
+        return @enumFromInt(addr | bit);
     }
 
     fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType {
         const fixed_addr = @intFromEnum(self) & ~@as(usize, 1);
-        return @as(*llvm.DIType, @ptrFromInt(fixed_addr));
+        return @ptrFromInt(fixed_addr);
     }
 
     fn isFwdOnly(self: AnnotatedDITypePtr) bool {
@@ -11401,56 +11920,39 @@ fn compilerRtIntBits(bits: u16) u16 {
 }
 
 fn buildAllocaInner(
-    context: *llvm.Context,
-    builder: *llvm.Builder,
-    llvm_func: *llvm.Value,
+    wip: *Builder.WipFunction,
     di_scope_non_null: bool,
-    llvm_ty: *llvm.Type,
-    maybe_alignment: ?c_uint,
+    llvm_ty: Builder.Type,
+    alignment: Builder.Alignment,
     target: std.Target,
-) *llvm.Value {
+) Allocator.Error!Builder.Value {
     const address_space = llvmAllocaAddressSpace(target);
 
     const alloca = blk: {
-        const prev_block = builder.getInsertBlock();
-        const prev_debug_location = builder.getCurrentDebugLocation2();
+        const prev_cursor = wip.cursor;
+        const prev_debug_location = wip.llvm.builder.getCurrentDebugLocation2();
         defer {
-            builder.positionBuilderAtEnd(prev_block);
-            if (di_scope_non_null) {
-                builder.setCurrentDebugLocation2(prev_debug_location);
-            }
-        }
-
-        const entry_block = llvm_func.getFirstBasicBlock().?;
-        if (entry_block.getFirstInstruction()) |first_inst| {
-            builder.positionBuilder(entry_block, first_inst);
-        } else {
-            builder.positionBuilderAtEnd(entry_block);
+            wip.cursor = prev_cursor;
+            if (wip.cursor.block == .entry) wip.cursor.instruction += 1;
+            if (di_scope_non_null) wip.llvm.builder.setCurrentDebugLocation2(prev_debug_location);
         }
-        builder.clearCurrentDebugLocation();
-        break :blk builder.buildAllocaInAddressSpace(llvm_ty, address_space, "");
+        wip.cursor = .{ .block = .entry };
+        wip.llvm.builder.clearCurrentDebugLocation();
+        break :blk try wip.alloca(.normal, llvm_ty, .none, alignment, address_space, "");
     };
 
-    if (maybe_alignment) |alignment| {
-        alloca.setAlignment(alignment);
-    }
-
     // The pointer returned from this function should have the generic address space,
     // if this isn't the case then cast it to the generic address space.
-    if (address_space != llvm.address_space.default) {
-        return builder.buildAddrSpaceCast(alloca, context.pointerType(llvm.address_space.default), "");
-    }
-
-    return alloca;
+    return wip.conv(.unneeded, alloca, .ptr, "");
 }
 
 fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
-    return @intFromBool(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
+    return @intFromBool(Type.err_int.abiAlignment(mod) > payload_ty.abiAlignment(mod));
}
 
 fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
-    return @intFromBool(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
+    return @intFromBool(Type.err_int.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
 }
 
 /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location

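Aside (not part of the diff): errUnionPayloadOffset and errUnionErrorOffset encode a single layout rule. Whichever of the error value and the payload has the larger ABI alignment is placed first in the lowered error-union aggregate, so the two helpers always return complementary field indices. A standalone sketch of that rule with hand-picked alignments; error_align and payload_align stand in for Type.err_int.abiAlignment(mod) and payload_ty.abiAlignment(mod), and the value 2 reflects that the error value is a u16.

const std = @import("std");

// The field holding the payload: 1 when the error value has strictly larger alignment, else 0.
fn payloadFieldIndex(error_align: u32, payload_align: u32) u1 {
    return @intFromBool(error_align > payload_align);
}

// The field holding the error value: always the complement of payloadFieldIndex.
fn errorFieldIndex(error_align: u32, payload_align: u32) u1 {
    return @intFromBool(error_align <= payload_align);
}

test "error value and payload always land in different fields" {
    // u64 payload (align 8): payload first, error value second.
    try std.testing.expectEqual(@as(u1, 0), payloadFieldIndex(2, 8));
    try std.testing.expectEqual(@as(u1, 1), errorFieldIndex(2, 8));
    // u8 payload (align 1): error value first, payload second.
    try std.testing.expectEqual(@as(u1, 1), payloadFieldIndex(2, 1));
    try std.testing.expectEqual(@as(u1, 0), errorFieldIndex(2, 1));
}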